=== RUN TestAddons/parallel/Registry
=== PAUSE TestAddons/parallel/Registry
=== CONT TestAddons/parallel/Registry
addons_test.go:328: registry stabilized in 1.963057ms
addons_test.go:330: (dbg) TestAddons/parallel/Registry: waiting 6m0s for pods matching "actual-registry=true" in namespace "kube-system" ...
helpers_test.go:344: "registry-66c9cd494c-tvxjt" [9c3d099a-ff8c-4fd5-8178-5ad9ed8d1f67] Running
addons_test.go:330: (dbg) TestAddons/parallel/Registry: actual-registry=true healthy within 5.003396779s
addons_test.go:333: (dbg) TestAddons/parallel/Registry: waiting 10m0s for pods matching "registry-proxy=true" in namespace "kube-system" ...
helpers_test.go:344: "registry-proxy-xwflp" [a13fee98-c1a9-4518-844c-031a36bf17b4] Running
addons_test.go:333: (dbg) TestAddons/parallel/Registry: registry-proxy=true healthy within 5.003804778s
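The two waits above poll kube-system for pods matching a label selector until they report healthy. A rough standalone sketch of that kind of check, shelling out to kubectl rather than using the test's own helpers_test.go utilities (the function name, timings, and polling interval here are illustrative, not minikube's code):

```go
package main

import (
	"fmt"
	"os/exec"
	"strings"
	"time"
)

// waitForPods polls `kubectl get pods` for the given label selector until every
// matching pod reports phase Running, or the deadline expires.
func waitForPods(kctx, namespace, selector string, timeout time.Duration) error {
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		out, err := exec.Command("kubectl", "--context", kctx,
			"-n", namespace, "get", "pods", "-l", selector,
			"-o", "jsonpath={.items[*].status.phase}").Output()
		if err == nil {
			phases := strings.Fields(string(out))
			allRunning := len(phases) > 0
			for _, p := range phases {
				if p != "Running" {
					allRunning = false
					break
				}
			}
			if allRunning {
				return nil
			}
		}
		time.Sleep(2 * time.Second)
	}
	return fmt.Errorf("pods matching %q not Running within %s", selector, timeout)
}

func main() {
	if err := waitForPods("addons-242108", "kube-system", "actual-registry=true", 6*time.Minute); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println("registry pods are Running")
}
```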
addons_test.go:338: (dbg) Run: kubectl --context addons-242108 delete po -l run=registry-test --now
addons_test.go:343: (dbg) Run: kubectl --context addons-242108 run --rm registry-test --restart=Never --image=gcr.io/k8s-minikube/busybox -it -- sh -c "wget --spider -S http://registry.kube-system.svc.cluster.local"
addons_test.go:343: (dbg) Non-zero exit: kubectl --context addons-242108 run --rm registry-test --restart=Never --image=gcr.io/k8s-minikube/busybox -it -- sh -c "wget --spider -S http://registry.kube-system.svc.cluster.local": exit status 1 (1m0.07716622s)
-- stdout --
pod "registry-test" deleted
-- /stdout --
** stderr **
error: timed out waiting for the condition
** /stderr **
addons_test.go:345: failed to hit registry.kube-system.svc.cluster.local. args "kubectl --context addons-242108 run --rm registry-test --restart=Never --image=gcr.io/k8s-minikube/busybox -it -- sh -c \"wget --spider -S http://registry.kube-system.svc.cluster.local\"" failed: exit status 1
addons_test.go:349: expected curl response be "HTTP/1.1 200", but got *pod "registry-test" deleted
*
addons_test.go:357: (dbg) Run: out/minikube-linux-amd64 -p addons-242108 ip
2024/09/23 12:18:49 [DEBUG] GET http://192.168.49.2:5000
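The step at addons_test.go:343 tries to reach the registry from inside the cluster through its service DNS name and times out, while the 12:18:49 debug line probes the same registry from the host via the node IP on port 5000; addons_test.go:349 expects an "HTTP/1.1 200" reply. A minimal sketch of such a host-side probe, assuming a plain HTTP GET is sufficient (a hypothetical helper, not minikube's own code):

```go
package main

import (
	"fmt"
	"net/http"
	"time"
)

// probeRegistry issues a GET against the registry endpoint exposed on the
// node IP (e.g. http://192.168.49.2:5000) and reports whether it answered
// with HTTP 200, mirroring the "[DEBUG] GET" check in the log above.
func probeRegistry(url string, timeout time.Duration) error {
	client := &http.Client{Timeout: timeout}
	resp, err := client.Get(url)
	if err != nil {
		return fmt.Errorf("registry not reachable: %w", err)
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("unexpected status %q", resp.Status)
	}
	return nil
}

func main() {
	if err := probeRegistry("http://192.168.49.2:5000", 10*time.Second); err != nil {
		fmt.Println("FAIL:", err)
		return
	}
	fmt.Println("registry responded with HTTP 200")
}
```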
addons_test.go:386: (dbg) Run: out/minikube-linux-amd64 -p addons-242108 addons disable registry --alsologtostderr -v=1
helpers_test.go:222: -----------------------post-mortem--------------------------------
helpers_test.go:230: ======> post-mortem[TestAddons/parallel/Registry]: docker inspect <======
helpers_test.go:231: (dbg) Run: docker inspect addons-242108
helpers_test.go:235: (dbg) docker inspect addons-242108:
-- stdout --
[
{
"Id": "1d059c9109bf05ff49d91d991611774aaa22c939a6d0936e5ad5eda2b700becd",
"Created": "2024-09-23T12:05:48.452497758Z",
"Path": "/usr/local/bin/entrypoint",
"Args": [
"/sbin/init"
],
"State": {
"Status": "running",
"Running": true,
"Paused": false,
"Restarting": false,
"OOMKilled": false,
"Dead": false,
"Pid": 1858751,
"ExitCode": 0,
"Error": "",
"StartedAt": "2024-09-23T12:05:48.583701691Z",
"FinishedAt": "0001-01-01T00:00:00Z"
},
"Image": "sha256:d94335c0cd164ddebb3c5158e317bcf6d2e08dc08f448d25251f425acb842829",
"ResolvConfPath": "/var/lib/docker/containers/1d059c9109bf05ff49d91d991611774aaa22c939a6d0936e5ad5eda2b700becd/resolv.conf",
"HostnamePath": "/var/lib/docker/containers/1d059c9109bf05ff49d91d991611774aaa22c939a6d0936e5ad5eda2b700becd/hostname",
"HostsPath": "/var/lib/docker/containers/1d059c9109bf05ff49d91d991611774aaa22c939a6d0936e5ad5eda2b700becd/hosts",
"LogPath": "/var/lib/docker/containers/1d059c9109bf05ff49d91d991611774aaa22c939a6d0936e5ad5eda2b700becd/1d059c9109bf05ff49d91d991611774aaa22c939a6d0936e5ad5eda2b700becd-json.log",
"Name": "/addons-242108",
"RestartCount": 0,
"Driver": "overlay2",
"Platform": "linux",
"MountLabel": "",
"ProcessLabel": "",
"AppArmorProfile": "unconfined",
"ExecIDs": null,
"HostConfig": {
"Binds": [
"/lib/modules:/lib/modules:ro",
"addons-242108:/var"
],
"ContainerIDFile": "",
"LogConfig": {
"Type": "json-file",
"Config": {
"max-size": "100m"
}
},
"NetworkMode": "addons-242108",
"PortBindings": {
"22/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"2376/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"32443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"5000/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"8443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
]
},
"RestartPolicy": {
"Name": "no",
"MaximumRetryCount": 0
},
"AutoRemove": false,
"VolumeDriver": "",
"VolumesFrom": null,
"ConsoleSize": [
0,
0
],
"CapAdd": null,
"CapDrop": null,
"CgroupnsMode": "host",
"Dns": [],
"DnsOptions": [],
"DnsSearch": [],
"ExtraHosts": null,
"GroupAdd": null,
"IpcMode": "private",
"Cgroup": "",
"Links": null,
"OomScoreAdj": 0,
"PidMode": "",
"Privileged": true,
"PublishAllPorts": false,
"ReadonlyRootfs": false,
"SecurityOpt": [
"seccomp=unconfined",
"apparmor=unconfined",
"label=disable"
],
"Tmpfs": {
"/run": "",
"/tmp": ""
},
"UTSMode": "",
"UsernsMode": "",
"ShmSize": 67108864,
"Runtime": "runc",
"Isolation": "",
"CpuShares": 0,
"Memory": 4194304000,
"NanoCpus": 2000000000,
"CgroupParent": "",
"BlkioWeight": 0,
"BlkioWeightDevice": [],
"BlkioDeviceReadBps": [],
"BlkioDeviceWriteBps": [],
"BlkioDeviceReadIOps": [],
"BlkioDeviceWriteIOps": [],
"CpuPeriod": 0,
"CpuQuota": 0,
"CpuRealtimePeriod": 0,
"CpuRealtimeRuntime": 0,
"CpusetCpus": "",
"CpusetMems": "",
"Devices": [],
"DeviceCgroupRules": null,
"DeviceRequests": null,
"MemoryReservation": 0,
"MemorySwap": 8388608000,
"MemorySwappiness": null,
"OomKillDisable": false,
"PidsLimit": null,
"Ulimits": [],
"CpuCount": 0,
"CpuPercent": 0,
"IOMaximumIOps": 0,
"IOMaximumBandwidth": 0,
"MaskedPaths": null,
"ReadonlyPaths": null
},
"GraphDriver": {
"Data": {
"LowerDir": "/var/lib/docker/overlay2/a6c526568d48af41b6ae29586309deaa4096385bf2a555fda8c1a1c9ac096589-init/diff:/var/lib/docker/overlay2/cd207faa7742606cda57be0e5f49a5c7faca8c45dd7d87c1907737c72f05e0a8/diff",
"MergedDir": "/var/lib/docker/overlay2/a6c526568d48af41b6ae29586309deaa4096385bf2a555fda8c1a1c9ac096589/merged",
"UpperDir": "/var/lib/docker/overlay2/a6c526568d48af41b6ae29586309deaa4096385bf2a555fda8c1a1c9ac096589/diff",
"WorkDir": "/var/lib/docker/overlay2/a6c526568d48af41b6ae29586309deaa4096385bf2a555fda8c1a1c9ac096589/work"
},
"Name": "overlay2"
},
"Mounts": [
{
"Type": "bind",
"Source": "/lib/modules",
"Destination": "/lib/modules",
"Mode": "ro",
"RW": false,
"Propagation": "rprivate"
},
{
"Type": "volume",
"Name": "addons-242108",
"Source": "/var/lib/docker/volumes/addons-242108/_data",
"Destination": "/var",
"Driver": "local",
"Mode": "z",
"RW": true,
"Propagation": ""
}
],
"Config": {
"Hostname": "addons-242108",
"Domainname": "",
"User": "",
"AttachStdin": false,
"AttachStdout": false,
"AttachStderr": false,
"ExposedPorts": {
"22/tcp": {},
"2376/tcp": {},
"32443/tcp": {},
"5000/tcp": {},
"8443/tcp": {}
},
"Tty": true,
"OpenStdin": false,
"StdinOnce": false,
"Env": [
"container=docker",
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
],
"Cmd": null,
"Image": "gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1726784731-19672@sha256:7f8c62ddb0100a5b958dd19c5b5478b8c7ef13da9a0a4d6c7d18f43544e0dbed",
"Volumes": null,
"WorkingDir": "/",
"Entrypoint": [
"/usr/local/bin/entrypoint",
"/sbin/init"
],
"OnBuild": null,
"Labels": {
"created_by.minikube.sigs.k8s.io": "true",
"mode.minikube.sigs.k8s.io": "addons-242108",
"name.minikube.sigs.k8s.io": "addons-242108",
"role.minikube.sigs.k8s.io": ""
},
"StopSignal": "SIGRTMIN+3"
},
"NetworkSettings": {
"Bridge": "",
"SandboxID": "595a8061e107e39cdfe926da17cc6a41dd66929d8f37c6d4f50bbffbbc9aca1b",
"SandboxKey": "/var/run/docker/netns/595a8061e107",
"Ports": {
"22/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "32768"
}
],
"2376/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "32769"
}
],
"32443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "32772"
}
],
"5000/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "32770"
}
],
"8443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "32771"
}
]
},
"HairpinMode": false,
"LinkLocalIPv6Address": "",
"LinkLocalIPv6PrefixLen": 0,
"SecondaryIPAddresses": null,
"SecondaryIPv6Addresses": null,
"EndpointID": "",
"Gateway": "",
"GlobalIPv6Address": "",
"GlobalIPv6PrefixLen": 0,
"IPAddress": "",
"IPPrefixLen": 0,
"IPv6Gateway": "",
"MacAddress": "",
"Networks": {
"addons-242108": {
"IPAMConfig": {
"IPv4Address": "192.168.49.2"
},
"Links": null,
"Aliases": null,
"MacAddress": "02:42:c0:a8:31:02",
"DriverOpts": null,
"NetworkID": "582e692b457ce90f2a596dd74079ab78ff89a1eb8c7247467c95aa3ee1725a13",
"EndpointID": "bcfd292950df1c3a99647db7c4adbf691d369555faa45dc94a0e287b690530cc",
"Gateway": "192.168.49.1",
"IPAddress": "192.168.49.2",
"IPPrefixLen": 24,
"IPv6Gateway": "",
"GlobalIPv6Address": "",
"GlobalIPv6PrefixLen": 0,
"DNSNames": [
"addons-242108",
"1d059c9109bf"
]
}
}
}
}
]
-- /stdout --
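The docker inspect dump above carries the published port map and the container's address on the addons-242108 network. When only those two values are needed (for example, the 5000/tcp binding behind the registry probe earlier), the JSON can be decoded directly; a small illustrative program, assuming the inspect output is piped in on stdin (not part of the test suite):

```go
package main

import (
	"encoding/json"
	"fmt"
	"os"
)

// containerInfo models just the fields of `docker inspect` output used below.
type containerInfo struct {
	NetworkSettings struct {
		Ports map[string][]struct {
			HostIp   string
			HostPort string
		}
		Networks map[string]struct {
			IPAddress string
		}
	}
}

func main() {
	// Usage: docker inspect addons-242108 | go run inspectports.go
	var containers []containerInfo
	if err := json.NewDecoder(os.Stdin).Decode(&containers); err != nil {
		fmt.Fprintln(os.Stderr, "decode failed:", err)
		os.Exit(1)
	}
	if len(containers) == 0 {
		fmt.Fprintln(os.Stderr, "no containers in input")
		os.Exit(1)
	}
	ns := containers[0].NetworkSettings
	if bindings := ns.Ports["5000/tcp"]; len(bindings) > 0 {
		fmt.Printf("registry published on %s:%s\n", bindings[0].HostIp, bindings[0].HostPort)
	}
	if nw, ok := ns.Networks["addons-242108"]; ok {
		fmt.Println("container IP:", nw.IPAddress)
	}
}
```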
helpers_test.go:239: (dbg) Run: out/minikube-linux-amd64 status --format={{.Host}} -p addons-242108 -n addons-242108
helpers_test.go:244: <<< TestAddons/parallel/Registry FAILED: start of post-mortem logs <<<
helpers_test.go:245: ======> post-mortem[TestAddons/parallel/Registry]: minikube logs <======
helpers_test.go:247: (dbg) Run: out/minikube-linux-amd64 -p addons-242108 logs -n 25
helpers_test.go:252: TestAddons/parallel/Registry logs:
-- stdout --
==> Audit <==
|---------|---------------------------------------------------------------------------------------------|------------------------|---------|---------|---------------------|---------------------|
| Command | Args | Profile | User | Version | Start Time | End Time |
|---------|---------------------------------------------------------------------------------------------|------------------------|---------|---------|---------------------|---------------------|
| start | --download-only -p | download-docker-430304 | jenkins | v1.34.0 | 23 Sep 24 12:05 UTC | |
| | download-docker-430304 | | | | | |
| | --alsologtostderr | | | | | |
| | --driver=docker | | | | | |
| | --container-runtime=docker | | | | | |
| delete | -p download-docker-430304 | download-docker-430304 | jenkins | v1.34.0 | 23 Sep 24 12:05 UTC | 23 Sep 24 12:05 UTC |
| start | --download-only -p | binary-mirror-059677 | jenkins | v1.34.0 | 23 Sep 24 12:05 UTC | |
| | binary-mirror-059677 | | | | | |
| | --alsologtostderr | | | | | |
| | --binary-mirror | | | | | |
| | http://127.0.0.1:42311 | | | | | |
| | --driver=docker | | | | | |
| | --container-runtime=docker | | | | | |
| delete | -p binary-mirror-059677 | binary-mirror-059677 | jenkins | v1.34.0 | 23 Sep 24 12:05 UTC | 23 Sep 24 12:05 UTC |
| addons | enable dashboard -p | addons-242108 | jenkins | v1.34.0 | 23 Sep 24 12:05 UTC | |
| | addons-242108 | | | | | |
| addons | disable dashboard -p | addons-242108 | jenkins | v1.34.0 | 23 Sep 24 12:05 UTC | |
| | addons-242108 | | | | | |
| start | -p addons-242108 --wait=true | addons-242108 | jenkins | v1.34.0 | 23 Sep 24 12:05 UTC | 23 Sep 24 12:08 UTC |
| | --memory=4000 --alsologtostderr | | | | | |
| | --addons=registry | | | | | |
| | --addons=metrics-server | | | | | |
| | --addons=volumesnapshots | | | | | |
| | --addons=csi-hostpath-driver | | | | | |
| | --addons=gcp-auth | | | | | |
| | --addons=cloud-spanner | | | | | |
| | --addons=inspektor-gadget | | | | | |
| | --addons=storage-provisioner-rancher | | | | | |
| | --addons=nvidia-device-plugin | | | | | |
| | --addons=yakd --addons=volcano | | | | | |
| | --driver=docker | | | | | |
| | --container-runtime=docker | | | | | |
| | --addons=ingress | | | | | |
| | --addons=ingress-dns | | | | | |
| addons | addons-242108 addons disable | addons-242108 | jenkins | v1.34.0 | 23 Sep 24 12:09 UTC | 23 Sep 24 12:09 UTC |
| | volcano --alsologtostderr -v=1 | | | | | |
| addons | enable headlamp | addons-242108 | jenkins | v1.34.0 | 23 Sep 24 12:17 UTC | 23 Sep 24 12:17 UTC |
| | -p addons-242108 | | | | | |
| | --alsologtostderr -v=1 | | | | | |
| addons | addons-242108 addons | addons-242108 | jenkins | v1.34.0 | 23 Sep 24 12:17 UTC | 23 Sep 24 12:17 UTC |
| | disable metrics-server | | | | | |
| | --alsologtostderr -v=1 | | | | | |
| addons | disable nvidia-device-plugin | addons-242108 | jenkins | v1.34.0 | 23 Sep 24 12:17 UTC | 23 Sep 24 12:17 UTC |
| | -p addons-242108 | | | | | |
| addons | addons-242108 addons disable | addons-242108 | jenkins | v1.34.0 | 23 Sep 24 12:17 UTC | 23 Sep 24 12:17 UTC |
| | headlamp --alsologtostderr | | | | | |
| | -v=1 | | | | | |
| addons | addons-242108 addons disable | addons-242108 | jenkins | v1.34.0 | 23 Sep 24 12:17 UTC | 23 Sep 24 12:18 UTC |
| | yakd --alsologtostderr -v=1 | | | | | |
| addons | disable inspektor-gadget -p | addons-242108 | jenkins | v1.34.0 | 23 Sep 24 12:18 UTC | 23 Sep 24 12:18 UTC |
| | addons-242108 | | | | | |
| ssh | addons-242108 ssh curl -s | addons-242108 | jenkins | v1.34.0 | 23 Sep 24 12:18 UTC | 23 Sep 24 12:18 UTC |
| | http://127.0.0.1/ -H 'Host: | | | | | |
| | nginx.example.com' | | | | | |
| ip | addons-242108 ip | addons-242108 | jenkins | v1.34.0 | 23 Sep 24 12:18 UTC | 23 Sep 24 12:18 UTC |
| addons | addons-242108 addons disable | addons-242108 | jenkins | v1.34.0 | 23 Sep 24 12:18 UTC | 23 Sep 24 12:18 UTC |
| | ingress-dns --alsologtostderr | | | | | |
| | -v=1 | | | | | |
| addons | addons-242108 addons disable | addons-242108 | jenkins | v1.34.0 | 23 Sep 24 12:18 UTC | 23 Sep 24 12:18 UTC |
| | ingress --alsologtostderr -v=1 | | | | | |
| addons | addons-242108 addons | addons-242108 | jenkins | v1.34.0 | 23 Sep 24 12:18 UTC | 23 Sep 24 12:18 UTC |
| | disable csi-hostpath-driver | | | | | |
| | --alsologtostderr -v=1 | | | | | |
| addons | addons-242108 addons | addons-242108 | jenkins | v1.34.0 | 23 Sep 24 12:18 UTC | 23 Sep 24 12:18 UTC |
| | disable volumesnapshots | | | | | |
| | --alsologtostderr -v=1 | | | | | |
| addons | disable cloud-spanner -p | addons-242108 | jenkins | v1.34.0 | 23 Sep 24 12:18 UTC | 23 Sep 24 12:18 UTC |
| | addons-242108 | | | | | |
| ssh | addons-242108 ssh cat | addons-242108 | jenkins | v1.34.0 | 23 Sep 24 12:18 UTC | 23 Sep 24 12:18 UTC |
| | /opt/local-path-provisioner/pvc-f744a326-bb9b-48c7-a6ae-1eba57c6e796_default_test-pvc/file1 | | | | | |
| addons | addons-242108 addons disable | addons-242108 | jenkins | v1.34.0 | 23 Sep 24 12:18 UTC | |
| | storage-provisioner-rancher | | | | | |
| | --alsologtostderr -v=1 | | | | | |
| ip | addons-242108 ip | addons-242108 | jenkins | v1.34.0 | 23 Sep 24 12:18 UTC | 23 Sep 24 12:18 UTC |
| addons | addons-242108 addons disable | addons-242108 | jenkins | v1.34.0 | 23 Sep 24 12:18 UTC | 23 Sep 24 12:18 UTC |
| | registry --alsologtostderr | | | | | |
| | -v=1 | | | | | |
|---------|---------------------------------------------------------------------------------------------|------------------------|---------|---------|---------------------|---------------------|
==> Last Start <==
Log file created at: 2024/09/23 12:05:27
Running on machine: ubuntu-20-agent-3
Binary: Built with gc go1.23.0 for linux/amd64
Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg
I0923 12:05:27.009450 1858014 out.go:345] Setting OutFile to fd 1 ...
I0923 12:05:27.009572 1858014 out.go:392] TERM=,COLORTERM=, which probably does not support color
I0923 12:05:27.009582 1858014 out.go:358] Setting ErrFile to fd 2...
I0923 12:05:27.009586 1858014 out.go:392] TERM=,COLORTERM=, which probably does not support color
I0923 12:05:27.009779 1858014 root.go:338] Updating PATH: /home/jenkins/minikube-integration/19690-1849651/.minikube/bin
I0923 12:05:27.010382 1858014 out.go:352] Setting JSON to false
I0923 12:05:27.011262 1858014 start.go:129] hostinfo: {"hostname":"ubuntu-20-agent-3","uptime":157628,"bootTime":1726935499,"procs":178,"os":"linux","platform":"ubuntu","platformFamily":"debian","platformVersion":"20.04","kernelVersion":"5.15.0-1069-gcp","kernelArch":"x86_64","virtualizationSystem":"kvm","virtualizationRole":"guest","hostId":"591c9f12-2938-3743-e2bf-c56a050d43d1"}
I0923 12:05:27.011383 1858014 start.go:139] virtualization: kvm guest
I0923 12:05:27.013356 1858014 out.go:177] * [addons-242108] minikube v1.34.0 on Ubuntu 20.04 (kvm/amd64)
I0923 12:05:27.014479 1858014 out.go:177] - MINIKUBE_LOCATION=19690
I0923 12:05:27.014533 1858014 notify.go:220] Checking for updates...
I0923 12:05:27.016615 1858014 out.go:177] - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
I0923 12:05:27.017799 1858014 out.go:177] - KUBECONFIG=/home/jenkins/minikube-integration/19690-1849651/kubeconfig
I0923 12:05:27.018952 1858014 out.go:177] - MINIKUBE_HOME=/home/jenkins/minikube-integration/19690-1849651/.minikube
I0923 12:05:27.020041 1858014 out.go:177] - MINIKUBE_BIN=out/minikube-linux-amd64
I0923 12:05:27.021119 1858014 out.go:177] - MINIKUBE_FORCE_SYSTEMD=
I0923 12:05:27.022416 1858014 driver.go:394] Setting default libvirt URI to qemu:///system
I0923 12:05:27.044425 1858014 docker.go:123] docker version: linux-27.3.1:Docker Engine - Community
I0923 12:05:27.044558 1858014 cli_runner.go:164] Run: docker system info --format "{{json .}}"
I0923 12:05:27.090450 1858014 info.go:266] docker info: {ID:TS6T:UINC:MIYS:RZPA:KS6T:4JQK:7JHN:D6RA:LDP2:MHAE:G32M:C5NQ Containers:0 ContainersRunning:0 ContainersPaused:0 ContainersStopped:0 Images:1 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:false BridgeNfIP6Tables:false Debug:false NFd:26 OomKillDisable:true NGoroutines:45 SystemTime:2024-09-23 12:05:27.081537465 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1069-gcp OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Architecture:x
86_64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:8 MemTotal:33647939584 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ubuntu-20-agent-3 Labels:[] ExperimentalBuild:false ServerVersion:27.3.1 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:7f7fdf5fed64eb6a7caf99b3e12efcf9d60e311c Expected:7f7fdf5fed64eb6a7caf99b3e12efcf9d60e311c} RuncCommit:{ID:v1.1.14-0-g2c9f560 Expected:v1.1.14-0-g2c9f560} InitCommit:{ID:de40ad0 Expected:de40ad0} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:[WARNING: bri
dge-nf-call-iptables is disabled WARNING: bridge-nf-call-ip6tables is disabled] ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.17.1] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.29.7] map[Name:scan Path:/usr/libexec/docker/cli-plugins/docker-scan SchemaVersion:0.1.0 ShortDescription:Docker Scan Vendor:Docker Inc. Version:v0.23.0]] Warnings:<nil>}}
I0923 12:05:27.090571 1858014 docker.go:318] overlay module found
I0923 12:05:27.092190 1858014 out.go:177] * Using the docker driver based on user configuration
I0923 12:05:27.093092 1858014 start.go:297] selected driver: docker
I0923 12:05:27.093102 1858014 start.go:901] validating driver "docker" against <nil>
I0923 12:05:27.093114 1858014 start.go:912] status for docker: {Installed:true Healthy:true Running:false NeedsImprovement:false Error:<nil> Reason: Fix: Doc: Version:}
I0923 12:05:27.093894 1858014 cli_runner.go:164] Run: docker system info --format "{{json .}}"
I0923 12:05:27.137251 1858014 info.go:266] docker info: {ID:TS6T:UINC:MIYS:RZPA:KS6T:4JQK:7JHN:D6RA:LDP2:MHAE:G32M:C5NQ Containers:0 ContainersRunning:0 ContainersPaused:0 ContainersStopped:0 Images:1 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:false BridgeNfIP6Tables:false Debug:false NFd:26 OomKillDisable:true NGoroutines:45 SystemTime:2024-09-23 12:05:27.128750071 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1069-gcp OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Architecture:x
86_64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:8 MemTotal:33647939584 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ubuntu-20-agent-3 Labels:[] ExperimentalBuild:false ServerVersion:27.3.1 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:7f7fdf5fed64eb6a7caf99b3e12efcf9d60e311c Expected:7f7fdf5fed64eb6a7caf99b3e12efcf9d60e311c} RuncCommit:{ID:v1.1.14-0-g2c9f560 Expected:v1.1.14-0-g2c9f560} InitCommit:{ID:de40ad0 Expected:de40ad0} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:[WARNING: bri
dge-nf-call-iptables is disabled WARNING: bridge-nf-call-ip6tables is disabled] ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.17.1] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.29.7] map[Name:scan Path:/usr/libexec/docker/cli-plugins/docker-scan SchemaVersion:0.1.0 ShortDescription:Docker Scan Vendor:Docker Inc. Version:v0.23.0]] Warnings:<nil>}}
I0923 12:05:27.137415 1858014 start_flags.go:310] no existing cluster config was found, will generate one from the flags
I0923 12:05:27.137715 1858014 start_flags.go:947] Waiting for all components: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
I0923 12:05:27.139371 1858014 out.go:177] * Using Docker driver with root privileges
I0923 12:05:27.140600 1858014 cni.go:84] Creating CNI manager for ""
I0923 12:05:27.140678 1858014 cni.go:158] "docker" driver + "docker" container runtime found on kubernetes v1.24+, recommending bridge
I0923 12:05:27.140694 1858014 start_flags.go:319] Found "bridge CNI" CNI - setting NetworkPlugin=cni
I0923 12:05:27.140779 1858014 start.go:340] cluster config:
{Name:addons-242108 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1726784731-19672@sha256:7f8c62ddb0100a5b958dd19c5b5478b8c7ef13da9a0a4d6c7d18f43544e0dbed Memory:4000 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.31.1 ClusterName:addons-242108 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime
:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.31.1 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock:
SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I0923 12:05:27.142115 1858014 out.go:177] * Starting "addons-242108" primary control-plane node in "addons-242108" cluster
I0923 12:05:27.143218 1858014 cache.go:121] Beginning downloading kic base image for docker with docker
I0923 12:05:27.144458 1858014 out.go:177] * Pulling base image v0.0.45-1726784731-19672 ...
I0923 12:05:27.145411 1858014 preload.go:131] Checking if preload exists for k8s version v1.31.1 and runtime docker
I0923 12:05:27.145442 1858014 preload.go:146] Found local preload: /home/jenkins/minikube-integration/19690-1849651/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.31.1-docker-overlay2-amd64.tar.lz4
I0923 12:05:27.145446 1858014 image.go:79] Checking for gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1726784731-19672@sha256:7f8c62ddb0100a5b958dd19c5b5478b8c7ef13da9a0a4d6c7d18f43544e0dbed in local docker daemon
I0923 12:05:27.145454 1858014 cache.go:56] Caching tarball of preloaded images
I0923 12:05:27.145626 1858014 preload.go:172] Found /home/jenkins/minikube-integration/19690-1849651/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.31.1-docker-overlay2-amd64.tar.lz4 in cache, skipping download
I0923 12:05:27.145638 1858014 cache.go:59] Finished verifying existence of preloaded tar for v1.31.1 on docker
I0923 12:05:27.145946 1858014 profile.go:143] Saving config to /home/jenkins/minikube-integration/19690-1849651/.minikube/profiles/addons-242108/config.json ...
I0923 12:05:27.145966 1858014 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19690-1849651/.minikube/profiles/addons-242108/config.json: {Name:mkd5d6707c74e6593e8c7b7dce14ea3611df030c Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0923 12:05:27.161201 1858014 cache.go:149] Downloading gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1726784731-19672@sha256:7f8c62ddb0100a5b958dd19c5b5478b8c7ef13da9a0a4d6c7d18f43544e0dbed to local cache
I0923 12:05:27.161317 1858014 image.go:63] Checking for gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1726784731-19672@sha256:7f8c62ddb0100a5b958dd19c5b5478b8c7ef13da9a0a4d6c7d18f43544e0dbed in local cache directory
I0923 12:05:27.161333 1858014 image.go:66] Found gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1726784731-19672@sha256:7f8c62ddb0100a5b958dd19c5b5478b8c7ef13da9a0a4d6c7d18f43544e0dbed in local cache directory, skipping pull
I0923 12:05:27.161338 1858014 image.go:135] gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1726784731-19672@sha256:7f8c62ddb0100a5b958dd19c5b5478b8c7ef13da9a0a4d6c7d18f43544e0dbed exists in cache, skipping pull
I0923 12:05:27.161348 1858014 cache.go:152] successfully saved gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1726784731-19672@sha256:7f8c62ddb0100a5b958dd19c5b5478b8c7ef13da9a0a4d6c7d18f43544e0dbed as a tarball
I0923 12:05:27.161353 1858014 cache.go:162] Loading gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1726784731-19672@sha256:7f8c62ddb0100a5b958dd19c5b5478b8c7ef13da9a0a4d6c7d18f43544e0dbed from local cache
I0923 12:05:39.459476 1858014 cache.go:164] successfully loaded and using gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1726784731-19672@sha256:7f8c62ddb0100a5b958dd19c5b5478b8c7ef13da9a0a4d6c7d18f43544e0dbed from cached tarball
I0923 12:05:39.459512 1858014 cache.go:194] Successfully downloaded all kic artifacts
I0923 12:05:39.459559 1858014 start.go:360] acquireMachinesLock for addons-242108: {Name:mkdb045e3e1a6ec81bff9a760fec943f228445c7 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I0923 12:05:39.459672 1858014 start.go:364] duration metric: took 93.322µs to acquireMachinesLock for "addons-242108"
I0923 12:05:39.459717 1858014 start.go:93] Provisioning new machine with config: &{Name:addons-242108 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1726784731-19672@sha256:7f8c62ddb0100a5b958dd19c5b5478b8c7ef13da9a0a4d6c7d18f43544e0dbed Memory:4000 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.31.1 ClusterName:addons-242108 Namespace:default APIServerHAVIP: APIServerName:min
ikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.31.1 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false Cust
omQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} &{Name: IP: Port:8443 KubernetesVersion:v1.31.1 ContainerRuntime:docker ControlPlane:true Worker:true}
I0923 12:05:39.459808 1858014 start.go:125] createHost starting for "" (driver="docker")
I0923 12:05:39.517635 1858014 out.go:235] * Creating docker container (CPUs=2, Memory=4000MB) ...
I0923 12:05:39.517917 1858014 start.go:159] libmachine.API.Create for "addons-242108" (driver="docker")
I0923 12:05:39.517954 1858014 client.go:168] LocalClient.Create starting
I0923 12:05:39.518072 1858014 main.go:141] libmachine: Creating CA: /home/jenkins/minikube-integration/19690-1849651/.minikube/certs/ca.pem
I0923 12:05:39.562684 1858014 main.go:141] libmachine: Creating client certificate: /home/jenkins/minikube-integration/19690-1849651/.minikube/certs/cert.pem
I0923 12:05:39.746493 1858014 cli_runner.go:164] Run: docker network inspect addons-242108 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
W0923 12:05:39.762535 1858014 cli_runner.go:211] docker network inspect addons-242108 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}" returned with exit code 1
I0923 12:05:39.762600 1858014 network_create.go:284] running [docker network inspect addons-242108] to gather additional debugging logs...
I0923 12:05:39.762621 1858014 cli_runner.go:164] Run: docker network inspect addons-242108
W0923 12:05:39.780542 1858014 cli_runner.go:211] docker network inspect addons-242108 returned with exit code 1
I0923 12:05:39.780573 1858014 network_create.go:287] error running [docker network inspect addons-242108]: docker network inspect addons-242108: exit status 1
stdout:
[]
stderr:
Error response from daemon: network addons-242108 not found
I0923 12:05:39.780604 1858014 network_create.go:289] output of [docker network inspect addons-242108]: -- stdout --
[]
-- /stdout --
** stderr **
Error response from daemon: network addons-242108 not found
** /stderr **
I0923 12:05:39.780695 1858014 cli_runner.go:164] Run: docker network inspect bridge --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I0923 12:05:39.797035 1858014 network.go:206] using free private subnet 192.168.49.0/24: &{IP:192.168.49.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.49.0/24 Gateway:192.168.49.1 ClientMin:192.168.49.2 ClientMax:192.168.49.254 Broadcast:192.168.49.255 IsPrivate:true Interface:{IfaceName: IfaceIPv4: IfaceMTU:0 IfaceMAC:} reservation:0xc001b90b70}
I0923 12:05:39.797084 1858014 network_create.go:124] attempt to create docker network addons-242108 192.168.49.0/24 with gateway 192.168.49.1 and MTU of 1500 ...
I0923 12:05:39.797131 1858014 cli_runner.go:164] Run: docker network create --driver=bridge --subnet=192.168.49.0/24 --gateway=192.168.49.1 -o --ip-masq -o --icc -o com.docker.network.driver.mtu=1500 --label=created_by.minikube.sigs.k8s.io=true --label=name.minikube.sigs.k8s.io=addons-242108 addons-242108
I0923 12:05:39.976510 1858014 network_create.go:108] docker network addons-242108 192.168.49.0/24 created
I0923 12:05:39.976549 1858014 kic.go:121] calculated static IP "192.168.49.2" for the "addons-242108" container
I0923 12:05:39.976621 1858014 cli_runner.go:164] Run: docker ps -a --format {{.Names}}
I0923 12:05:39.992383 1858014 cli_runner.go:164] Run: docker volume create addons-242108 --label name.minikube.sigs.k8s.io=addons-242108 --label created_by.minikube.sigs.k8s.io=true
I0923 12:05:40.031784 1858014 oci.go:103] Successfully created a docker volume addons-242108
I0923 12:05:40.031878 1858014 cli_runner.go:164] Run: docker run --rm --name addons-242108-preload-sidecar --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=addons-242108 --entrypoint /usr/bin/test -v addons-242108:/var gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1726784731-19672@sha256:7f8c62ddb0100a5b958dd19c5b5478b8c7ef13da9a0a4d6c7d18f43544e0dbed -d /var/lib
I0923 12:05:44.475668 1858014 cli_runner.go:217] Completed: docker run --rm --name addons-242108-preload-sidecar --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=addons-242108 --entrypoint /usr/bin/test -v addons-242108:/var gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1726784731-19672@sha256:7f8c62ddb0100a5b958dd19c5b5478b8c7ef13da9a0a4d6c7d18f43544e0dbed -d /var/lib: (4.44373152s)
I0923 12:05:44.475704 1858014 oci.go:107] Successfully prepared a docker volume addons-242108
I0923 12:05:44.475736 1858014 preload.go:131] Checking if preload exists for k8s version v1.31.1 and runtime docker
I0923 12:05:44.475763 1858014 kic.go:194] Starting extracting preloaded images to volume ...
I0923 12:05:44.475843 1858014 cli_runner.go:164] Run: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/19690-1849651/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.31.1-docker-overlay2-amd64.tar.lz4:/preloaded.tar:ro -v addons-242108:/extractDir gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1726784731-19672@sha256:7f8c62ddb0100a5b958dd19c5b5478b8c7ef13da9a0a4d6c7d18f43544e0dbed -I lz4 -xf /preloaded.tar -C /extractDir
I0923 12:05:48.389708 1858014 cli_runner.go:217] Completed: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/19690-1849651/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.31.1-docker-overlay2-amd64.tar.lz4:/preloaded.tar:ro -v addons-242108:/extractDir gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1726784731-19672@sha256:7f8c62ddb0100a5b958dd19c5b5478b8c7ef13da9a0a4d6c7d18f43544e0dbed -I lz4 -xf /preloaded.tar -C /extractDir: (3.913810893s)
I0923 12:05:48.389740 1858014 kic.go:203] duration metric: took 3.913974214s to extract preloaded images to volume ...
W0923 12:05:48.389907 1858014 cgroups_linux.go:77] Your kernel does not support swap limit capabilities or the cgroup is not mounted.
I0923 12:05:48.390030 1858014 cli_runner.go:164] Run: docker info --format "'{{json .SecurityOptions}}'"
I0923 12:05:48.438563 1858014 cli_runner.go:164] Run: docker run -d -t --privileged --security-opt seccomp=unconfined --tmpfs /tmp --tmpfs /run -v /lib/modules:/lib/modules:ro --hostname addons-242108 --name addons-242108 --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=addons-242108 --label role.minikube.sigs.k8s.io= --label mode.minikube.sigs.k8s.io=addons-242108 --network addons-242108 --ip 192.168.49.2 --volume addons-242108:/var --security-opt apparmor=unconfined --memory=4000mb --cpus=2 -e container=docker --expose 8443 --publish=127.0.0.1::8443 --publish=127.0.0.1::22 --publish=127.0.0.1::2376 --publish=127.0.0.1::5000 --publish=127.0.0.1::32443 gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1726784731-19672@sha256:7f8c62ddb0100a5b958dd19c5b5478b8c7ef13da9a0a4d6c7d18f43544e0dbed
I0923 12:05:48.749598 1858014 cli_runner.go:164] Run: docker container inspect addons-242108 --format={{.State.Running}}
I0923 12:05:48.766615 1858014 cli_runner.go:164] Run: docker container inspect addons-242108 --format={{.State.Status}}
I0923 12:05:48.783444 1858014 cli_runner.go:164] Run: docker exec addons-242108 stat /var/lib/dpkg/alternatives/iptables
I0923 12:05:48.825224 1858014 oci.go:144] the created container "addons-242108" has a running status.
I0923 12:05:48.825254 1858014 kic.go:225] Creating ssh key for kic: /home/jenkins/minikube-integration/19690-1849651/.minikube/machines/addons-242108/id_rsa...
I0923 12:05:48.949930 1858014 kic_runner.go:191] docker (temp): /home/jenkins/minikube-integration/19690-1849651/.minikube/machines/addons-242108/id_rsa.pub --> /home/docker/.ssh/authorized_keys (381 bytes)
I0923 12:05:48.969818 1858014 cli_runner.go:164] Run: docker container inspect addons-242108 --format={{.State.Status}}
I0923 12:05:48.988507 1858014 kic_runner.go:93] Run: chown docker:docker /home/docker/.ssh/authorized_keys
I0923 12:05:48.988542 1858014 kic_runner.go:114] Args: [docker exec --privileged addons-242108 chown docker:docker /home/docker/.ssh/authorized_keys]
I0923 12:05:49.028200 1858014 cli_runner.go:164] Run: docker container inspect addons-242108 --format={{.State.Status}}
I0923 12:05:49.046553 1858014 machine.go:93] provisionDockerMachine start ...
I0923 12:05:49.046642 1858014 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-242108
I0923 12:05:49.065682 1858014 main.go:141] libmachine: Using SSH client type: native
I0923 12:05:49.065888 1858014 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x864a40] 0x867720 <nil> [] 0s} 127.0.0.1 32768 <nil> <nil>}
I0923 12:05:49.065902 1858014 main.go:141] libmachine: About to run SSH command:
hostname
I0923 12:05:49.066602 1858014 main.go:141] libmachine: Error dialing TCP: ssh: handshake failed: read tcp 127.0.0.1:33222->127.0.0.1:32768: read: connection reset by peer
I0923 12:05:52.194518 1858014 main.go:141] libmachine: SSH cmd err, output: <nil>: addons-242108
I0923 12:05:52.194553 1858014 ubuntu.go:169] provisioning hostname "addons-242108"
I0923 12:05:52.194614 1858014 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-242108
I0923 12:05:52.211500 1858014 main.go:141] libmachine: Using SSH client type: native
I0923 12:05:52.211697 1858014 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x864a40] 0x867720 <nil> [] 0s} 127.0.0.1 32768 <nil> <nil>}
I0923 12:05:52.211710 1858014 main.go:141] libmachine: About to run SSH command:
sudo hostname addons-242108 && echo "addons-242108" | sudo tee /etc/hostname
I0923 12:05:52.349417 1858014 main.go:141] libmachine: SSH cmd err, output: <nil>: addons-242108
I0923 12:05:52.349505 1858014 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-242108
I0923 12:05:52.368930 1858014 main.go:141] libmachine: Using SSH client type: native
I0923 12:05:52.369115 1858014 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x864a40] 0x867720 <nil> [] 0s} 127.0.0.1 32768 <nil> <nil>}
I0923 12:05:52.369137 1858014 main.go:141] libmachine: About to run SSH command:
if ! grep -xq '.*\saddons-242108' /etc/hosts; then
if grep -xq '127.0.1.1\s.*' /etc/hosts; then
sudo sed -i 's/^127.0.1.1\s.*/127.0.1.1 addons-242108/g' /etc/hosts;
else
echo '127.0.1.1 addons-242108' | sudo tee -a /etc/hosts;
fi
fi
I0923 12:05:52.495094 1858014 main.go:141] libmachine: SSH cmd err, output: <nil>:
I0923 12:05:52.495132 1858014 ubuntu.go:175] set auth options {CertDir:/home/jenkins/minikube-integration/19690-1849651/.minikube CaCertPath:/home/jenkins/minikube-integration/19690-1849651/.minikube/certs/ca.pem CaPrivateKeyPath:/home/jenkins/minikube-integration/19690-1849651/.minikube/certs/ca-key.pem CaCertRemotePath:/etc/docker/ca.pem ServerCertPath:/home/jenkins/minikube-integration/19690-1849651/.minikube/machines/server.pem ServerKeyPath:/home/jenkins/minikube-integration/19690-1849651/.minikube/machines/server-key.pem ClientKeyPath:/home/jenkins/minikube-integration/19690-1849651/.minikube/certs/key.pem ServerCertRemotePath:/etc/docker/server.pem ServerKeyRemotePath:/etc/docker/server-key.pem ClientCertPath:/home/jenkins/minikube-integration/19690-1849651/.minikube/certs/cert.pem ServerCertSANs:[] StorePath:/home/jenkins/minikube-integration/19690-1849651/.minikube}
I0923 12:05:52.495193 1858014 ubuntu.go:177] setting up certificates
I0923 12:05:52.495214 1858014 provision.go:84] configureAuth start
I0923 12:05:52.495307 1858014 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" addons-242108
I0923 12:05:52.512284 1858014 provision.go:143] copyHostCerts
I0923 12:05:52.512367 1858014 exec_runner.go:151] cp: /home/jenkins/minikube-integration/19690-1849651/.minikube/certs/cert.pem --> /home/jenkins/minikube-integration/19690-1849651/.minikube/cert.pem (1123 bytes)
I0923 12:05:52.512495 1858014 exec_runner.go:151] cp: /home/jenkins/minikube-integration/19690-1849651/.minikube/certs/key.pem --> /home/jenkins/minikube-integration/19690-1849651/.minikube/key.pem (1679 bytes)
I0923 12:05:52.512587 1858014 exec_runner.go:151] cp: /home/jenkins/minikube-integration/19690-1849651/.minikube/certs/ca.pem --> /home/jenkins/minikube-integration/19690-1849651/.minikube/ca.pem (1082 bytes)
I0923 12:05:52.512662 1858014 provision.go:117] generating server cert: /home/jenkins/minikube-integration/19690-1849651/.minikube/machines/server.pem ca-key=/home/jenkins/minikube-integration/19690-1849651/.minikube/certs/ca.pem private-key=/home/jenkins/minikube-integration/19690-1849651/.minikube/certs/ca-key.pem org=jenkins.addons-242108 san=[127.0.0.1 192.168.49.2 addons-242108 localhost minikube]
I0923 12:05:52.744627 1858014 provision.go:177] copyRemoteCerts
I0923 12:05:52.744690 1858014 ssh_runner.go:195] Run: sudo mkdir -p /etc/docker /etc/docker /etc/docker
I0923 12:05:52.744723 1858014 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-242108
I0923 12:05:52.761492 1858014 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19690-1849651/.minikube/machines/addons-242108/id_rsa Username:docker}
I0923 12:05:52.851425 1858014 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19690-1849651/.minikube/certs/ca.pem --> /etc/docker/ca.pem (1082 bytes)
I0923 12:05:52.872883 1858014 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19690-1849651/.minikube/machines/server.pem --> /etc/docker/server.pem (1208 bytes)
I0923 12:05:52.894085 1858014 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19690-1849651/.minikube/machines/server-key.pem --> /etc/docker/server-key.pem (1675 bytes)
I0923 12:05:52.914380 1858014 provision.go:87] duration metric: took 419.147945ms to configureAuth
I0923 12:05:52.914409 1858014 ubuntu.go:193] setting minikube options for container-runtime
I0923 12:05:52.914592 1858014 config.go:182] Loaded profile config "addons-242108": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.31.1
I0923 12:05:52.914656 1858014 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-242108
I0923 12:05:52.930851 1858014 main.go:141] libmachine: Using SSH client type: native
I0923 12:05:52.931031 1858014 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x864a40] 0x867720 <nil> [] 0s} 127.0.0.1 32768 <nil> <nil>}
I0923 12:05:52.931056 1858014 main.go:141] libmachine: About to run SSH command:
df --output=fstype / | tail -n 1
I0923 12:05:53.055242 1858014 main.go:141] libmachine: SSH cmd err, output: <nil>: overlay
I0923 12:05:53.055281 1858014 ubuntu.go:71] root file system type: overlay
I0923 12:05:53.055409 1858014 provision.go:314] Updating docker unit: /lib/systemd/system/docker.service ...
I0923 12:05:53.055504 1858014 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-242108
I0923 12:05:53.072320 1858014 main.go:141] libmachine: Using SSH client type: native
I0923 12:05:53.072542 1858014 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x864a40] 0x867720 <nil> [] 0s} 127.0.0.1 32768 <nil> <nil>}
I0923 12:05:53.072641 1858014 main.go:141] libmachine: About to run SSH command:
sudo mkdir -p /lib/systemd/system && printf %s "[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
BindsTo=containerd.service
After=network-online.target firewalld.service containerd.service
Wants=network-online.target
Requires=docker.socket
StartLimitBurst=3
StartLimitIntervalSec=60
[Service]
Type=notify
Restart=on-failure
# This file is a systemd drop-in unit that inherits from the base dockerd configuration.
# The base configuration already specifies an 'ExecStart=...' command. The first directive
# here is to clear out that command inherited from the base configuration. Without this,
# the command from the base configuration and the command specified here are treated as
# a sequence of commands, which is not the desired behavior, nor is it valid -- systemd
# will catch this invalid input and refuse to start the service with an error like:
# Service has more than one ExecStart= setting, which is only allowed for Type=oneshot services.
# NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other
# container runtimes. If left unlimited, it may result in OOM issues with MySQL.
ExecStart=
ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=docker --insecure-registry 10.96.0.0/12
ExecReload=/bin/kill -s HUP \$MAINPID
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
# Uncomment TasksMax if your systemd version supports it.
# Only systemd 226 and above support this version.
TasksMax=infinity
TimeoutStartSec=0
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process
[Install]
WantedBy=multi-user.target
" | sudo tee /lib/systemd/system/docker.service.new
I0923 12:05:53.210157 1858014 main.go:141] libmachine: SSH cmd err, output: <nil>: [Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
BindsTo=containerd.service
After=network-online.target firewalld.service containerd.service
Wants=network-online.target
Requires=docker.socket
StartLimitBurst=3
StartLimitIntervalSec=60
[Service]
Type=notify
Restart=on-failure
# This file is a systemd drop-in unit that inherits from the base dockerd configuration.
# The base configuration already specifies an 'ExecStart=...' command. The first directive
# here is to clear out that command inherited from the base configuration. Without this,
# the command from the base configuration and the command specified here are treated as
# a sequence of commands, which is not the desired behavior, nor is it valid -- systemd
# will catch this invalid input and refuse to start the service with an error like:
# Service has more than one ExecStart= setting, which is only allowed for Type=oneshot services.
# NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other
# container runtimes. If left unlimited, it may result in OOM issues with MySQL.
ExecStart=
ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=docker --insecure-registry 10.96.0.0/12
ExecReload=/bin/kill -s HUP $MAINPID
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
# Uncomment TasksMax if your systemd version supports it.
# Only systemd 226 and above support this version.
TasksMax=infinity
TimeoutStartSec=0
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process
[Install]
WantedBy=multi-user.target
I0923 12:05:53.210251 1858014 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-242108
I0923 12:05:53.226571 1858014 main.go:141] libmachine: Using SSH client type: native
I0923 12:05:53.226774 1858014 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x864a40] 0x867720 <nil> [] 0s} 127.0.0.1 32768 <nil> <nil>}
I0923 12:05:53.226799 1858014 main.go:141] libmachine: About to run SSH command:
sudo diff -u /lib/systemd/system/docker.service /lib/systemd/system/docker.service.new || { sudo mv /lib/systemd/system/docker.service.new /lib/systemd/system/docker.service; sudo systemctl -f daemon-reload && sudo systemctl -f enable docker && sudo systemctl -f restart docker; }
I0923 12:05:53.924966 1858014 main.go:141] libmachine: SSH cmd err, output: <nil>: --- /lib/systemd/system/docker.service 2024-09-19 14:24:32.000000000 +0000
+++ /lib/systemd/system/docker.service.new 2024-09-23 12:05:53.205157890 +0000
@@ -1,46 +1,49 @@
[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
-After=network-online.target docker.socket firewalld.service containerd.service time-set.target
-Wants=network-online.target containerd.service
+BindsTo=containerd.service
+After=network-online.target firewalld.service containerd.service
+Wants=network-online.target
Requires=docker.socket
+StartLimitBurst=3
+StartLimitIntervalSec=60
[Service]
Type=notify
-# the default is not to use systemd for cgroups because the delegate issues still
-# exists and systemd currently does not support the cgroup feature set required
-# for containers run by docker
-ExecStart=/usr/bin/dockerd -H fd:// --containerd=/run/containerd/containerd.sock
-ExecReload=/bin/kill -s HUP $MAINPID
-TimeoutStartSec=0
-RestartSec=2
-Restart=always
+Restart=on-failure
-# Note that StartLimit* options were moved from "Service" to "Unit" in systemd 229.
-# Both the old, and new location are accepted by systemd 229 and up, so using the old location
-# to make them work for either version of systemd.
-StartLimitBurst=3
-# Note that StartLimitInterval was renamed to StartLimitIntervalSec in systemd 230.
-# Both the old, and new name are accepted by systemd 230 and up, so using the old name to make
-# this option work for either version of systemd.
-StartLimitInterval=60s
+
+# This file is a systemd drop-in unit that inherits from the base dockerd configuration.
+# The base configuration already specifies an 'ExecStart=...' command. The first directive
+# here is to clear out that command inherited from the base configuration. Without this,
+# the command from the base configuration and the command specified here are treated as
+# a sequence of commands, which is not the desired behavior, nor is it valid -- systemd
+# will catch this invalid input and refuse to start the service with an error like:
+# Service has more than one ExecStart= setting, which is only allowed for Type=oneshot services.
+
+# NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other
+# container runtimes. If left unlimited, it may result in OOM issues with MySQL.
+ExecStart=
+ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=docker --insecure-registry 10.96.0.0/12
+ExecReload=/bin/kill -s HUP $MAINPID
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
+LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
-# Comment TasksMax if your systemd version does not support it.
-# Only systemd 226 and above support this option.
+# Uncomment TasksMax if your systemd version supports it.
+# Only systemd 226 and above support this version.
TasksMax=infinity
+TimeoutStartSec=0
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process
-OOMScoreAdjust=-500
[Install]
WantedBy=multi-user.target
Synchronizing state of docker.service with SysV service script with /lib/systemd/systemd-sysv-install.
Executing: /lib/systemd/systemd-sysv-install enable docker
I0923 12:05:53.925004 1858014 machine.go:96] duration metric: took 4.878428741s to provisionDockerMachine
I0923 12:05:53.925019 1858014 client.go:171] duration metric: took 14.407056506s to LocalClient.Create
I0923 12:05:53.925047 1858014 start.go:167] duration metric: took 14.407129934s to libmachine.API.Create "addons-242108"
I0923 12:05:53.925061 1858014 start.go:293] postStartSetup for "addons-242108" (driver="docker")
I0923 12:05:53.925077 1858014 start.go:322] creating required directories: [/etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs]
I0923 12:05:53.925145 1858014 ssh_runner.go:195] Run: sudo mkdir -p /etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs
I0923 12:05:53.925194 1858014 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-242108
I0923 12:05:53.941157 1858014 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19690-1849651/.minikube/machines/addons-242108/id_rsa Username:docker}
I0923 12:05:54.031730 1858014 ssh_runner.go:195] Run: cat /etc/os-release
I0923 12:05:54.034840 1858014 main.go:141] libmachine: Couldn't set key VERSION_CODENAME, no corresponding struct field found
I0923 12:05:54.034874 1858014 main.go:141] libmachine: Couldn't set key PRIVACY_POLICY_URL, no corresponding struct field found
I0923 12:05:54.034884 1858014 main.go:141] libmachine: Couldn't set key UBUNTU_CODENAME, no corresponding struct field found
I0923 12:05:54.034893 1858014 info.go:137] Remote host: Ubuntu 22.04.5 LTS
I0923 12:05:54.034906 1858014 filesync.go:126] Scanning /home/jenkins/minikube-integration/19690-1849651/.minikube/addons for local assets ...
I0923 12:05:54.034969 1858014 filesync.go:126] Scanning /home/jenkins/minikube-integration/19690-1849651/.minikube/files for local assets ...
I0923 12:05:54.034995 1858014 start.go:296] duration metric: took 109.92529ms for postStartSetup
I0923 12:05:54.035297 1858014 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" addons-242108
I0923 12:05:54.052389 1858014 profile.go:143] Saving config to /home/jenkins/minikube-integration/19690-1849651/.minikube/profiles/addons-242108/config.json ...
I0923 12:05:54.052640 1858014 ssh_runner.go:195] Run: sh -c "df -h /var | awk 'NR==2{print $5}'"
I0923 12:05:54.052720 1858014 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-242108
I0923 12:05:54.068579 1858014 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19690-1849651/.minikube/machines/addons-242108/id_rsa Username:docker}
I0923 12:05:54.155940 1858014 ssh_runner.go:195] Run: sh -c "df -BG /var | awk 'NR==2{print $4}'"
I0923 12:05:54.160050 1858014 start.go:128] duration metric: took 14.700228119s to createHost
I0923 12:05:54.160076 1858014 start.go:83] releasing machines lock for "addons-242108", held for 14.700392741s
I0923 12:05:54.160155 1858014 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" addons-242108
I0923 12:05:54.175958 1858014 ssh_runner.go:195] Run: cat /version.json
I0923 12:05:54.175991 1858014 ssh_runner.go:195] Run: curl -sS -m 2 https://registry.k8s.io/
I0923 12:05:54.176008 1858014 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-242108
I0923 12:05:54.176041 1858014 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-242108
I0923 12:05:54.193081 1858014 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19690-1849651/.minikube/machines/addons-242108/id_rsa Username:docker}
I0923 12:05:54.193543 1858014 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19690-1849651/.minikube/machines/addons-242108/id_rsa Username:docker}
I0923 12:05:54.348035 1858014 ssh_runner.go:195] Run: systemctl --version
I0923 12:05:54.352106 1858014 ssh_runner.go:195] Run: sh -c "stat /etc/cni/net.d/*loopback.conf*"
I0923 12:05:54.356127 1858014 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f -name *loopback.conf* -not -name *.mk_disabled -exec sh -c "grep -q loopback {} && ( grep -q name {} || sudo sed -i '/"type": "loopback"/i \ \ \ \ "name": "loopback",' {} ) && sudo sed -i 's|"cniVersion": ".*"|"cniVersion": "1.0.0"|g' {}" ;
I0923 12:05:54.378217 1858014 cni.go:230] loopback cni configuration patched: "/etc/cni/net.d/*loopback.conf*" found
I0923 12:05:54.378282 1858014 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f ( ( -name *bridge* -or -name *podman* ) -and -not -name *.mk_disabled ) -printf "%p, " -exec sh -c "sudo mv {} {}.mk_disabled" ;
I0923 12:05:54.401848 1858014 cni.go:262] disabled [/etc/cni/net.d/87-podman-bridge.conflist, /etc/cni/net.d/100-crio-bridge.conf] bridge cni config(s)
I0923 12:05:54.401870 1858014 start.go:495] detecting cgroup driver to use...
I0923 12:05:54.401900 1858014 detect.go:187] detected "cgroupfs" cgroup driver on host os
I0923 12:05:54.402035 1858014 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///run/containerd/containerd.sock
" | sudo tee /etc/crictl.yaml"
I0923 12:05:54.415741 1858014 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)sandbox_image = .*$|\1sandbox_image = "registry.k8s.io/pause:3.10"|' /etc/containerd/config.toml"
I0923 12:05:54.424063 1858014 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)restrict_oom_score_adj = .*$|\1restrict_oom_score_adj = false|' /etc/containerd/config.toml"
I0923 12:05:54.432224 1858014 containerd.go:146] configuring containerd to use "cgroupfs" as cgroup driver...
I0923 12:05:54.432263 1858014 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)SystemdCgroup = .*$|\1SystemdCgroup = false|g' /etc/containerd/config.toml"
I0923 12:05:54.440448 1858014 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runtime.v1.linux"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I0923 12:05:54.448779 1858014 ssh_runner.go:195] Run: sh -c "sudo sed -i '/systemd_cgroup/d' /etc/containerd/config.toml"
I0923 12:05:54.456734 1858014 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runc.v1"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I0923 12:05:54.464896 1858014 ssh_runner.go:195] Run: sh -c "sudo rm -rf /etc/cni/net.mk"
I0923 12:05:54.472552 1858014 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)conf_dir = .*$|\1conf_dir = "/etc/cni/net.d"|g' /etc/containerd/config.toml"
I0923 12:05:54.480617 1858014 ssh_runner.go:195] Run: sh -c "sudo sed -i '/^ *enable_unprivileged_ports = .*/d' /etc/containerd/config.toml"
I0923 12:05:54.488792 1858014 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)\[plugins."io.containerd.grpc.v1.cri"\]|&\n\1 enable_unprivileged_ports = true|' /etc/containerd/config.toml"
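A quick way to confirm that the sed edits above landed is to grep the keys they touch; the expected values are the ones used in the commands above.

sudo grep -nE 'SystemdCgroup|sandbox_image|restrict_oom_score_adj|conf_dir|enable_unprivileged_ports' /etc/containerd/config.toml
# Expected, roughly:
#   SystemdCgroup = false
#   sandbox_image = "registry.k8s.io/pause:3.10"
#   restrict_oom_score_adj = false
#   conf_dir = "/etc/cni/net.d"
#   enable_unprivileged_ports = true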
I0923 12:05:54.497083 1858014 ssh_runner.go:195] Run: sudo sysctl net.bridge.bridge-nf-call-iptables
I0923 12:05:54.504033 1858014 crio.go:166] couldn't verify netfilter by "sudo sysctl net.bridge.bridge-nf-call-iptables" which might be okay. error: sudo sysctl net.bridge.bridge-nf-call-iptables: Process exited with status 255
stdout:
stderr:
sysctl: cannot stat /proc/sys/net/bridge/bridge-nf-call-iptables: No such file or directory
I0923 12:05:54.504069 1858014 ssh_runner.go:195] Run: sudo modprobe br_netfilter
I0923 12:05:54.516705 1858014 ssh_runner.go:195] Run: sudo sh -c "echo 1 > /proc/sys/net/ipv4/ip_forward"
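The three steps above (probing the bridge-netfilter sysctl, loading br_netfilter, enabling IPv4 forwarding) only take effect for the running kernel. A sketch of making the same prerequisites persistent across reboots, with illustrative file names:

echo 'br_netfilter' | sudo tee /etc/modules-load.d/k8s-example.conf
sudo tee /etc/sysctl.d/99-k8s-example.conf >/dev/null <<'EOF'
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
EOF
sudo modprobe br_netfilter
sudo sysctl --system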
I0923 12:05:54.524986 1858014 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0923 12:05:54.603045 1858014 ssh_runner.go:195] Run: sudo systemctl restart containerd
I0923 12:05:54.692543 1858014 start.go:495] detecting cgroup driver to use...
I0923 12:05:54.692591 1858014 detect.go:187] detected "cgroupfs" cgroup driver on host os
I0923 12:05:54.692652 1858014 ssh_runner.go:195] Run: sudo systemctl cat docker.service
I0923 12:05:54.703720 1858014 cruntime.go:279] skipping containerd shutdown because we are bound to it
I0923 12:05:54.703814 1858014 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service crio
I0923 12:05:54.715714 1858014 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///var/run/cri-dockerd.sock
" | sudo tee /etc/crictl.yaml"
I0923 12:05:54.732071 1858014 ssh_runner.go:195] Run: which cri-dockerd
I0923 12:05:54.735459 1858014 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/cri-docker.service.d
I0923 12:05:54.744407 1858014 ssh_runner.go:362] scp memory --> /etc/systemd/system/cri-docker.service.d/10-cni.conf (190 bytes)
I0923 12:05:54.760982 1858014 ssh_runner.go:195] Run: sudo systemctl unmask docker.service
I0923 12:05:54.856236 1858014 ssh_runner.go:195] Run: sudo systemctl enable docker.socket
I0923 12:05:54.949435 1858014 docker.go:574] configuring docker to use "cgroupfs" as cgroup driver...
I0923 12:05:54.949589 1858014 ssh_runner.go:362] scp memory --> /etc/docker/daemon.json (130 bytes)
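The 130-byte /etc/docker/daemon.json written here is what pins Docker to the "cgroupfs" driver. A minimal sketch of a daemon.json that achieves the same thing; the keys beyond exec-opts are illustrative, not necessarily what minikube writes:

sudo tee /tmp/daemon.json.example >/dev/null <<'EOF'
{
  "exec-opts": ["native.cgroupdriver=cgroupfs"],
  "log-driver": "json-file",
  "log-opts": { "max-size": "100m" },
  "storage-driver": "overlay2"
}
EOF
# After installing a file like this as /etc/docker/daemon.json:
#   sudo systemctl daemon-reload && sudo systemctl restart docker
# Verify with: docker info --format '{{.CgroupDriver}}'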
I0923 12:05:54.966616 1858014 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0923 12:05:55.043659 1858014 ssh_runner.go:195] Run: sudo systemctl restart docker
I0923 12:05:55.295021 1858014 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.socket
I0923 12:05:55.305935 1858014 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.service
I0923 12:05:55.316007 1858014 ssh_runner.go:195] Run: sudo systemctl unmask cri-docker.socket
I0923 12:05:55.395895 1858014 ssh_runner.go:195] Run: sudo systemctl enable cri-docker.socket
I0923 12:05:55.467401 1858014 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0923 12:05:55.539536 1858014 ssh_runner.go:195] Run: sudo systemctl restart cri-docker.socket
I0923 12:05:55.551018 1858014 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.service
I0923 12:05:55.560228 1858014 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0923 12:05:55.631608 1858014 ssh_runner.go:195] Run: sudo systemctl restart cri-docker.service
I0923 12:05:55.690509 1858014 start.go:542] Will wait 60s for socket path /var/run/cri-dockerd.sock
I0923 12:05:55.690632 1858014 ssh_runner.go:195] Run: stat /var/run/cri-dockerd.sock
I0923 12:05:55.694352 1858014 start.go:563] Will wait 60s for crictl version
I0923 12:05:55.694406 1858014 ssh_runner.go:195] Run: which crictl
I0923 12:05:55.697507 1858014 ssh_runner.go:195] Run: sudo /usr/bin/crictl version
I0923 12:05:55.729969 1858014 start.go:579] Version: 0.1.0
RuntimeName: docker
RuntimeVersion: 27.3.0
RuntimeApiVersion: v1
I0923 12:05:55.730034 1858014 ssh_runner.go:195] Run: docker version --format {{.Server.Version}}
I0923 12:05:55.752872 1858014 ssh_runner.go:195] Run: docker version --format {{.Server.Version}}
I0923 12:05:55.777291 1858014 out.go:235] * Preparing Kubernetes v1.31.1 on Docker 27.3.0 ...
I0923 12:05:55.777368 1858014 cli_runner.go:164] Run: docker network inspect addons-242108 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I0923 12:05:55.793348 1858014 ssh_runner.go:195] Run: grep 192.168.49.1 host.minikube.internal$ /etc/hosts
I0923 12:05:55.796737 1858014 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\thost.minikube.internal$' "/etc/hosts"; echo "192.168.49.1 host.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I0923 12:05:55.807062 1858014 kubeadm.go:883] updating cluster {Name:addons-242108 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1726784731-19672@sha256:7f8c62ddb0100a5b958dd19c5b5478b8c7ef13da9a0a4d6c7d18f43544e0dbed Memory:4000 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.31.1 ClusterName:addons-242108 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.49.2 Port:8443 KubernetesVersion:v1.31.1 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} ...
I0923 12:05:55.807159 1858014 preload.go:131] Checking if preload exists for k8s version v1.31.1 and runtime docker
I0923 12:05:55.807213 1858014 ssh_runner.go:195] Run: docker images --format {{.Repository}}:{{.Tag}}
I0923 12:05:55.826470 1858014 docker.go:685] Got preloaded images: -- stdout --
registry.k8s.io/kube-apiserver:v1.31.1
registry.k8s.io/kube-scheduler:v1.31.1
registry.k8s.io/kube-controller-manager:v1.31.1
registry.k8s.io/kube-proxy:v1.31.1
registry.k8s.io/coredns/coredns:v1.11.3
registry.k8s.io/etcd:3.5.15-0
registry.k8s.io/pause:3.10
gcr.io/k8s-minikube/storage-provisioner:v5
-- /stdout --
I0923 12:05:55.826489 1858014 docker.go:615] Images already preloaded, skipping extraction
I0923 12:05:55.826535 1858014 ssh_runner.go:195] Run: docker images --format {{.Repository}}:{{.Tag}}
I0923 12:05:55.844354 1858014 docker.go:685] Got preloaded images: -- stdout --
registry.k8s.io/kube-apiserver:v1.31.1
registry.k8s.io/kube-scheduler:v1.31.1
registry.k8s.io/kube-controller-manager:v1.31.1
registry.k8s.io/kube-proxy:v1.31.1
registry.k8s.io/coredns/coredns:v1.11.3
registry.k8s.io/etcd:3.5.15-0
registry.k8s.io/pause:3.10
gcr.io/k8s-minikube/storage-provisioner:v5
-- /stdout --
I0923 12:05:55.844376 1858014 cache_images.go:84] Images are preloaded, skipping loading
I0923 12:05:55.844386 1858014 kubeadm.go:934] updating node { 192.168.49.2 8443 v1.31.1 docker true true} ...
I0923 12:05:55.844499 1858014 kubeadm.go:946] kubelet [Unit]
Wants=docker.socket
[Service]
ExecStart=
ExecStart=/var/lib/minikube/binaries/v1.31.1/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --config=/var/lib/kubelet/config.yaml --hostname-override=addons-242108 --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.49.2
[Install]
config:
{KubernetesVersion:v1.31.1 ClusterName:addons-242108 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:}
I0923 12:05:55.844554 1858014 ssh_runner.go:195] Run: docker info --format {{.CgroupDriver}}
I0923 12:05:55.886107 1858014 cni.go:84] Creating CNI manager for ""
I0923 12:05:55.886136 1858014 cni.go:158] "docker" driver + "docker" container runtime found on kubernetes v1.24+, recommending bridge
I0923 12:05:55.886148 1858014 kubeadm.go:84] Using pod CIDR: 10.244.0.0/16
I0923 12:05:55.886169 1858014 kubeadm.go:181] kubeadm options: {CertDir:/var/lib/minikube/certs ServiceCIDR:10.96.0.0/12 PodSubnet:10.244.0.0/16 AdvertiseAddress:192.168.49.2 APIServerPort:8443 KubernetesVersion:v1.31.1 EtcdDataDir:/var/lib/minikube/etcd EtcdExtraArgs:map[] ClusterName:addons-242108 NodeName:addons-242108 DNSDomain:cluster.local CRISocket:/var/run/cri-dockerd.sock ImageRepository: ComponentOptions:[{Component:apiServer ExtraArgs:map[enable-admission-plugins:NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota] Pairs:map[certSANs:["127.0.0.1", "localhost", "192.168.49.2"]]} {Component:controllerManager ExtraArgs:map[allocate-node-cidrs:true leader-elect:false] Pairs:map[]} {Component:scheduler ExtraArgs:map[leader-elect:false] Pairs:map[]}] FeatureArgs:map[] NodeIP:192.168.49.2 CgroupDriver:cgroupfs ClientCAFile:/var/lib/minikube/certs/ca.crt StaticPodPath:/etc/kubernetes/manifests ControlPlaneAddress:control-plane.minikube.internal KubeProxyOptions:map[] ResolvConfSearchRegression:false KubeletConfigOpts:map[containerRuntimeEndpoint:unix:///var/run/cri-dockerd.sock hairpinMode:hairpin-veth runtimeRequestTimeout:15m] PrependCriSocketUnix:true}
I0923 12:05:55.886308 1858014 kubeadm.go:187] kubeadm config:
apiVersion: kubeadm.k8s.io/v1beta3
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 192.168.49.2
  bindPort: 8443
bootstrapTokens:
  - groups:
      - system:bootstrappers:kubeadm:default-node-token
    ttl: 24h0m0s
    usages:
      - signing
      - authentication
nodeRegistration:
  criSocket: unix:///var/run/cri-dockerd.sock
  name: "addons-242108"
  kubeletExtraArgs:
    node-ip: 192.168.49.2
  taints: []
---
apiVersion: kubeadm.k8s.io/v1beta3
kind: ClusterConfiguration
apiServer:
  certSANs: ["127.0.0.1", "localhost", "192.168.49.2"]
  extraArgs:
    enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
controllerManager:
  extraArgs:
    allocate-node-cidrs: "true"
    leader-elect: "false"
scheduler:
  extraArgs:
    leader-elect: "false"
certificatesDir: /var/lib/minikube/certs
clusterName: mk
controlPlaneEndpoint: control-plane.minikube.internal:8443
etcd:
  local:
    dataDir: /var/lib/minikube/etcd
    extraArgs:
      proxy-refresh-interval: "70000"
kubernetesVersion: v1.31.1
networking:
  dnsDomain: cluster.local
  podSubnet: "10.244.0.0/16"
  serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
authentication:
  x509:
    clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: cgroupfs
containerRuntimeEndpoint: unix:///var/run/cri-dockerd.sock
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
  nodefs.available: "0%"
  nodefs.inodesFree: "0%"
  imagefs.available: "0%"
failSwapOn: false
staticPodPath: /etc/kubernetes/manifests
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
  maxPerCore: 0
  # Skip setting "net.netfilter.nf_conntrack_tcp_timeout_established"
  tcpEstablishedTimeout: 0s
  # Skip setting "net.netfilter.nf_conntrack_tcp_timeout_close"
  tcpCloseWaitTimeout: 0s
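Once a config of this shape has been written out (the log later copies it to /var/tmp/minikube/kubeadm.yaml), it can be sanity-checked before init. A hedged sketch, assuming a recent kubeadm that ships the "config validate" subcommand:

sudo /var/lib/minikube/binaries/v1.31.1/kubeadm config validate --config /var/tmp/minikube/kubeadm.yaml
# Older kubeadm releases without "config validate" can at least render the defaults for comparison:
#   kubeadm config print init-defaults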
I0923 12:05:55.886361 1858014 ssh_runner.go:195] Run: sudo ls /var/lib/minikube/binaries/v1.31.1
I0923 12:05:55.894535 1858014 binaries.go:44] Found k8s binaries, skipping transfer
I0923 12:05:55.894599 1858014 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/kubelet.service.d /lib/systemd/system /var/tmp/minikube
I0923 12:05:55.902042 1858014 ssh_runner.go:362] scp memory --> /etc/systemd/system/kubelet.service.d/10-kubeadm.conf (312 bytes)
I0923 12:05:55.917203 1858014 ssh_runner.go:362] scp memory --> /lib/systemd/system/kubelet.service (352 bytes)
I0923 12:05:55.931994 1858014 ssh_runner.go:362] scp memory --> /var/tmp/minikube/kubeadm.yaml.new (2155 bytes)
I0923 12:05:55.946730 1858014 ssh_runner.go:195] Run: grep 192.168.49.2 control-plane.minikube.internal$ /etc/hosts
I0923 12:05:55.949587 1858014 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\tcontrol-plane.minikube.internal$' "/etc/hosts"; echo "192.168.49.2 control-plane.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I0923 12:05:55.959317 1858014 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0923 12:05:56.034034 1858014 ssh_runner.go:195] Run: sudo systemctl start kubelet
I0923 12:05:56.046352 1858014 certs.go:68] Setting up /home/jenkins/minikube-integration/19690-1849651/.minikube/profiles/addons-242108 for IP: 192.168.49.2
I0923 12:05:56.046372 1858014 certs.go:194] generating shared ca certs ...
I0923 12:05:56.046389 1858014 certs.go:226] acquiring lock for ca certs: {Name:mkd617dd2c7f71c1d4210c8e0f68b24c1c10d968 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0923 12:05:56.046521 1858014 certs.go:240] generating "minikubeCA" ca cert: /home/jenkins/minikube-integration/19690-1849651/.minikube/ca.key
I0923 12:05:56.106772 1858014 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/19690-1849651/.minikube/ca.crt ...
I0923 12:05:56.106797 1858014 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19690-1849651/.minikube/ca.crt: {Name:mk3f0c8c6df5343dfe514530bd7928b0fe2be574 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0923 12:05:56.106940 1858014 crypto.go:164] Writing key to /home/jenkins/minikube-integration/19690-1849651/.minikube/ca.key ...
I0923 12:05:56.106951 1858014 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19690-1849651/.minikube/ca.key: {Name:mk277f8afdee39e7d221a0138e71335be3ddf5f5 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0923 12:05:56.107023 1858014 certs.go:240] generating "proxyClientCA" ca cert: /home/jenkins/minikube-integration/19690-1849651/.minikube/proxy-client-ca.key
I0923 12:05:56.473807 1858014 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/19690-1849651/.minikube/proxy-client-ca.crt ...
I0923 12:05:56.473837 1858014 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19690-1849651/.minikube/proxy-client-ca.crt: {Name:mk8b0430ca5e731d80ba61b46a2b6ead5426dd55 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0923 12:05:56.474002 1858014 crypto.go:164] Writing key to /home/jenkins/minikube-integration/19690-1849651/.minikube/proxy-client-ca.key ...
I0923 12:05:56.474012 1858014 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19690-1849651/.minikube/proxy-client-ca.key: {Name:mk712fce54c3079b71b7c02b100c02dabe556630 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0923 12:05:56.474081 1858014 certs.go:256] generating profile certs ...
I0923 12:05:56.474133 1858014 certs.go:363] generating signed profile cert for "minikube-user": /home/jenkins/minikube-integration/19690-1849651/.minikube/profiles/addons-242108/client.key
I0923 12:05:56.474144 1858014 crypto.go:68] Generating cert /home/jenkins/minikube-integration/19690-1849651/.minikube/profiles/addons-242108/client.crt with IP's: []
I0923 12:05:56.566542 1858014 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/19690-1849651/.minikube/profiles/addons-242108/client.crt ...
I0923 12:05:56.566568 1858014 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19690-1849651/.minikube/profiles/addons-242108/client.crt: {Name:mk76e0a97b1f3d8b96a444eb4d58dca89a0ba384 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0923 12:05:56.566715 1858014 crypto.go:164] Writing key to /home/jenkins/minikube-integration/19690-1849651/.minikube/profiles/addons-242108/client.key ...
I0923 12:05:56.566724 1858014 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19690-1849651/.minikube/profiles/addons-242108/client.key: {Name:mk54473dc76920a20a283c80430dc452819242c9 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0923 12:05:56.566792 1858014 certs.go:363] generating signed profile cert for "minikube": /home/jenkins/minikube-integration/19690-1849651/.minikube/profiles/addons-242108/apiserver.key.adcfa988
I0923 12:05:56.566809 1858014 crypto.go:68] Generating cert /home/jenkins/minikube-integration/19690-1849651/.minikube/profiles/addons-242108/apiserver.crt.adcfa988 with IP's: [10.96.0.1 127.0.0.1 10.0.0.1 192.168.49.2]
I0923 12:05:56.640174 1858014 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/19690-1849651/.minikube/profiles/addons-242108/apiserver.crt.adcfa988 ...
I0923 12:05:56.640198 1858014 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19690-1849651/.minikube/profiles/addons-242108/apiserver.crt.adcfa988: {Name:mk00385d9913db47720e530222257f95bcdedecd Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0923 12:05:56.640338 1858014 crypto.go:164] Writing key to /home/jenkins/minikube-integration/19690-1849651/.minikube/profiles/addons-242108/apiserver.key.adcfa988 ...
I0923 12:05:56.640351 1858014 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19690-1849651/.minikube/profiles/addons-242108/apiserver.key.adcfa988: {Name:mkc62f6ea8dbdba548fc840ca0c319eec2ae3bba Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0923 12:05:56.640427 1858014 certs.go:381] copying /home/jenkins/minikube-integration/19690-1849651/.minikube/profiles/addons-242108/apiserver.crt.adcfa988 -> /home/jenkins/minikube-integration/19690-1849651/.minikube/profiles/addons-242108/apiserver.crt
I0923 12:05:56.640502 1858014 certs.go:385] copying /home/jenkins/minikube-integration/19690-1849651/.minikube/profiles/addons-242108/apiserver.key.adcfa988 -> /home/jenkins/minikube-integration/19690-1849651/.minikube/profiles/addons-242108/apiserver.key
I0923 12:05:56.640547 1858014 certs.go:363] generating signed profile cert for "aggregator": /home/jenkins/minikube-integration/19690-1849651/.minikube/profiles/addons-242108/proxy-client.key
I0923 12:05:56.640563 1858014 crypto.go:68] Generating cert /home/jenkins/minikube-integration/19690-1849651/.minikube/profiles/addons-242108/proxy-client.crt with IP's: []
I0923 12:05:56.865162 1858014 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/19690-1849651/.minikube/profiles/addons-242108/proxy-client.crt ...
I0923 12:05:56.865195 1858014 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19690-1849651/.minikube/profiles/addons-242108/proxy-client.crt: {Name:mk108295b5a38c142682f472e5ed0750e966abff Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0923 12:05:56.865353 1858014 crypto.go:164] Writing key to /home/jenkins/minikube-integration/19690-1849651/.minikube/profiles/addons-242108/proxy-client.key ...
I0923 12:05:56.865366 1858014 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19690-1849651/.minikube/profiles/addons-242108/proxy-client.key: {Name:mkcd65dc874fcf1fe98c3001e16d9d636631337b Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0923 12:05:56.865545 1858014 certs.go:484] found cert: /home/jenkins/minikube-integration/19690-1849651/.minikube/certs/ca-key.pem (1675 bytes)
I0923 12:05:56.865580 1858014 certs.go:484] found cert: /home/jenkins/minikube-integration/19690-1849651/.minikube/certs/ca.pem (1082 bytes)
I0923 12:05:56.865605 1858014 certs.go:484] found cert: /home/jenkins/minikube-integration/19690-1849651/.minikube/certs/cert.pem (1123 bytes)
I0923 12:05:56.865628 1858014 certs.go:484] found cert: /home/jenkins/minikube-integration/19690-1849651/.minikube/certs/key.pem (1679 bytes)
I0923 12:05:56.866297 1858014 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19690-1849651/.minikube/ca.crt --> /var/lib/minikube/certs/ca.crt (1111 bytes)
I0923 12:05:56.888614 1858014 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19690-1849651/.minikube/ca.key --> /var/lib/minikube/certs/ca.key (1675 bytes)
I0923 12:05:56.909440 1858014 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19690-1849651/.minikube/proxy-client-ca.crt --> /var/lib/minikube/certs/proxy-client-ca.crt (1119 bytes)
I0923 12:05:56.929815 1858014 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19690-1849651/.minikube/proxy-client-ca.key --> /var/lib/minikube/certs/proxy-client-ca.key (1679 bytes)
I0923 12:05:56.949969 1858014 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19690-1849651/.minikube/profiles/addons-242108/apiserver.crt --> /var/lib/minikube/certs/apiserver.crt (1419 bytes)
I0923 12:05:56.969972 1858014 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19690-1849651/.minikube/profiles/addons-242108/apiserver.key --> /var/lib/minikube/certs/apiserver.key (1675 bytes)
I0923 12:05:56.990008 1858014 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19690-1849651/.minikube/profiles/addons-242108/proxy-client.crt --> /var/lib/minikube/certs/proxy-client.crt (1147 bytes)
I0923 12:05:57.009838 1858014 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19690-1849651/.minikube/profiles/addons-242108/proxy-client.key --> /var/lib/minikube/certs/proxy-client.key (1679 bytes)
I0923 12:05:57.030215 1858014 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19690-1849651/.minikube/ca.crt --> /usr/share/ca-certificates/minikubeCA.pem (1111 bytes)
I0923 12:05:57.051786 1858014 ssh_runner.go:362] scp memory --> /var/lib/minikube/kubeconfig (738 bytes)
I0923 12:05:57.066933 1858014 ssh_runner.go:195] Run: openssl version
I0923 12:05:57.071660 1858014 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/minikubeCA.pem && ln -fs /usr/share/ca-certificates/minikubeCA.pem /etc/ssl/certs/minikubeCA.pem"
I0923 12:05:57.079646 1858014 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/minikubeCA.pem
I0923 12:05:57.082575 1858014 certs.go:528] hashing: -rw-r--r-- 1 root root 1111 Sep 23 12:05 /usr/share/ca-certificates/minikubeCA.pem
I0923 12:05:57.082619 1858014 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem
I0923 12:05:57.088559 1858014 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/b5213941.0 || ln -fs /etc/ssl/certs/minikubeCA.pem /etc/ssl/certs/b5213941.0"
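The b5213941.0 symlink name above is the OpenSSL subject hash of minikubeCA; the same link can be derived by hand from the cert that was just installed:

HASH=$(openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem)
echo "$HASH"   # prints b5213941 for this CA
sudo ln -fs /etc/ssl/certs/minikubeCA.pem "/etc/ssl/certs/${HASH}.0"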
I0923 12:05:57.096543 1858014 ssh_runner.go:195] Run: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt
I0923 12:05:57.099251 1858014 certs.go:399] 'apiserver-kubelet-client' cert doesn't exist, likely first start: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/certs/apiserver-kubelet-client.crt': No such file or directory
I0923 12:05:57.099318 1858014 kubeadm.go:392] StartCluster: {Name:addons-242108 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1726784731-19672@sha256:7f8c62ddb0100a5b958dd19c5b5478b8c7ef13da9a0a4d6c7d18f43544e0dbed Memory:4000 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.31.1 ClusterName:addons-242108 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.49.2 Port:8443 KubernetesVersion:v1.31.1 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I0923 12:05:57.099425 1858014 ssh_runner.go:195] Run: docker ps --filter status=paused --filter=name=k8s_.*_(kube-system)_ --format={{.ID}}
I0923 12:05:57.115907 1858014 ssh_runner.go:195] Run: sudo ls /var/lib/kubelet/kubeadm-flags.env /var/lib/kubelet/config.yaml /var/lib/minikube/etcd
I0923 12:05:57.123831 1858014 ssh_runner.go:195] Run: sudo cp /var/tmp/minikube/kubeadm.yaml.new /var/tmp/minikube/kubeadm.yaml
I0923 12:05:57.131780 1858014 kubeadm.go:214] ignoring SystemVerification for kubeadm because of docker driver
I0923 12:05:57.131830 1858014 ssh_runner.go:195] Run: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf
I0923 12:05:57.139214 1858014 kubeadm.go:155] config check failed, skipping stale config cleanup: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
ls: cannot access '/etc/kubernetes/admin.conf': No such file or directory
ls: cannot access '/etc/kubernetes/kubelet.conf': No such file or directory
ls: cannot access '/etc/kubernetes/controller-manager.conf': No such file or directory
ls: cannot access '/etc/kubernetes/scheduler.conf': No such file or directory
I0923 12:05:57.139231 1858014 kubeadm.go:157] found existing configuration files:
I0923 12:05:57.139286 1858014 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf
I0923 12:05:57.148309 1858014 kubeadm.go:163] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/admin.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/admin.conf: No such file or directory
I0923 12:05:57.148364 1858014 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/admin.conf
I0923 12:05:57.155594 1858014 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf
I0923 12:05:57.163697 1858014 kubeadm.go:163] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/kubelet.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/kubelet.conf: No such file or directory
I0923 12:05:57.163744 1858014 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/kubelet.conf
I0923 12:05:57.170837 1858014 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf
I0923 12:05:57.178139 1858014 kubeadm.go:163] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/controller-manager.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/controller-manager.conf: No such file or directory
I0923 12:05:57.178184 1858014 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/controller-manager.conf
I0923 12:05:57.185507 1858014 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf
I0923 12:05:57.192840 1858014 kubeadm.go:163] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/scheduler.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/scheduler.conf: No such file or directory
I0923 12:05:57.192893 1858014 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/scheduler.conf
I0923 12:05:57.200034 1858014 ssh_runner.go:286] Start: /bin/bash -c "sudo env PATH="/var/lib/minikube/binaries/v1.31.1:$PATH" kubeadm init --config /var/tmp/minikube/kubeadm.yaml --ignore-preflight-errors=DirAvailable--etc-kubernetes-manifests,DirAvailable--var-lib-minikube,DirAvailable--var-lib-minikube-etcd,FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml,FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml,FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml,FileAvailable--etc-kubernetes-manifests-etcd.yaml,Port-10250,Swap,NumCPU,Mem,SystemVerification,FileContent--proc-sys-net-bridge-bridge-nf-call-iptables"
I0923 12:05:57.234835 1858014 kubeadm.go:310] [init] Using Kubernetes version: v1.31.1
I0923 12:05:57.234919 1858014 kubeadm.go:310] [preflight] Running pre-flight checks
I0923 12:05:57.254761 1858014 kubeadm.go:310] [preflight] The system verification failed. Printing the output from the verification:
I0923 12:05:57.254824 1858014 kubeadm.go:310] KERNEL_VERSION: 5.15.0-1069-gcp
I0923 12:05:57.254903 1858014 kubeadm.go:310] OS: Linux
I0923 12:05:57.254978 1858014 kubeadm.go:310] CGROUPS_CPU: enabled
I0923 12:05:57.255026 1858014 kubeadm.go:310] CGROUPS_CPUACCT: enabled
I0923 12:05:57.255083 1858014 kubeadm.go:310] CGROUPS_CPUSET: enabled
I0923 12:05:57.255139 1858014 kubeadm.go:310] CGROUPS_DEVICES: enabled
I0923 12:05:57.255201 1858014 kubeadm.go:310] CGROUPS_FREEZER: enabled
I0923 12:05:57.255300 1858014 kubeadm.go:310] CGROUPS_MEMORY: enabled
I0923 12:05:57.255378 1858014 kubeadm.go:310] CGROUPS_PIDS: enabled
I0923 12:05:57.255454 1858014 kubeadm.go:310] CGROUPS_HUGETLB: enabled
I0923 12:05:57.255526 1858014 kubeadm.go:310] CGROUPS_BLKIO: enabled
I0923 12:05:57.302495 1858014 kubeadm.go:310] [preflight] Pulling images required for setting up a Kubernetes cluster
I0923 12:05:57.302629 1858014 kubeadm.go:310] [preflight] This might take a minute or two, depending on the speed of your internet connection
I0923 12:05:57.302761 1858014 kubeadm.go:310] [preflight] You can also perform this action beforehand using 'kubeadm config images pull'
I0923 12:05:57.312634 1858014 kubeadm.go:310] [certs] Using certificateDir folder "/var/lib/minikube/certs"
I0923 12:05:57.315892 1858014 out.go:235] - Generating certificates and keys ...
I0923 12:05:57.315991 1858014 kubeadm.go:310] [certs] Using existing ca certificate authority
I0923 12:05:57.316070 1858014 kubeadm.go:310] [certs] Using existing apiserver certificate and key on disk
I0923 12:05:57.392273 1858014 kubeadm.go:310] [certs] Generating "apiserver-kubelet-client" certificate and key
I0923 12:05:57.500855 1858014 kubeadm.go:310] [certs] Generating "front-proxy-ca" certificate and key
I0923 12:05:57.657759 1858014 kubeadm.go:310] [certs] Generating "front-proxy-client" certificate and key
I0923 12:05:57.732947 1858014 kubeadm.go:310] [certs] Generating "etcd/ca" certificate and key
I0923 12:05:57.920894 1858014 kubeadm.go:310] [certs] Generating "etcd/server" certificate and key
I0923 12:05:57.921039 1858014 kubeadm.go:310] [certs] etcd/server serving cert is signed for DNS names [addons-242108 localhost] and IPs [192.168.49.2 127.0.0.1 ::1]
I0923 12:05:58.155582 1858014 kubeadm.go:310] [certs] Generating "etcd/peer" certificate and key
I0923 12:05:58.155772 1858014 kubeadm.go:310] [certs] etcd/peer serving cert is signed for DNS names [addons-242108 localhost] and IPs [192.168.49.2 127.0.0.1 ::1]
I0923 12:05:58.263977 1858014 kubeadm.go:310] [certs] Generating "etcd/healthcheck-client" certificate and key
I0923 12:05:58.450677 1858014 kubeadm.go:310] [certs] Generating "apiserver-etcd-client" certificate and key
I0923 12:05:58.552662 1858014 kubeadm.go:310] [certs] Generating "sa" key and public key
I0923 12:05:58.552744 1858014 kubeadm.go:310] [kubeconfig] Using kubeconfig folder "/etc/kubernetes"
I0923 12:05:58.746041 1858014 kubeadm.go:310] [kubeconfig] Writing "admin.conf" kubeconfig file
I0923 12:05:58.789095 1858014 kubeadm.go:310] [kubeconfig] Writing "super-admin.conf" kubeconfig file
I0923 12:05:58.989263 1858014 kubeadm.go:310] [kubeconfig] Writing "kubelet.conf" kubeconfig file
I0923 12:05:59.094015 1858014 kubeadm.go:310] [kubeconfig] Writing "controller-manager.conf" kubeconfig file
I0923 12:05:59.264402 1858014 kubeadm.go:310] [kubeconfig] Writing "scheduler.conf" kubeconfig file
I0923 12:05:59.264932 1858014 kubeadm.go:310] [etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
I0923 12:05:59.267316 1858014 kubeadm.go:310] [control-plane] Using manifest folder "/etc/kubernetes/manifests"
I0923 12:05:59.270260 1858014 out.go:235] - Booting up control plane ...
I0923 12:05:59.270374 1858014 kubeadm.go:310] [control-plane] Creating static Pod manifest for "kube-apiserver"
I0923 12:05:59.270472 1858014 kubeadm.go:310] [control-plane] Creating static Pod manifest for "kube-controller-manager"
I0923 12:05:59.270578 1858014 kubeadm.go:310] [control-plane] Creating static Pod manifest for "kube-scheduler"
I0923 12:05:59.279492 1858014 kubeadm.go:310] [kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
I0923 12:05:59.285185 1858014 kubeadm.go:310] [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
I0923 12:05:59.285254 1858014 kubeadm.go:310] [kubelet-start] Starting the kubelet
I0923 12:05:59.366082 1858014 kubeadm.go:310] [wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests"
I0923 12:05:59.366186 1858014 kubeadm.go:310] [kubelet-check] Waiting for a healthy kubelet at http://127.0.0.1:10248/healthz. This can take up to 4m0s
I0923 12:06:00.367527 1858014 kubeadm.go:310] [kubelet-check] The kubelet is healthy after 1.001497547s
I0923 12:06:00.367632 1858014 kubeadm.go:310] [api-check] Waiting for a healthy API server. This can take up to 4m0s
I0923 12:06:04.868886 1858014 kubeadm.go:310] [api-check] The API server is healthy after 4.501336559s
I0923 12:06:04.879649 1858014 kubeadm.go:310] [upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
I0923 12:06:04.889624 1858014 kubeadm.go:310] [kubelet] Creating a ConfigMap "kubelet-config" in namespace kube-system with the configuration for the kubelets in the cluster
I0923 12:06:04.904349 1858014 kubeadm.go:310] [upload-certs] Skipping phase. Please see --upload-certs
I0923 12:06:04.904615 1858014 kubeadm.go:310] [mark-control-plane] Marking the node addons-242108 as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]
I0923 12:06:04.911134 1858014 kubeadm.go:310] [bootstrap-token] Using token: q1lf1o.0evqptum6dcwxyx7
I0923 12:06:04.912567 1858014 out.go:235] - Configuring RBAC rules ...
I0923 12:06:04.912714 1858014 kubeadm.go:310] [bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
I0923 12:06:04.915714 1858014 kubeadm.go:310] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes
I0923 12:06:04.921393 1858014 kubeadm.go:310] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
I0923 12:06:04.923612 1858014 kubeadm.go:310] [bootstrap-token] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
I0923 12:06:04.925791 1858014 kubeadm.go:310] [bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
I0923 12:06:04.928054 1858014 kubeadm.go:310] [bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
I0923 12:06:05.274840 1858014 kubeadm.go:310] [kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
I0923 12:06:05.689656 1858014 kubeadm.go:310] [addons] Applied essential addon: CoreDNS
I0923 12:06:06.274470 1858014 kubeadm.go:310] [addons] Applied essential addon: kube-proxy
I0923 12:06:06.275249 1858014 kubeadm.go:310]
I0923 12:06:06.275376 1858014 kubeadm.go:310] Your Kubernetes control-plane has initialized successfully!
I0923 12:06:06.275407 1858014 kubeadm.go:310]
I0923 12:06:06.275517 1858014 kubeadm.go:310] To start using your cluster, you need to run the following as a regular user:
I0923 12:06:06.275525 1858014 kubeadm.go:310]
I0923 12:06:06.275566 1858014 kubeadm.go:310] mkdir -p $HOME/.kube
I0923 12:06:06.275649 1858014 kubeadm.go:310] sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
I0923 12:06:06.275718 1858014 kubeadm.go:310] sudo chown $(id -u):$(id -g) $HOME/.kube/config
I0923 12:06:06.275725 1858014 kubeadm.go:310]
I0923 12:06:06.275776 1858014 kubeadm.go:310] Alternatively, if you are the root user, you can run:
I0923 12:06:06.275782 1858014 kubeadm.go:310]
I0923 12:06:06.275843 1858014 kubeadm.go:310] export KUBECONFIG=/etc/kubernetes/admin.conf
I0923 12:06:06.275884 1858014 kubeadm.go:310]
I0923 12:06:06.275968 1858014 kubeadm.go:310] You should now deploy a pod network to the cluster.
I0923 12:06:06.276067 1858014 kubeadm.go:310] Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
I0923 12:06:06.276153 1858014 kubeadm.go:310] https://kubernetes.io/docs/concepts/cluster-administration/addons/
I0923 12:06:06.276162 1858014 kubeadm.go:310]
I0923 12:06:06.276270 1858014 kubeadm.go:310] You can now join any number of control-plane nodes by copying certificate authorities
I0923 12:06:06.276366 1858014 kubeadm.go:310] and service account keys on each node and then running the following as root:
I0923 12:06:06.276373 1858014 kubeadm.go:310]
I0923 12:06:06.276477 1858014 kubeadm.go:310] kubeadm join control-plane.minikube.internal:8443 --token q1lf1o.0evqptum6dcwxyx7 \
I0923 12:06:06.276609 1858014 kubeadm.go:310] --discovery-token-ca-cert-hash sha256:dc4fd7da959f7b699ddb5f7386af3bc525389d0ccb89f77353dd9ad1bd89f89b \
I0923 12:06:06.276643 1858014 kubeadm.go:310] --control-plane
I0923 12:06:06.276651 1858014 kubeadm.go:310]
I0923 12:06:06.276754 1858014 kubeadm.go:310] Then you can join any number of worker nodes by running the following on each as root:
I0923 12:06:06.276770 1858014 kubeadm.go:310]
I0923 12:06:06.276844 1858014 kubeadm.go:310] kubeadm join control-plane.minikube.internal:8443 --token q1lf1o.0evqptum6dcwxyx7 \
I0923 12:06:06.276933 1858014 kubeadm.go:310] --discovery-token-ca-cert-hash sha256:dc4fd7da959f7b699ddb5f7386af3bc525389d0ccb89f77353dd9ad1bd89f89b
I0923 12:06:06.278939 1858014 kubeadm.go:310] W0923 12:05:57.232436 1926 common.go:101] your configuration file uses a deprecated API spec: "kubeadm.k8s.io/v1beta3" (kind: "ClusterConfiguration"). Please use 'kubeadm config migrate --old-config old.yaml --new-config new.yaml', which will write the new, similar spec using a newer API version.
I0923 12:06:06.279201 1858014 kubeadm.go:310] W0923 12:05:57.233045 1926 common.go:101] your configuration file uses a deprecated API spec: "kubeadm.k8s.io/v1beta3" (kind: "InitConfiguration"). Please use 'kubeadm config migrate --old-config old.yaml --new-config new.yaml', which will write the new, similar spec using a newer API version.
I0923 12:06:06.279519 1858014 kubeadm.go:310] [WARNING SystemVerification]: failed to parse kernel config: unable to load kernel module: "configs", output: "modprobe: FATAL: Module configs not found in directory /lib/modules/5.15.0-1069-gcp\n", err: exit status 1
I0923 12:06:06.279676 1858014 kubeadm.go:310] [WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'
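The --discovery-token-ca-cert-hash printed in the join commands above is the SHA-256 of the cluster CA's public key. The standard recipe from the kubeadm documentation reproduces it; the path below is the certificatesDir from the config above (upstream kubeadm would normally use /etc/kubernetes/pki/ca.crt):

openssl x509 -pubkey -in /var/lib/minikube/certs/ca.crt \
  | openssl rsa -pubin -outform der 2>/dev/null \
  | openssl dgst -sha256 -hex \
  | sed 's/^.* //'
# Should match the dc4fd7da... value printed in the join command above.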
I0923 12:06:06.279709 1858014 cni.go:84] Creating CNI manager for ""
I0923 12:06:06.279731 1858014 cni.go:158] "docker" driver + "docker" container runtime found on kubernetes v1.24+, recommending bridge
I0923 12:06:06.281255 1858014 out.go:177] * Configuring bridge CNI (Container Networking Interface) ...
I0923 12:06:06.282334 1858014 ssh_runner.go:195] Run: sudo mkdir -p /etc/cni/net.d
I0923 12:06:06.290730 1858014 ssh_runner.go:362] scp memory --> /etc/cni/net.d/1-k8s.conflist (496 bytes)
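The 496-byte /etc/cni/net.d/1-k8s.conflist written here is a bridge-plus-portmap plugin chain. A generic sketch of that kind of conflist, with field values that are illustrative rather than minikube's exact file (the pod subnet is the 10.244.0.0/16 CIDR from the kubeadm config above):

cat > /tmp/1-k8s.conflist.example <<'EOF'
{
  "cniVersion": "1.0.0",
  "name": "bridge",
  "plugins": [
    {
      "type": "bridge",
      "bridge": "bridge",
      "isDefaultGateway": true,
      "ipMasq": true,
      "hairpinMode": true,
      "ipam": { "type": "host-local", "subnet": "10.244.0.0/16" }
    },
    { "type": "portmap", "capabilities": { "portMappings": true } }
  ]
}
EOF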
I0923 12:06:06.306366 1858014 ssh_runner.go:195] Run: /bin/bash -c "cat /proc/$(pgrep kube-apiserver)/oom_adj"
I0923 12:06:06.306443 1858014 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.31.1/kubectl create clusterrolebinding minikube-rbac --clusterrole=cluster-admin --serviceaccount=kube-system:default --kubeconfig=/var/lib/minikube/kubeconfig
I0923 12:06:06.306483 1858014 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.31.1/kubectl --kubeconfig=/var/lib/minikube/kubeconfig label --overwrite nodes addons-242108 minikube.k8s.io/updated_at=2024_09_23T12_06_06_0700 minikube.k8s.io/version=v1.34.0 minikube.k8s.io/commit=30f673d6edb6d12f8aba2f7e30667ea1b6d205d1 minikube.k8s.io/name=addons-242108 minikube.k8s.io/primary=true
I0923 12:06:06.313715 1858014 ops.go:34] apiserver oom_adj: -16
I0923 12:06:06.386501 1858014 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.31.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0923 12:06:06.887358 1858014 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.31.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0923 12:06:07.387139 1858014 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.31.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0923 12:06:07.886571 1858014 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.31.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0923 12:06:08.386847 1858014 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.31.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0923 12:06:08.887308 1858014 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.31.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0923 12:06:09.386683 1858014 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.31.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0923 12:06:09.887398 1858014 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.31.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0923 12:06:10.387393 1858014 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.31.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0923 12:06:10.450699 1858014 kubeadm.go:1113] duration metric: took 4.144319952s to wait for elevateKubeSystemPrivileges
I0923 12:06:10.450737 1858014 kubeadm.go:394] duration metric: took 13.351425547s to StartCluster
I0923 12:06:10.450758 1858014 settings.go:142] acquiring lock: {Name:mkcd0a3d308b3cd8f2dd83d02f7ea43972901749 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0923 12:06:10.450887 1858014 settings.go:150] Updating kubeconfig: /home/jenkins/minikube-integration/19690-1849651/kubeconfig
I0923 12:06:10.451304 1858014 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19690-1849651/kubeconfig: {Name:mkb7cdc772d8ac72e3c4d98a4ce5cd302caad9a5 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0923 12:06:10.451520 1858014 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.31.1/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml"
I0923 12:06:10.451552 1858014 start.go:235] Will wait 6m0s for node &{Name: IP:192.168.49.2 Port:8443 KubernetesVersion:v1.31.1 ContainerRuntime:docker ControlPlane:true Worker:true}
I0923 12:06:10.451670 1858014 addons.go:507] enable addons start: toEnable=map[ambassador:false auto-pause:false cloud-spanner:true csi-hostpath-driver:true dashboard:false default-storageclass:true efk:false freshpod:false gcp-auth:true gvisor:false headlamp:false inaccel:false ingress:true ingress-dns:true inspektor-gadget:true istio:false istio-provisioner:false kong:false kubeflow:false kubevirt:false logviewer:false metallb:false metrics-server:true nvidia-device-plugin:true nvidia-driver-installer:false nvidia-gpu-device-plugin:false olm:false pod-security-policy:false portainer:false registry:true registry-aliases:false registry-creds:false storage-provisioner:true storage-provisioner-gluster:false storage-provisioner-rancher:true volcano:true volumesnapshots:true yakd:true]
I0923 12:06:10.451768 1858014 config.go:182] Loaded profile config "addons-242108": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.31.1
I0923 12:06:10.451798 1858014 addons.go:69] Setting gcp-auth=true in profile "addons-242108"
I0923 12:06:10.451813 1858014 addons.go:69] Setting ingress=true in profile "addons-242108"
I0923 12:06:10.451818 1858014 addons.go:69] Setting cloud-spanner=true in profile "addons-242108"
I0923 12:06:10.451826 1858014 addons.go:234] Setting addon ingress=true in "addons-242108"
I0923 12:06:10.451831 1858014 addons.go:234] Setting addon cloud-spanner=true in "addons-242108"
I0923 12:06:10.451844 1858014 mustload.go:65] Loading cluster: addons-242108
I0923 12:06:10.451841 1858014 addons.go:69] Setting default-storageclass=true in profile "addons-242108"
I0923 12:06:10.451838 1858014 addons.go:69] Setting csi-hostpath-driver=true in profile "addons-242108"
I0923 12:06:10.451861 1858014 host.go:66] Checking if "addons-242108" exists ...
I0923 12:06:10.451867 1858014 host.go:66] Checking if "addons-242108" exists ...
I0923 12:06:10.451871 1858014 addons_storage_classes.go:33] enableOrDisableStorageClasses default-storageclass=true on "addons-242108"
I0923 12:06:10.451876 1858014 addons.go:69] Setting registry=true in profile "addons-242108"
I0923 12:06:10.451894 1858014 addons.go:69] Setting storage-provisioner-rancher=true in profile "addons-242108"
I0923 12:06:10.451901 1858014 addons.go:234] Setting addon csi-hostpath-driver=true in "addons-242108"
I0923 12:06:10.451916 1858014 addons_storage_classes.go:33] enableOrDisableStorageClasses storage-provisioner-rancher=true on "addons-242108"
I0923 12:06:10.451928 1858014 addons.go:69] Setting inspektor-gadget=true in profile "addons-242108"
I0923 12:06:10.451938 1858014 host.go:66] Checking if "addons-242108" exists ...
I0923 12:06:10.451948 1858014 addons.go:69] Setting metrics-server=true in profile "addons-242108"
I0923 12:06:10.451958 1858014 addons.go:234] Setting addon metrics-server=true in "addons-242108"
I0923 12:06:10.451988 1858014 host.go:66] Checking if "addons-242108" exists ...
I0923 12:06:10.452047 1858014 config.go:182] Loaded profile config "addons-242108": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.31.1
I0923 12:06:10.452211 1858014 cli_runner.go:164] Run: docker container inspect addons-242108 --format={{.State.Status}}
I0923 12:06:10.452238 1858014 cli_runner.go:164] Run: docker container inspect addons-242108 --format={{.State.Status}}
I0923 12:06:10.452288 1858014 cli_runner.go:164] Run: docker container inspect addons-242108 --format={{.State.Status}}
I0923 12:06:10.452393 1858014 cli_runner.go:164] Run: docker container inspect addons-242108 --format={{.State.Status}}
I0923 12:06:10.452440 1858014 cli_runner.go:164] Run: docker container inspect addons-242108 --format={{.State.Status}}
I0923 12:06:10.452459 1858014 addons.go:69] Setting nvidia-device-plugin=true in profile "addons-242108"
I0923 12:06:10.452481 1858014 addons.go:234] Setting addon nvidia-device-plugin=true in "addons-242108"
I0923 12:06:10.452503 1858014 host.go:66] Checking if "addons-242108" exists ...
I0923 12:06:10.452504 1858014 addons.go:69] Setting volcano=true in profile "addons-242108"
I0923 12:06:10.452521 1858014 addons.go:234] Setting addon volcano=true in "addons-242108"
I0923 12:06:10.452565 1858014 addons.go:69] Setting volumesnapshots=true in profile "addons-242108"
I0923 12:06:10.452573 1858014 host.go:66] Checking if "addons-242108" exists ...
I0923 12:06:10.452581 1858014 addons.go:234] Setting addon volumesnapshots=true in "addons-242108"
I0923 12:06:10.452605 1858014 host.go:66] Checking if "addons-242108" exists ...
I0923 12:06:10.451918 1858014 addons.go:69] Setting ingress-dns=true in profile "addons-242108"
I0923 12:06:10.452720 1858014 addons.go:234] Setting addon ingress-dns=true in "addons-242108"
I0923 12:06:10.452750 1858014 host.go:66] Checking if "addons-242108" exists ...
I0923 12:06:10.452936 1858014 cli_runner.go:164] Run: docker container inspect addons-242108 --format={{.State.Status}}
I0923 12:06:10.453044 1858014 cli_runner.go:164] Run: docker container inspect addons-242108 --format={{.State.Status}}
I0923 12:06:10.453215 1858014 cli_runner.go:164] Run: docker container inspect addons-242108 --format={{.State.Status}}
I0923 12:06:10.451941 1858014 addons.go:234] Setting addon inspektor-gadget=true in "addons-242108"
I0923 12:06:10.453550 1858014 host.go:66] Checking if "addons-242108" exists ...
I0923 12:06:10.452440 1858014 cli_runner.go:164] Run: docker container inspect addons-242108 --format={{.State.Status}}
I0923 12:06:10.454120 1858014 cli_runner.go:164] Run: docker container inspect addons-242108 --format={{.State.Status}}
I0923 12:06:10.451801 1858014 addons.go:69] Setting yakd=true in profile "addons-242108"
I0923 12:06:10.454201 1858014 addons.go:234] Setting addon yakd=true in "addons-242108"
I0923 12:06:10.454234 1858014 host.go:66] Checking if "addons-242108" exists ...
I0923 12:06:10.454870 1858014 out.go:177] * Verifying Kubernetes components...
I0923 12:06:10.458348 1858014 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0923 12:06:10.452417 1858014 cli_runner.go:164] Run: docker container inspect addons-242108 --format={{.State.Status}}
I0923 12:06:10.452438 1858014 addons.go:69] Setting storage-provisioner=true in profile "addons-242108"
I0923 12:06:10.458584 1858014 addons.go:234] Setting addon storage-provisioner=true in "addons-242108"
I0923 12:06:10.458638 1858014 host.go:66] Checking if "addons-242108" exists ...
I0923 12:06:10.451907 1858014 addons.go:234] Setting addon registry=true in "addons-242108"
I0923 12:06:10.459040 1858014 host.go:66] Checking if "addons-242108" exists ...
I0923 12:06:10.459168 1858014 cli_runner.go:164] Run: docker container inspect addons-242108 --format={{.State.Status}}
I0923 12:06:10.459554 1858014 cli_runner.go:164] Run: docker container inspect addons-242108 --format={{.State.Status}}
I0923 12:06:10.484361 1858014 out.go:177] - Using image registry.k8s.io/metrics-server/metrics-server:v0.7.2
I0923 12:06:10.485704 1858014 addons.go:431] installing /etc/kubernetes/addons/metrics-apiservice.yaml
I0923 12:06:10.485731 1858014 ssh_runner.go:362] scp metrics-server/metrics-apiservice.yaml --> /etc/kubernetes/addons/metrics-apiservice.yaml (424 bytes)
I0923 12:06:10.485794 1858014 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-242108
I0923 12:06:10.487670 1858014 cli_runner.go:164] Run: docker container inspect addons-242108 --format={{.State.Status}}
I0923 12:06:10.487887 1858014 cli_runner.go:164] Run: docker container inspect addons-242108 --format={{.State.Status}}
I0923 12:06:10.488478 1858014 out.go:177] - Using image nvcr.io/nvidia/k8s-device-plugin:v0.16.2
I0923 12:06:10.489763 1858014 addons.go:431] installing /etc/kubernetes/addons/nvidia-device-plugin.yaml
I0923 12:06:10.489784 1858014 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/nvidia-device-plugin.yaml (1966 bytes)
I0923 12:06:10.489831 1858014 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-242108
I0923 12:06:10.491992 1858014 out.go:177] - Using image registry.k8s.io/ingress-nginx/controller:v1.11.2
I0923 12:06:10.493266 1858014 out.go:177] - Using image registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.6.0
I0923 12:06:10.494406 1858014 host.go:66] Checking if "addons-242108" exists ...
I0923 12:06:10.495496 1858014 out.go:177] - Using image registry.k8s.io/sig-storage/hostpathplugin:v1.9.0
I0923 12:06:10.496694 1858014 out.go:177] - Using image registry.k8s.io/sig-storage/livenessprobe:v2.8.0
I0923 12:06:10.497854 1858014 out.go:177] - Using image gcr.io/k8s-minikube/minikube-ingress-dns:0.0.3
I0923 12:06:10.498827 1858014 addons.go:234] Setting addon storage-provisioner-rancher=true in "addons-242108"
I0923 12:06:10.498882 1858014 host.go:66] Checking if "addons-242108" exists ...
I0923 12:06:10.499000 1858014 addons.go:431] installing /etc/kubernetes/addons/ingress-dns-pod.yaml
I0923 12:06:10.499013 1858014 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/ingress-dns-pod.yaml (2442 bytes)
I0923 12:06:10.499057 1858014 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-242108
I0923 12:06:10.499301 1858014 out.go:177] - Using image registry.k8s.io/ingress-nginx/kube-webhook-certgen:v1.4.3
I0923 12:06:10.499365 1858014 out.go:177] - Using image registry.k8s.io/sig-storage/csi-resizer:v1.6.0
I0923 12:06:10.499495 1858014 cli_runner.go:164] Run: docker container inspect addons-242108 --format={{.State.Status}}
I0923 12:06:10.501812 1858014 out.go:177] - Using image registry.k8s.io/ingress-nginx/kube-webhook-certgen:v1.4.3
I0923 12:06:10.501855 1858014 out.go:177] - Using image registry.k8s.io/sig-storage/csi-snapshotter:v6.1.0
I0923 12:06:10.503161 1858014 addons.go:431] installing /etc/kubernetes/addons/ingress-deploy.yaml
I0923 12:06:10.503179 1858014 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/ingress-deploy.yaml (16078 bytes)
I0923 12:06:10.503227 1858014 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-242108
I0923 12:06:10.503426 1858014 out.go:177] - Using image registry.k8s.io/sig-storage/csi-provisioner:v3.3.0
I0923 12:06:10.504863 1858014 out.go:177] - Using image registry.k8s.io/sig-storage/csi-attacher:v4.0.0
I0923 12:06:10.506372 1858014 addons.go:234] Setting addon default-storageclass=true in "addons-242108"
I0923 12:06:10.506418 1858014 host.go:66] Checking if "addons-242108" exists ...
I0923 12:06:10.506889 1858014 cli_runner.go:164] Run: docker container inspect addons-242108 --format={{.State.Status}}
I0923 12:06:10.507684 1858014 out.go:177] - Using image registry.k8s.io/sig-storage/csi-external-health-monitor-controller:v0.7.0
I0923 12:06:10.508827 1858014 addons.go:431] installing /etc/kubernetes/addons/rbac-external-attacher.yaml
I0923 12:06:10.508848 1858014 ssh_runner.go:362] scp csi-hostpath-driver/rbac/rbac-external-attacher.yaml --> /etc/kubernetes/addons/rbac-external-attacher.yaml (3073 bytes)
I0923 12:06:10.508913 1858014 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-242108
I0923 12:06:10.510423 1858014 out.go:177] - Using image registry.k8s.io/sig-storage/snapshot-controller:v6.1.0
I0923 12:06:10.513366 1858014 addons.go:431] installing /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml
I0923 12:06:10.513388 1858014 ssh_runner.go:362] scp volumesnapshots/csi-hostpath-snapshotclass.yaml --> /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml (934 bytes)
I0923 12:06:10.513441 1858014 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-242108
I0923 12:06:10.526347 1858014 out.go:177] - Using image docker.io/registry:2.8.3
I0923 12:06:10.527607 1858014 out.go:177] - Using image gcr.io/k8s-minikube/kube-registry-proxy:0.0.6
I0923 12:06:10.532454 1858014 addons.go:431] installing /etc/kubernetes/addons/registry-rc.yaml
I0923 12:06:10.532476 1858014 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/registry-rc.yaml (860 bytes)
I0923 12:06:10.532541 1858014 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-242108
I0923 12:06:10.534356 1858014 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19690-1849651/.minikube/machines/addons-242108/id_rsa Username:docker}
I0923 12:06:10.541126 1858014 out.go:177] - Using image docker.io/rancher/local-path-provisioner:v0.0.22
I0923 12:06:10.542833 1858014 out.go:177] - Using image docker.io/busybox:stable
I0923 12:06:10.546329 1858014 addons.go:431] installing /etc/kubernetes/addons/storage-provisioner-rancher.yaml
I0923 12:06:10.546350 1858014 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/storage-provisioner-rancher.yaml (3113 bytes)
I0923 12:06:10.546405 1858014 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-242108
I0923 12:06:10.564802 1858014 out.go:177] - Using image docker.io/marcnuri/yakd:0.0.5
I0923 12:06:10.569509 1858014 addons.go:431] installing /etc/kubernetes/addons/yakd-ns.yaml
I0923 12:06:10.569535 1858014 ssh_runner.go:362] scp yakd/yakd-ns.yaml --> /etc/kubernetes/addons/yakd-ns.yaml (171 bytes)
I0923 12:06:10.569588 1858014 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-242108
I0923 12:06:10.572182 1858014 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19690-1849651/.minikube/machines/addons-242108/id_rsa Username:docker}
I0923 12:06:10.572338 1858014 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19690-1849651/.minikube/machines/addons-242108/id_rsa Username:docker}
I0923 12:06:10.574724 1858014 out.go:177] - Using image ghcr.io/inspektor-gadget/inspektor-gadget:v0.32.0
I0923 12:06:10.574845 1858014 out.go:177] - Using image gcr.io/k8s-minikube/storage-provisioner:v5
I0923 12:06:10.576988 1858014 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19690-1849651/.minikube/machines/addons-242108/id_rsa Username:docker}
I0923 12:06:10.578938 1858014 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19690-1849651/.minikube/machines/addons-242108/id_rsa Username:docker}
I0923 12:06:10.584402 1858014 addons.go:431] installing /etc/kubernetes/addons/ig-namespace.yaml
I0923 12:06:10.584427 1858014 ssh_runner.go:362] scp inspektor-gadget/ig-namespace.yaml --> /etc/kubernetes/addons/ig-namespace.yaml (55 bytes)
I0923 12:06:10.584485 1858014 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-242108
I0923 12:06:10.584750 1858014 addons.go:431] installing /etc/kubernetes/addons/storage-provisioner.yaml
I0923 12:06:10.584765 1858014 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/storage-provisioner.yaml (2676 bytes)
I0923 12:06:10.584812 1858014 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-242108
I0923 12:06:10.593015 1858014 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19690-1849651/.minikube/machines/addons-242108/id_rsa Username:docker}
I0923 12:06:10.593028 1858014 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19690-1849651/.minikube/machines/addons-242108/id_rsa Username:docker}
I0923 12:06:10.593040 1858014 addons.go:431] installing /etc/kubernetes/addons/storageclass.yaml
I0923 12:06:10.593128 1858014 ssh_runner.go:362] scp storageclass/storageclass.yaml --> /etc/kubernetes/addons/storageclass.yaml (271 bytes)
I0923 12:06:10.593185 1858014 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-242108
I0923 12:06:10.594846 1858014 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19690-1849651/.minikube/machines/addons-242108/id_rsa Username:docker}
I0923 12:06:10.608452 1858014 out.go:177] - Using image docker.io/volcanosh/vc-controller-manager:v1.9.0
I0923 12:06:10.608455 1858014 out.go:177] - Using image gcr.io/cloud-spanner-emulator/emulator:1.5.24
I0923 12:06:10.610046 1858014 addons.go:431] installing /etc/kubernetes/addons/deployment.yaml
I0923 12:06:10.610077 1858014 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/deployment.yaml (1004 bytes)
I0923 12:06:10.610142 1858014 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-242108
I0923 12:06:10.614278 1858014 out.go:177] - Using image docker.io/volcanosh/vc-scheduler:v1.9.0
I0923 12:06:10.616248 1858014 out.go:177] - Using image docker.io/volcanosh/vc-webhook-manager:v1.9.0
I0923 12:06:10.617754 1858014 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19690-1849651/.minikube/machines/addons-242108/id_rsa Username:docker}
I0923 12:06:10.621161 1858014 addons.go:431] installing /etc/kubernetes/addons/volcano-deployment.yaml
I0923 12:06:10.621189 1858014 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/volcano-deployment.yaml (434001 bytes)
I0923 12:06:10.621258 1858014 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-242108
I0923 12:06:10.622434 1858014 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19690-1849651/.minikube/machines/addons-242108/id_rsa Username:docker}
I0923 12:06:10.622874 1858014 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19690-1849651/.minikube/machines/addons-242108/id_rsa Username:docker}
I0923 12:06:10.623924 1858014 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19690-1849651/.minikube/machines/addons-242108/id_rsa Username:docker}
I0923 12:06:10.629238 1858014 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19690-1849651/.minikube/machines/addons-242108/id_rsa Username:docker}
I0923 12:06:10.637438 1858014 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19690-1849651/.minikube/machines/addons-242108/id_rsa Username:docker}
I0923 12:06:10.762288 1858014 ssh_runner.go:195] Run: sudo systemctl start kubelet
I0923 12:06:10.762524 1858014 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.31.1/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml | sed -e '/^ forward . \/etc\/resolv.conf.*/i \ hosts {\n 192.168.49.1 host.minikube.internal\n fallthrough\n }' -e '/^ errors *$/i \ log' | sudo /var/lib/minikube/binaries/v1.31.1/kubectl --kubeconfig=/var/lib/minikube/kubeconfig replace -f -"
I0923 12:06:10.955337 1858014 addons.go:431] installing /etc/kubernetes/addons/metrics-server-deployment.yaml
I0923 12:06:10.955368 1858014 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/metrics-server-deployment.yaml (1907 bytes)
I0923 12:06:11.049347 1858014 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/ingress-dns-pod.yaml
I0923 12:06:11.055633 1858014 addons.go:431] installing /etc/kubernetes/addons/rbac-hostpath.yaml
I0923 12:06:11.055668 1858014 ssh_runner.go:362] scp csi-hostpath-driver/rbac/rbac-hostpath.yaml --> /etc/kubernetes/addons/rbac-hostpath.yaml (4266 bytes)
I0923 12:06:11.156759 1858014 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/nvidia-device-plugin.yaml
I0923 12:06:11.162585 1858014 addons.go:431] installing /etc/kubernetes/addons/metrics-server-rbac.yaml
I0923 12:06:11.162615 1858014 ssh_runner.go:362] scp metrics-server/metrics-server-rbac.yaml --> /etc/kubernetes/addons/metrics-server-rbac.yaml (2175 bytes)
I0923 12:06:11.245459 1858014 addons.go:431] installing /etc/kubernetes/addons/yakd-sa.yaml
I0923 12:06:11.245567 1858014 ssh_runner.go:362] scp yakd/yakd-sa.yaml --> /etc/kubernetes/addons/yakd-sa.yaml (247 bytes)
I0923 12:06:11.252685 1858014 addons.go:431] installing /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml
I0923 12:06:11.252774 1858014 ssh_runner.go:362] scp volumesnapshots/snapshot.storage.k8s.io_volumesnapshotclasses.yaml --> /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml (6471 bytes)
I0923 12:06:11.254979 1858014 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/storage-provisioner-rancher.yaml
I0923 12:06:11.260217 1858014 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/ingress-deploy.yaml
I0923 12:06:11.344246 1858014 addons.go:431] installing /etc/kubernetes/addons/registry-svc.yaml
I0923 12:06:11.344356 1858014 ssh_runner.go:362] scp registry/registry-svc.yaml --> /etc/kubernetes/addons/registry-svc.yaml (398 bytes)
I0923 12:06:11.348472 1858014 addons.go:431] installing /etc/kubernetes/addons/rbac-external-health-monitor-controller.yaml
I0923 12:06:11.348504 1858014 ssh_runner.go:362] scp csi-hostpath-driver/rbac/rbac-external-health-monitor-controller.yaml --> /etc/kubernetes/addons/rbac-external-health-monitor-controller.yaml (3038 bytes)
I0923 12:06:11.350167 1858014 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml
I0923 12:06:11.351955 1858014 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml
I0923 12:06:11.355953 1858014 addons.go:431] installing /etc/kubernetes/addons/ig-serviceaccount.yaml
I0923 12:06:11.356010 1858014 ssh_runner.go:362] scp inspektor-gadget/ig-serviceaccount.yaml --> /etc/kubernetes/addons/ig-serviceaccount.yaml (80 bytes)
I0923 12:06:11.365635 1858014 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/volcano-deployment.yaml
I0923 12:06:11.445035 1858014 addons.go:431] installing /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml
I0923 12:06:11.445134 1858014 ssh_runner.go:362] scp volumesnapshots/snapshot.storage.k8s.io_volumesnapshotcontents.yaml --> /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml (23126 bytes)
I0923 12:06:11.459924 1858014 addons.go:431] installing /etc/kubernetes/addons/rbac-external-provisioner.yaml
I0923 12:06:11.460034 1858014 ssh_runner.go:362] scp csi-hostpath-driver/rbac/rbac-external-provisioner.yaml --> /etc/kubernetes/addons/rbac-external-provisioner.yaml (4442 bytes)
I0923 12:06:11.546326 1858014 addons.go:431] installing /etc/kubernetes/addons/metrics-server-service.yaml
I0923 12:06:11.546419 1858014 ssh_runner.go:362] scp metrics-server/metrics-server-service.yaml --> /etc/kubernetes/addons/metrics-server-service.yaml (446 bytes)
I0923 12:06:11.557952 1858014 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/deployment.yaml
I0923 12:06:11.570404 1858014 addons.go:431] installing /etc/kubernetes/addons/registry-proxy.yaml
I0923 12:06:11.570509 1858014 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/registry-proxy.yaml (947 bytes)
I0923 12:06:11.649616 1858014 addons.go:431] installing /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml
I0923 12:06:11.649707 1858014 ssh_runner.go:362] scp volumesnapshots/snapshot.storage.k8s.io_volumesnapshots.yaml --> /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml (19582 bytes)
I0923 12:06:11.653147 1858014 addons.go:431] installing /etc/kubernetes/addons/yakd-crb.yaml
I0923 12:06:11.653235 1858014 ssh_runner.go:362] scp yakd/yakd-crb.yaml --> /etc/kubernetes/addons/yakd-crb.yaml (422 bytes)
I0923 12:06:11.764894 1858014 addons.go:431] installing /etc/kubernetes/addons/rbac-external-resizer.yaml
I0923 12:06:11.764993 1858014 ssh_runner.go:362] scp csi-hostpath-driver/rbac/rbac-external-resizer.yaml --> /etc/kubernetes/addons/rbac-external-resizer.yaml (2943 bytes)
I0923 12:06:11.853991 1858014 addons.go:431] installing /etc/kubernetes/addons/ig-role.yaml
I0923 12:06:11.854084 1858014 ssh_runner.go:362] scp inspektor-gadget/ig-role.yaml --> /etc/kubernetes/addons/ig-role.yaml (210 bytes)
I0923 12:06:11.855937 1858014 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/metrics-apiservice.yaml -f /etc/kubernetes/addons/metrics-server-deployment.yaml -f /etc/kubernetes/addons/metrics-server-rbac.yaml -f /etc/kubernetes/addons/metrics-server-service.yaml
I0923 12:06:11.857321 1858014 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/registry-rc.yaml -f /etc/kubernetes/addons/registry-svc.yaml -f /etc/kubernetes/addons/registry-proxy.yaml
I0923 12:06:11.865897 1858014 addons.go:431] installing /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml
I0923 12:06:11.865992 1858014 ssh_runner.go:362] scp volumesnapshots/rbac-volume-snapshot-controller.yaml --> /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml (3545 bytes)
I0923 12:06:12.061748 1858014 addons.go:431] installing /etc/kubernetes/addons/yakd-svc.yaml
I0923 12:06:12.061781 1858014 ssh_runner.go:362] scp yakd/yakd-svc.yaml --> /etc/kubernetes/addons/yakd-svc.yaml (412 bytes)
I0923 12:06:12.160061 1858014 addons.go:431] installing /etc/kubernetes/addons/ig-rolebinding.yaml
I0923 12:06:12.160193 1858014 ssh_runner.go:362] scp inspektor-gadget/ig-rolebinding.yaml --> /etc/kubernetes/addons/ig-rolebinding.yaml (244 bytes)
I0923 12:06:12.456992 1858014 addons.go:431] installing /etc/kubernetes/addons/rbac-external-snapshotter.yaml
I0923 12:06:12.457094 1858014 ssh_runner.go:362] scp csi-hostpath-driver/rbac/rbac-external-snapshotter.yaml --> /etc/kubernetes/addons/rbac-external-snapshotter.yaml (3149 bytes)
I0923 12:06:12.464245 1858014 addons.go:431] installing /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml
I0923 12:06:12.464334 1858014 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml (1475 bytes)
I0923 12:06:12.564338 1858014 addons.go:431] installing /etc/kubernetes/addons/yakd-dp.yaml
I0923 12:06:12.564367 1858014 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/yakd-dp.yaml (2017 bytes)
I0923 12:06:12.950718 1858014 ssh_runner.go:235] Completed: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.31.1/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml | sed -e '/^ forward . \/etc\/resolv.conf.*/i \ hosts {\n 192.168.49.1 host.minikube.internal\n fallthrough\n }' -e '/^ errors *$/i \ log' | sudo /var/lib/minikube/binaries/v1.31.1/kubectl --kubeconfig=/var/lib/minikube/kubeconfig replace -f -": (2.188144369s)
I0923 12:06:12.950835 1858014 start.go:971] {"host.minikube.internal": 192.168.49.1} host record injected into CoreDNS's ConfigMap
I0923 12:06:12.951842 1858014 ssh_runner.go:235] Completed: sudo systemctl start kubelet: (2.189519718s)
I0923 12:06:12.952711 1858014 node_ready.go:35] waiting up to 6m0s for node "addons-242108" to be "Ready" ...
I0923 12:06:12.955464 1858014 addons.go:431] installing /etc/kubernetes/addons/csi-hostpath-attacher.yaml
I0923 12:06:12.955541 1858014 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/csi-hostpath-attacher.yaml (2143 bytes)
I0923 12:06:12.960859 1858014 node_ready.go:49] node "addons-242108" has status "Ready":"True"
I0923 12:06:12.960925 1858014 node_ready.go:38] duration metric: took 8.180006ms for node "addons-242108" to be "Ready" ...
I0923 12:06:12.960952 1858014 pod_ready.go:36] extra waiting up to 6m0s for all system-critical pods including labels [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] to be "Ready" ...
I0923 12:06:12.966043 1858014 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/yakd-ns.yaml -f /etc/kubernetes/addons/yakd-sa.yaml -f /etc/kubernetes/addons/yakd-crb.yaml -f /etc/kubernetes/addons/yakd-svc.yaml -f /etc/kubernetes/addons/yakd-dp.yaml
I0923 12:06:12.966787 1858014 addons.go:431] installing /etc/kubernetes/addons/ig-clusterrole.yaml
I0923 12:06:12.966812 1858014 ssh_runner.go:362] scp inspektor-gadget/ig-clusterrole.yaml --> /etc/kubernetes/addons/ig-clusterrole.yaml (1485 bytes)
I0923 12:06:12.971485 1858014 pod_ready.go:79] waiting up to 6m0s for pod "coredns-7c65d6cfc9-lwcml" in "kube-system" namespace to be "Ready" ...
I0923 12:06:13.047888 1858014 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml -f /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml -f /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml
I0923 12:06:13.455473 1858014 kapi.go:214] "coredns" deployment in "kube-system" namespace and "addons-242108" context rescaled to 1 replicas
I0923 12:06:13.546673 1858014 addons.go:431] installing /etc/kubernetes/addons/ig-clusterrolebinding.yaml
I0923 12:06:13.546707 1858014 ssh_runner.go:362] scp inspektor-gadget/ig-clusterrolebinding.yaml --> /etc/kubernetes/addons/ig-clusterrolebinding.yaml (274 bytes)
I0923 12:06:13.761012 1858014 addons.go:431] installing /etc/kubernetes/addons/csi-hostpath-driverinfo.yaml
I0923 12:06:13.761055 1858014 ssh_runner.go:362] scp csi-hostpath-driver/deploy/csi-hostpath-driverinfo.yaml --> /etc/kubernetes/addons/csi-hostpath-driverinfo.yaml (1274 bytes)
I0923 12:06:14.146967 1858014 addons.go:431] installing /etc/kubernetes/addons/ig-crd.yaml
I0923 12:06:14.147050 1858014 ssh_runner.go:362] scp inspektor-gadget/ig-crd.yaml --> /etc/kubernetes/addons/ig-crd.yaml (5216 bytes)
I0923 12:06:14.245616 1858014 addons.go:431] installing /etc/kubernetes/addons/csi-hostpath-plugin.yaml
I0923 12:06:14.245713 1858014 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/csi-hostpath-plugin.yaml (8201 bytes)
I0923 12:06:14.347420 1858014 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/ingress-dns-pod.yaml: (3.298024815s)
I0923 12:06:14.347498 1858014 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/nvidia-device-plugin.yaml: (3.190706669s)
I0923 12:06:14.857969 1858014 addons.go:431] installing /etc/kubernetes/addons/csi-hostpath-resizer.yaml
I0923 12:06:14.858070 1858014 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/csi-hostpath-resizer.yaml (2191 bytes)
I0923 12:06:15.053079 1858014 addons.go:431] installing /etc/kubernetes/addons/ig-daemonset.yaml
I0923 12:06:15.053192 1858014 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/ig-daemonset.yaml (7735 bytes)
I0923 12:06:15.060875 1858014 pod_ready.go:103] pod "coredns-7c65d6cfc9-lwcml" in "kube-system" namespace has status "Ready":"False"
I0923 12:06:15.453447 1858014 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/ig-namespace.yaml -f /etc/kubernetes/addons/ig-serviceaccount.yaml -f /etc/kubernetes/addons/ig-role.yaml -f /etc/kubernetes/addons/ig-rolebinding.yaml -f /etc/kubernetes/addons/ig-clusterrole.yaml -f /etc/kubernetes/addons/ig-clusterrolebinding.yaml -f /etc/kubernetes/addons/ig-crd.yaml -f /etc/kubernetes/addons/ig-daemonset.yaml
I0923 12:06:15.549323 1858014 addons.go:431] installing /etc/kubernetes/addons/csi-hostpath-storageclass.yaml
I0923 12:06:15.549360 1858014 ssh_runner.go:362] scp csi-hostpath-driver/deploy/csi-hostpath-storageclass.yaml --> /etc/kubernetes/addons/csi-hostpath-storageclass.yaml (846 bytes)
I0923 12:06:16.050587 1858014 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/rbac-external-attacher.yaml -f /etc/kubernetes/addons/rbac-hostpath.yaml -f /etc/kubernetes/addons/rbac-external-health-monitor-controller.yaml -f /etc/kubernetes/addons/rbac-external-provisioner.yaml -f /etc/kubernetes/addons/rbac-external-resizer.yaml -f /etc/kubernetes/addons/rbac-external-snapshotter.yaml -f /etc/kubernetes/addons/csi-hostpath-attacher.yaml -f /etc/kubernetes/addons/csi-hostpath-driverinfo.yaml -f /etc/kubernetes/addons/csi-hostpath-plugin.yaml -f /etc/kubernetes/addons/csi-hostpath-resizer.yaml -f /etc/kubernetes/addons/csi-hostpath-storageclass.yaml
I0923 12:06:16.545506 1858014 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/storage-provisioner-rancher.yaml: (5.290437587s)
I0923 12:06:17.550581 1858014 ssh_runner.go:362] scp memory --> /var/lib/minikube/google_application_credentials.json (162 bytes)
I0923 12:06:17.550751 1858014 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-242108
I0923 12:06:17.551335 1858014 pod_ready.go:103] pod "coredns-7c65d6cfc9-lwcml" in "kube-system" namespace has status "Ready":"False"
I0923 12:06:17.576563 1858014 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19690-1849651/.minikube/machines/addons-242108/id_rsa Username:docker}
I0923 12:06:18.556147 1858014 ssh_runner.go:362] scp memory --> /var/lib/minikube/google_cloud_project (12 bytes)
I0923 12:06:18.868200 1858014 addons.go:234] Setting addon gcp-auth=true in "addons-242108"
I0923 12:06:18.868286 1858014 host.go:66] Checking if "addons-242108" exists ...
I0923 12:06:18.869104 1858014 cli_runner.go:164] Run: docker container inspect addons-242108 --format={{.State.Status}}
I0923 12:06:18.887618 1858014 ssh_runner.go:195] Run: cat /var/lib/minikube/google_application_credentials.json
I0923 12:06:18.887673 1858014 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-242108
I0923 12:06:18.903690 1858014 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19690-1849651/.minikube/machines/addons-242108/id_rsa Username:docker}
I0923 12:06:19.552201 1858014 pod_ready.go:103] pod "coredns-7c65d6cfc9-lwcml" in "kube-system" namespace has status "Ready":"False"
I0923 12:06:20.645824 1858014 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/ingress-deploy.yaml: (9.385552298s)
I0923 12:06:20.645921 1858014 addons.go:475] Verifying addon ingress=true in "addons-242108"
I0923 12:06:20.645955 1858014 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml: (9.295686748s)
I0923 12:06:20.646254 1858014 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml: (9.294248119s)
I0923 12:06:20.649090 1858014 out.go:177] * Verifying ingress addon...
I0923 12:06:20.651785 1858014 kapi.go:75] Waiting for pod with label "app.kubernetes.io/name=ingress-nginx" in ns "ingress-nginx" ...
I0923 12:06:20.657104 1858014 kapi.go:86] Found 3 Pods for label selector app.kubernetes.io/name=ingress-nginx
I0923 12:06:20.657136 1858014 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 12:06:21.164030 1858014 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 12:06:21.657690 1858014 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 12:06:22.061112 1858014 pod_ready.go:103] pod "coredns-7c65d6cfc9-lwcml" in "kube-system" namespace has status "Ready":"False"
I0923 12:06:22.248137 1858014 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 12:06:22.657232 1858014 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 12:06:23.157974 1858014 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/deployment.yaml: (11.599917142s)
I0923 12:06:23.158056 1858014 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/registry-rc.yaml -f /etc/kubernetes/addons/registry-svc.yaml -f /etc/kubernetes/addons/registry-proxy.yaml: (11.3006597s)
I0923 12:06:23.158070 1858014 addons.go:475] Verifying addon registry=true in "addons-242108"
I0923 12:06:23.158386 1858014 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/metrics-apiservice.yaml -f /etc/kubernetes/addons/metrics-server-deployment.yaml -f /etc/kubernetes/addons/metrics-server-rbac.yaml -f /etc/kubernetes/addons/metrics-server-service.yaml: (11.302409232s)
I0923 12:06:23.158415 1858014 addons.go:475] Verifying addon metrics-server=true in "addons-242108"
I0923 12:06:23.158478 1858014 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/yakd-ns.yaml -f /etc/kubernetes/addons/yakd-sa.yaml -f /etc/kubernetes/addons/yakd-crb.yaml -f /etc/kubernetes/addons/yakd-svc.yaml -f /etc/kubernetes/addons/yakd-dp.yaml: (10.1924045s)
I0923 12:06:23.158819 1858014 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml -f /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml -f /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml: (10.110819223s)
W0923 12:06:23.158854 1858014 addons.go:457] apply failed, will retry: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml -f /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml -f /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml: Process exited with status 1
stdout:
customresourcedefinition.apiextensions.k8s.io/volumesnapshotclasses.snapshot.storage.k8s.io created
customresourcedefinition.apiextensions.k8s.io/volumesnapshotcontents.snapshot.storage.k8s.io created
customresourcedefinition.apiextensions.k8s.io/volumesnapshots.snapshot.storage.k8s.io created
serviceaccount/snapshot-controller created
clusterrole.rbac.authorization.k8s.io/snapshot-controller-runner created
clusterrolebinding.rbac.authorization.k8s.io/snapshot-controller-role created
role.rbac.authorization.k8s.io/snapshot-controller-leaderelection created
rolebinding.rbac.authorization.k8s.io/snapshot-controller-leaderelection created
deployment.apps/snapshot-controller created
stderr:
error: resource mapping not found for name: "csi-hostpath-snapclass" namespace: "" from "/etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml": no matches for kind "VolumeSnapshotClass" in version "snapshot.storage.k8s.io/v1"
ensure CRDs are installed first
I0923 12:06:23.158913 1858014 retry.go:31] will retry after 212.277607ms: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml -f /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml -f /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml: Process exited with status 1
stdout:
customresourcedefinition.apiextensions.k8s.io/volumesnapshotclasses.snapshot.storage.k8s.io created
customresourcedefinition.apiextensions.k8s.io/volumesnapshotcontents.snapshot.storage.k8s.io created
customresourcedefinition.apiextensions.k8s.io/volumesnapshots.snapshot.storage.k8s.io created
serviceaccount/snapshot-controller created
clusterrole.rbac.authorization.k8s.io/snapshot-controller-runner created
clusterrolebinding.rbac.authorization.k8s.io/snapshot-controller-role created
role.rbac.authorization.k8s.io/snapshot-controller-leaderelection created
rolebinding.rbac.authorization.k8s.io/snapshot-controller-leaderelection created
deployment.apps/snapshot-controller created
stderr:
error: resource mapping not found for name: "csi-hostpath-snapclass" namespace: "" from "/etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml": no matches for kind "VolumeSnapshotClass" in version "snapshot.storage.k8s.io/v1"
ensure CRDs are installed first
I0923 12:06:23.159030 1858014 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/ig-namespace.yaml -f /etc/kubernetes/addons/ig-serviceaccount.yaml -f /etc/kubernetes/addons/ig-role.yaml -f /etc/kubernetes/addons/ig-rolebinding.yaml -f /etc/kubernetes/addons/ig-clusterrole.yaml -f /etc/kubernetes/addons/ig-clusterrolebinding.yaml -f /etc/kubernetes/addons/ig-crd.yaml -f /etc/kubernetes/addons/ig-daemonset.yaml: (7.705542711s)
I0923 12:06:23.159248 1858014 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/volcano-deployment.yaml: (11.793525978s)
I0923 12:06:23.160055 1858014 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 12:06:23.161142 1858014 out.go:177] * To access YAKD - Kubernetes Dashboard, wait for Pod to be ready and run the following command:
minikube -p addons-242108 service yakd-dashboard -n yakd-dashboard
I0923 12:06:23.161257 1858014 out.go:177] * Verifying registry addon...
I0923 12:06:23.165292 1858014 kapi.go:75] Waiting for pod with label "kubernetes.io/minikube-addons=registry" in ns "kube-system" ...
I0923 12:06:23.168438 1858014 kapi.go:86] Found 2 Pods for label selector kubernetes.io/minikube-addons=registry
I0923 12:06:23.168455 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0923 12:06:23.371636 1858014 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply --force -f /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml -f /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml -f /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml
I0923 12:06:23.658281 1858014 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 12:06:23.759015 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0923 12:06:24.157287 1858014 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 12:06:24.256805 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0923 12:06:24.555506 1858014 pod_ready.go:103] pod "coredns-7c65d6cfc9-lwcml" in "kube-system" namespace has status "Ready":"False"
I0923 12:06:24.561426 1858014 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/rbac-external-attacher.yaml -f /etc/kubernetes/addons/rbac-hostpath.yaml -f /etc/kubernetes/addons/rbac-external-health-monitor-controller.yaml -f /etc/kubernetes/addons/rbac-external-provisioner.yaml -f /etc/kubernetes/addons/rbac-external-resizer.yaml -f /etc/kubernetes/addons/rbac-external-snapshotter.yaml -f /etc/kubernetes/addons/csi-hostpath-attacher.yaml -f /etc/kubernetes/addons/csi-hostpath-driverinfo.yaml -f /etc/kubernetes/addons/csi-hostpath-plugin.yaml -f /etc/kubernetes/addons/csi-hostpath-resizer.yaml -f /etc/kubernetes/addons/csi-hostpath-storageclass.yaml: (8.510770976s)
I0923 12:06:24.561468 1858014 addons.go:475] Verifying addon csi-hostpath-driver=true in "addons-242108"
I0923 12:06:24.561707 1858014 ssh_runner.go:235] Completed: cat /var/lib/minikube/google_application_credentials.json: (5.673869276s)
I0923 12:06:24.562856 1858014 out.go:177] * Verifying csi-hostpath-driver addon...
I0923 12:06:24.562970 1858014 out.go:177] - Using image registry.k8s.io/ingress-nginx/kube-webhook-certgen:v1.4.3
I0923 12:06:24.564784 1858014 out.go:177] - Using image gcr.io/k8s-minikube/gcp-auth-webhook:v0.1.2
I0923 12:06:24.565761 1858014 kapi.go:75] Waiting for pod with label "kubernetes.io/minikube-addons=csi-hostpath-driver" in ns "kube-system" ...
I0923 12:06:24.566325 1858014 addons.go:431] installing /etc/kubernetes/addons/gcp-auth-ns.yaml
I0923 12:06:24.566374 1858014 ssh_runner.go:362] scp gcp-auth/gcp-auth-ns.yaml --> /etc/kubernetes/addons/gcp-auth-ns.yaml (700 bytes)
I0923 12:06:24.571115 1858014 kapi.go:86] Found 3 Pods for label selector kubernetes.io/minikube-addons=csi-hostpath-driver
I0923 12:06:24.571136 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 12:06:24.656675 1858014 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 12:06:24.663796 1858014 addons.go:431] installing /etc/kubernetes/addons/gcp-auth-service.yaml
I0923 12:06:24.663836 1858014 ssh_runner.go:362] scp gcp-auth/gcp-auth-service.yaml --> /etc/kubernetes/addons/gcp-auth-service.yaml (788 bytes)
I0923 12:06:24.675411 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0923 12:06:24.684791 1858014 addons.go:431] installing /etc/kubernetes/addons/gcp-auth-webhook.yaml
I0923 12:06:24.684810 1858014 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/gcp-auth-webhook.yaml (5421 bytes)
I0923 12:06:24.765606 1858014 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/gcp-auth-ns.yaml -f /etc/kubernetes/addons/gcp-auth-service.yaml -f /etc/kubernetes/addons/gcp-auth-webhook.yaml
I0923 12:06:25.071838 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 12:06:25.156495 1858014 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 12:06:25.169415 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0923 12:06:25.571430 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 12:06:25.657371 1858014 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 12:06:25.670080 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0923 12:06:26.044791 1858014 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply --force -f /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml -f /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml -f /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml: (2.673086558s)
I0923 12:06:26.072489 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 12:06:26.172467 1858014 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 12:06:26.173096 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0923 12:06:26.377041 1858014 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/gcp-auth-ns.yaml -f /etc/kubernetes/addons/gcp-auth-service.yaml -f /etc/kubernetes/addons/gcp-auth-webhook.yaml: (1.61138785s)
I0923 12:06:26.379158 1858014 addons.go:475] Verifying addon gcp-auth=true in "addons-242108"
I0923 12:06:26.380801 1858014 out.go:177] * Verifying gcp-auth addon...
I0923 12:06:26.382596 1858014 kapi.go:75] Waiting for pod with label "kubernetes.io/minikube-addons=gcp-auth" in ns "gcp-auth" ...
I0923 12:06:26.444488 1858014 kapi.go:86] Found 0 Pods for label selector kubernetes.io/minikube-addons=gcp-auth
I0923 12:06:26.479679 1858014 pod_ready.go:98] pod "coredns-7c65d6cfc9-lwcml" in "kube-system" namespace has status phase "Succeeded" (skipping!): {Phase:Succeeded Conditions:[{Type:PodReadyToStartContainers Status:False LastProbeTime:0001-01-01 00:00:00 +0000 UTC LastTransitionTime:2024-09-23 12:06:26 +0000 UTC Reason: Message:} {Type:Initialized Status:True LastProbeTime:0001-01-01 00:00:00 +0000 UTC LastTransitionTime:2024-09-23 12:06:11 +0000 UTC Reason:PodCompleted Message:} {Type:Ready Status:False LastProbeTime:0001-01-01 00:00:00 +0000 UTC LastTransitionTime:2024-09-23 12:06:11 +0000 UTC Reason:PodCompleted Message:} {Type:ContainersReady Status:False LastProbeTime:0001-01-01 00:00:00 +0000 UTC LastTransitionTime:2024-09-23 12:06:11 +0000 UTC Reason:PodCompleted Message:} {Type:PodScheduled Status:True LastProbeTime:0001-01-01 00:00:00 +0000 UTC LastTransitionTime:2024-09-23 12:06:11 +0000 UTC Reason: Message:}] Message: Reason: NominatedNodeName: HostIP:192.168.49.2 HostIPs:[{IP:192.168.49.2}] PodIP:10.244.0.2 PodIPs:[{IP:10.244.0.2}] StartTime:2024-09-23 12:06:11 +0000 UTC InitContainerStatuses:[] ContainerStatuses:[{Name:coredns State:{Waiting:nil Running:nil Terminated:&ContainerStateTerminated{ExitCode:0,Signal:0,Reason:Completed,Message:,StartedAt:2024-09-23 12:06:14 +0000 UTC,FinishedAt:2024-09-23 12:06:25 +0000 UTC,ContainerID:docker://d53e33cdf27058c5516a27345bd639e4c51ee785d9b04e214ac07d302ce781f0,}} LastTerminationState:{Waiting:nil Running:nil Terminated:nil} Ready:false RestartCount:0 Image:registry.k8s.io/coredns/coredns:v1.11.3 ImageID:docker-pullable://registry.k8s.io/coredns/coredns@sha256:9caabbf6238b189a65d0d6e6ac138de60d6a1c419e5a341fbbb7c78382559c6e ContainerID:docker://d53e33cdf27058c5516a27345bd639e4c51ee785d9b04e214ac07d302ce781f0 Started:0xc001fae4f0 AllocatedResources:map[] Resources:nil VolumeMounts:[{Name:config-volume MountPath:/etc/coredns ReadOnly:true RecursiveReadOnly:0xc00196c3b0} {Name:kube-api-access-c8mnb MountPath:/var/run/secrets/kubernetes.io/serviceaccount ReadOnly:true RecursiveReadOnly:0xc00196c3c0}] User:nil AllocatedResourcesStatus:[]}] QOSClass:Burstable EphemeralContainerStatuses:[] Resize: ResourceClaimStatuses:[]}
I0923 12:06:26.479710 1858014 pod_ready.go:82] duration metric: took 13.508195849s for pod "coredns-7c65d6cfc9-lwcml" in "kube-system" namespace to be "Ready" ...
E0923 12:06:26.479723 1858014 pod_ready.go:67] WaitExtra: waitPodCondition: pod "coredns-7c65d6cfc9-lwcml" in "kube-system" namespace has status phase "Succeeded" (skipping!): {Phase:Succeeded Conditions:[{Type:PodReadyToStartContainers Status:False LastProbeTime:0001-01-01 00:00:00 +0000 UTC LastTransitionTime:2024-09-23 12:06:26 +0000 UTC Reason: Message:} {Type:Initialized Status:True LastProbeTime:0001-01-01 00:00:00 +0000 UTC LastTransitionTime:2024-09-23 12:06:11 +0000 UTC Reason:PodCompleted Message:} {Type:Ready Status:False LastProbeTime:0001-01-01 00:00:00 +0000 UTC LastTransitionTime:2024-09-23 12:06:11 +0000 UTC Reason:PodCompleted Message:} {Type:ContainersReady Status:False LastProbeTime:0001-01-01 00:00:00 +0000 UTC LastTransitionTime:2024-09-23 12:06:11 +0000 UTC Reason:PodCompleted Message:} {Type:PodScheduled Status:True LastProbeTime:0001-01-01 00:00:00 +0000 UTC LastTransitionTime:2024-09-23 12:06:11 +0000 UTC Reason: Message:}] Message: Reason: NominatedNodeName: HostIP:192.168.49.2 HostIPs:[{IP:192.168.49.2}] PodIP:10.244.0.2 PodIPs:[{IP:10.244.0.2}] StartTime:2024-09-23 12:06:11 +0000 UTC InitContainerStatuses:[] ContainerStatuses:[{Name:coredns State:{Waiting:nil Running:nil Terminated:&ContainerStateTerminated{ExitCode:0,Signal:0,Reason:Completed,Message:,StartedAt:2024-09-23 12:06:14 +0000 UTC,FinishedAt:2024-09-23 12:06:25 +0000 UTC,ContainerID:docker://d53e33cdf27058c5516a27345bd639e4c51ee785d9b04e214ac07d302ce781f0,}} LastTerminationState:{Waiting:nil Running:nil Terminated:nil} Ready:false RestartCount:0 Image:registry.k8s.io/coredns/coredns:v1.11.3 ImageID:docker-pullable://registry.k8s.io/coredns/coredns@sha256:9caabbf6238b189a65d0d6e6ac138de60d6a1c419e5a341fbbb7c78382559c6e ContainerID:docker://d53e33cdf27058c5516a27345bd639e4c51ee785d9b04e214ac07d302ce781f0 Started:0xc001fae4f0 AllocatedResources:map[] Resources:nil VolumeMounts:[{Name:config-volume MountPath:/etc/coredns ReadOnly:true RecursiveReadOnly:0xc00196c3b0} {Name:kube-api-access-c8mnb MountPath:/var/run/secrets/kubernetes.io/serviceaccount ReadOnly:true RecursiveReadOnly:0xc00196c3c0}] User:nil AllocatedResourcesStatus:[]}] QOSClass:Burstable EphemeralContainerStatuses:[] Resize: ResourceClaimStatuses:[]}
I0923 12:06:26.479735 1858014 pod_ready.go:79] waiting up to 6m0s for pod "coredns-7c65d6cfc9-mv6mb" in "kube-system" namespace to be "Ready" ...
I0923 12:06:26.570586 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 12:06:26.669872 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0923 12:06:26.670174 1858014 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 12:06:27.071125 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 12:06:27.156327 1858014 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 12:06:27.169807 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0923 12:06:27.571481 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 12:06:27.656188 1858014 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 12:06:27.669525 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0923 12:06:28.070699 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 12:06:28.156403 1858014 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 12:06:28.168572 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0923 12:06:28.486136 1858014 pod_ready.go:103] pod "coredns-7c65d6cfc9-mv6mb" in "kube-system" namespace has status "Ready":"False"
I0923 12:06:28.570115 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 12:06:28.656582 1858014 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 12:06:28.668710 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0923 12:06:29.070171 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 12:06:29.155652 1858014 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 12:06:29.168774 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0923 12:06:29.570486 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 12:06:29.656030 1858014 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 12:06:29.668723 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0923 12:06:30.094491 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 12:06:30.156480 1858014 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 12:06:30.256180 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0923 12:06:30.571313 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 12:06:30.670357 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0923 12:06:30.670804 1858014 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 12:06:30.986423 1858014 pod_ready.go:103] pod "coredns-7c65d6cfc9-mv6mb" in "kube-system" namespace has status "Ready":"False"
I0923 12:06:31.070709 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 12:06:31.155857 1858014 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 12:06:31.169049 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0923 12:06:31.571926 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 12:06:31.671784 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0923 12:06:31.672201 1858014 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 12:06:32.070942 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 12:06:32.156651 1858014 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 12:06:32.168457 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0923 12:06:32.570911 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 12:06:32.656794 1858014 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 12:06:32.668926 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0923 12:06:33.070358 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 12:06:33.155662 1858014 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 12:06:33.169110 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0923 12:06:33.485935 1858014 pod_ready.go:103] pod "coredns-7c65d6cfc9-mv6mb" in "kube-system" namespace has status "Ready":"False"
I0923 12:06:33.571678 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 12:06:33.656370 1858014 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 12:06:33.668582 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0923 12:06:34.070680 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 12:06:34.156488 1858014 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 12:06:34.169167 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0923 12:06:34.570774 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 12:06:34.657292 1858014 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 12:06:34.757618 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0923 12:06:35.070555 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 12:06:35.156341 1858014 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 12:06:35.168199 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0923 12:06:35.486661 1858014 pod_ready.go:103] pod "coredns-7c65d6cfc9-mv6mb" in "kube-system" namespace has status "Ready":"False"
I0923 12:06:35.570567 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 12:06:35.656603 1858014 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 12:06:35.669227 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0923 12:06:36.070602 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 12:06:36.156081 1858014 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 12:06:36.168531 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0923 12:06:36.570601 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 12:06:36.670193 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0923 12:06:36.671113 1858014 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 12:06:37.070261 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 12:06:37.157354 1858014 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 12:06:37.168629 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0923 12:06:37.570950 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 12:06:37.656122 1858014 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 12:06:37.669142 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0923 12:06:37.985260 1858014 pod_ready.go:103] pod "coredns-7c65d6cfc9-mv6mb" in "kube-system" namespace has status "Ready":"False"
I0923 12:06:38.070403 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 12:06:38.155824 1858014 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 12:06:38.169145 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0923 12:06:38.569848 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 12:06:38.655387 1858014 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 12:06:38.669182 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0923 12:06:39.069993 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 12:06:39.155440 1858014 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 12:06:39.168592 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0923 12:06:39.571696 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 12:06:39.656694 1858014 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 12:06:39.668753 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0923 12:06:39.986047 1858014 pod_ready.go:103] pod "coredns-7c65d6cfc9-mv6mb" in "kube-system" namespace has status "Ready":"False"
I0923 12:06:40.069982 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 12:06:40.156185 1858014 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 12:06:40.168709 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0923 12:06:40.570047 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 12:06:40.657326 1858014 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 12:06:40.757375 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0923 12:06:41.103509 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 12:06:41.203970 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0923 12:06:41.204110 1858014 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 12:06:41.571457 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 12:06:41.656097 1858014 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 12:06:41.669526 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0923 12:06:41.986157 1858014 pod_ready.go:103] pod "coredns-7c65d6cfc9-mv6mb" in "kube-system" namespace has status "Ready":"False"
I0923 12:06:42.071206 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 12:06:42.155949 1858014 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 12:06:42.169449 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0923 12:06:42.571567 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 12:06:42.656734 1858014 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 12:06:42.668933 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0923 12:06:43.071545 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 12:06:43.156412 1858014 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 12:06:43.168745 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0923 12:06:43.570454 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 12:06:43.656642 1858014 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 12:06:43.669086 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0923 12:06:43.986711 1858014 pod_ready.go:103] pod "coredns-7c65d6cfc9-mv6mb" in "kube-system" namespace has status "Ready":"False"
I0923 12:06:44.071398 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 12:06:44.156037 1858014 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 12:06:44.169232 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0923 12:06:44.570618 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 12:06:44.657214 1858014 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 12:06:44.669523 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0923 12:06:45.070968 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 12:06:45.155852 1858014 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 12:06:45.168873 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0923 12:06:45.570130 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 12:06:45.655949 1858014 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 12:06:45.669031 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0923 12:06:46.069929 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 12:06:46.156342 1858014 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 12:06:46.169503 1858014 kapi.go:107] duration metric: took 23.004206885s to wait for kubernetes.io/minikube-addons=registry ...
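The kapi.go:96 lines above are minikube polling kube-system until pods matching each addon's label selector leave Pending; the registry selector just finished after roughly 23s. A roughly equivalent manual check, assuming the same profile/context name addons-242108 taken from this run, would be:

    kubectl --context addons-242108 -n kube-system wait --for=condition=Ready pod -l kubernetes.io/minikube-addons=registry --timeout=6m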
I0923 12:06:46.485979 1858014 pod_ready.go:103] pod "coredns-7c65d6cfc9-mv6mb" in "kube-system" namespace has status "Ready":"False"
I0923 12:06:46.570659 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 12:06:46.656684 1858014 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 12:06:47.070775 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 12:06:47.155261 1858014 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 12:06:47.570236 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 12:06:47.655960 1858014 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 12:06:48.071052 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 12:06:48.156205 1858014 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 12:06:48.486064 1858014 pod_ready.go:103] pod "coredns-7c65d6cfc9-mv6mb" in "kube-system" namespace has status "Ready":"False"
I0923 12:06:48.570088 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 12:06:48.656274 1858014 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 12:06:49.070799 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 12:06:49.156276 1858014 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 12:06:49.570309 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 12:06:49.656430 1858014 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 12:06:50.070951 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 12:06:50.156144 1858014 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 12:06:50.571300 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 12:06:50.655401 1858014 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 12:06:50.986494 1858014 pod_ready.go:103] pod "coredns-7c65d6cfc9-mv6mb" in "kube-system" namespace has status "Ready":"False"
I0923 12:06:51.070636 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 12:06:51.155871 1858014 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 12:06:51.486014 1858014 pod_ready.go:93] pod "coredns-7c65d6cfc9-mv6mb" in "kube-system" namespace has status "Ready":"True"
I0923 12:06:51.486037 1858014 pod_ready.go:82] duration metric: took 25.006290168s for pod "coredns-7c65d6cfc9-mv6mb" in "kube-system" namespace to be "Ready" ...
I0923 12:06:51.486048 1858014 pod_ready.go:79] waiting up to 6m0s for pod "etcd-addons-242108" in "kube-system" namespace to be "Ready" ...
I0923 12:06:51.490782 1858014 pod_ready.go:93] pod "etcd-addons-242108" in "kube-system" namespace has status "Ready":"True"
I0923 12:06:51.490800 1858014 pod_ready.go:82] duration metric: took 4.746698ms for pod "etcd-addons-242108" in "kube-system" namespace to be "Ready" ...
I0923 12:06:51.490809 1858014 pod_ready.go:79] waiting up to 6m0s for pod "kube-apiserver-addons-242108" in "kube-system" namespace to be "Ready" ...
I0923 12:06:51.495126 1858014 pod_ready.go:93] pod "kube-apiserver-addons-242108" in "kube-system" namespace has status "Ready":"True"
I0923 12:06:51.495145 1858014 pod_ready.go:82] duration metric: took 4.329561ms for pod "kube-apiserver-addons-242108" in "kube-system" namespace to be "Ready" ...
I0923 12:06:51.495153 1858014 pod_ready.go:79] waiting up to 6m0s for pod "kube-controller-manager-addons-242108" in "kube-system" namespace to be "Ready" ...
I0923 12:06:51.499027 1858014 pod_ready.go:93] pod "kube-controller-manager-addons-242108" in "kube-system" namespace has status "Ready":"True"
I0923 12:06:51.499043 1858014 pod_ready.go:82] duration metric: took 3.88464ms for pod "kube-controller-manager-addons-242108" in "kube-system" namespace to be "Ready" ...
I0923 12:06:51.499052 1858014 pod_ready.go:79] waiting up to 6m0s for pod "kube-proxy-ftvdd" in "kube-system" namespace to be "Ready" ...
I0923 12:06:51.502754 1858014 pod_ready.go:93] pod "kube-proxy-ftvdd" in "kube-system" namespace has status "Ready":"True"
I0923 12:06:51.502768 1858014 pod_ready.go:82] duration metric: took 3.711073ms for pod "kube-proxy-ftvdd" in "kube-system" namespace to be "Ready" ...
I0923 12:06:51.502775 1858014 pod_ready.go:79] waiting up to 6m0s for pod "kube-scheduler-addons-242108" in "kube-system" namespace to be "Ready" ...
I0923 12:06:51.570338 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 12:06:51.656750 1858014 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 12:06:51.884526 1858014 pod_ready.go:93] pod "kube-scheduler-addons-242108" in "kube-system" namespace has status "Ready":"True"
I0923 12:06:51.884559 1858014 pod_ready.go:82] duration metric: took 381.773758ms for pod "kube-scheduler-addons-242108" in "kube-system" namespace to be "Ready" ...
I0923 12:06:51.884573 1858014 pod_ready.go:79] waiting up to 6m0s for pod "nvidia-device-plugin-daemonset-gmznf" in "kube-system" namespace to be "Ready" ...
I0923 12:06:52.070256 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 12:06:52.156369 1858014 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 12:06:52.284661 1858014 pod_ready.go:93] pod "nvidia-device-plugin-daemonset-gmznf" in "kube-system" namespace has status "Ready":"True"
I0923 12:06:52.284687 1858014 pod_ready.go:82] duration metric: took 400.105551ms for pod "nvidia-device-plugin-daemonset-gmznf" in "kube-system" namespace to be "Ready" ...
I0923 12:06:52.284699 1858014 pod_ready.go:39] duration metric: took 39.323700487s for extra waiting for all system-critical and pods with labels [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] to be "Ready" ...
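The pod_ready.go checks above repeatedly read each system-critical pod's Ready condition until it reports True. The same condition can be inspected by hand with kubectl (illustrative only, reusing the coredns pod name from this run):

    kubectl --context addons-242108 -n kube-system get pod coredns-7c65d6cfc9-mv6mb -o jsonpath='{.status.conditions[?(@.type=="Ready")].status}'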
I0923 12:06:52.284726 1858014 api_server.go:52] waiting for apiserver process to appear ...
I0923 12:06:52.284793 1858014 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I0923 12:06:52.302488 1858014 api_server.go:72] duration metric: took 41.850892214s to wait for apiserver process to appear ...
I0923 12:06:52.302514 1858014 api_server.go:88] waiting for apiserver healthz status ...
I0923 12:06:52.302538 1858014 api_server.go:253] Checking apiserver healthz at https://192.168.49.2:8443/healthz ...
I0923 12:06:52.306731 1858014 api_server.go:279] https://192.168.49.2:8443/healthz returned 200:
ok
I0923 12:06:52.307786 1858014 api_server.go:141] control plane version: v1.31.1
I0923 12:06:52.307812 1858014 api_server.go:131] duration metric: took 5.289848ms to wait for apiserver health ...
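The healthz probe above hits the apiserver endpoint directly and expects the literal body "ok". The same check can be reproduced through kubectl against this context rather than raw HTTPS:

    kubectl --context addons-242108 get --raw /healthz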
I0923 12:06:52.307823 1858014 system_pods.go:43] waiting for kube-system pods to appear ...
I0923 12:06:52.490907 1858014 system_pods.go:59] 17 kube-system pods found
I0923 12:06:52.490943 1858014 system_pods.go:61] "coredns-7c65d6cfc9-mv6mb" [780855f9-b1ef-44de-8831-b44b48a8da8d] Running
I0923 12:06:52.490958 1858014 system_pods.go:61] "csi-hostpath-attacher-0" [046dce09-6835-4514-8e87-ad488f4f048a] Pending / Ready:ContainersNotReady (containers with unready status: [csi-attacher]) / ContainersReady:ContainersNotReady (containers with unready status: [csi-attacher])
I0923 12:06:52.490969 1858014 system_pods.go:61] "csi-hostpath-resizer-0" [26fca953-d0c2-4d84-a703-f37a7b537043] Pending / Ready:ContainersNotReady (containers with unready status: [csi-resizer]) / ContainersReady:ContainersNotReady (containers with unready status: [csi-resizer])
I0923 12:06:52.490982 1858014 system_pods.go:61] "csi-hostpathplugin-wdkfx" [919811bc-3ecf-4d2f-ad60-a05bca377783] Pending / Ready:ContainersNotReady (containers with unready status: [csi-external-health-monitor-controller node-driver-registrar hostpath liveness-probe csi-provisioner csi-snapshotter]) / ContainersReady:ContainersNotReady (containers with unready status: [csi-external-health-monitor-controller node-driver-registrar hostpath liveness-probe csi-provisioner csi-snapshotter])
I0923 12:06:52.490994 1858014 system_pods.go:61] "etcd-addons-242108" [32368bf0-b7cf-46b8-9894-4530bde39994] Running
I0923 12:06:52.491001 1858014 system_pods.go:61] "kube-apiserver-addons-242108" [9d4abfaa-11d9-454d-a962-961871b29f54] Running
I0923 12:06:52.491007 1858014 system_pods.go:61] "kube-controller-manager-addons-242108" [d537b4b5-3647-4ff0-9f22-04c8c940826a] Running
I0923 12:06:52.491016 1858014 system_pods.go:61] "kube-ingress-dns-minikube" [25084b92-7ad9-404c-b24f-33c66916dfeb] Running
I0923 12:06:52.491022 1858014 system_pods.go:61] "kube-proxy-ftvdd" [735274d1-19a4-4f6d-820e-46903e25a051] Running
I0923 12:06:52.491030 1858014 system_pods.go:61] "kube-scheduler-addons-242108" [eb270170-7bb5-4cce-a7d9-8816c80e0c1a] Running
I0923 12:06:52.491043 1858014 system_pods.go:61] "metrics-server-84c5f94fbc-p2k4n" [203e2b34-9ef4-4008-ae69-b3be67345944] Running
I0923 12:06:52.491051 1858014 system_pods.go:61] "nvidia-device-plugin-daemonset-gmznf" [f94de300-9fad-47d1-b746-f8ff890faea0] Running
I0923 12:06:52.491056 1858014 system_pods.go:61] "registry-66c9cd494c-tvxjt" [9c3d099a-ff8c-4fd5-8178-5ad9ed8d1f67] Running
I0923 12:06:52.491063 1858014 system_pods.go:61] "registry-proxy-xwflp" [a13fee98-c1a9-4518-844c-031a36bf17b4] Running
I0923 12:06:52.491071 1858014 system_pods.go:61] "snapshot-controller-56fcc65765-gd4hz" [d2e424c1-c7a8-4c86-95bf-e15060712d3c] Pending / Ready:ContainersNotReady (containers with unready status: [volume-snapshot-controller]) / ContainersReady:ContainersNotReady (containers with unready status: [volume-snapshot-controller])
I0923 12:06:52.491083 1858014 system_pods.go:61] "snapshot-controller-56fcc65765-qkv8c" [86b09707-775d-426a-b61d-35a3e2e1cab3] Pending / Ready:ContainersNotReady (containers with unready status: [volume-snapshot-controller]) / ContainersReady:ContainersNotReady (containers with unready status: [volume-snapshot-controller])
I0923 12:06:52.491090 1858014 system_pods.go:61] "storage-provisioner" [86af1e72-72ea-4790-8656-e8e6e4c06e40] Running
I0923 12:06:52.491104 1858014 system_pods.go:74] duration metric: took 183.273937ms to wait for pod list to return data ...
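The system_pods listing above corresponds to a plain pod listing in kube-system; to reproduce it by hand against this cluster:

    kubectl --context addons-242108 -n kube-system get pods -o wide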
I0923 12:06:52.491114 1858014 default_sa.go:34] waiting for default service account to be created ...
I0923 12:06:52.571176 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 12:06:52.657153 1858014 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 12:06:52.684112 1858014 default_sa.go:45] found service account: "default"
I0923 12:06:52.684141 1858014 default_sa.go:55] duration metric: took 193.019565ms for default service account to be created ...
I0923 12:06:52.684152 1858014 system_pods.go:116] waiting for k8s-apps to be running ...
I0923 12:06:52.889591 1858014 system_pods.go:86] 17 kube-system pods found
I0923 12:06:52.889622 1858014 system_pods.go:89] "coredns-7c65d6cfc9-mv6mb" [780855f9-b1ef-44de-8831-b44b48a8da8d] Running
I0923 12:06:52.889633 1858014 system_pods.go:89] "csi-hostpath-attacher-0" [046dce09-6835-4514-8e87-ad488f4f048a] Pending / Ready:ContainersNotReady (containers with unready status: [csi-attacher]) / ContainersReady:ContainersNotReady (containers with unready status: [csi-attacher])
I0923 12:06:52.889641 1858014 system_pods.go:89] "csi-hostpath-resizer-0" [26fca953-d0c2-4d84-a703-f37a7b537043] Pending / Ready:ContainersNotReady (containers with unready status: [csi-resizer]) / ContainersReady:ContainersNotReady (containers with unready status: [csi-resizer])
I0923 12:06:52.889652 1858014 system_pods.go:89] "csi-hostpathplugin-wdkfx" [919811bc-3ecf-4d2f-ad60-a05bca377783] Pending / Ready:ContainersNotReady (containers with unready status: [csi-external-health-monitor-controller node-driver-registrar hostpath liveness-probe csi-provisioner csi-snapshotter]) / ContainersReady:ContainersNotReady (containers with unready status: [csi-external-health-monitor-controller node-driver-registrar hostpath liveness-probe csi-provisioner csi-snapshotter])
I0923 12:06:52.889657 1858014 system_pods.go:89] "etcd-addons-242108" [32368bf0-b7cf-46b8-9894-4530bde39994] Running
I0923 12:06:52.889664 1858014 system_pods.go:89] "kube-apiserver-addons-242108" [9d4abfaa-11d9-454d-a962-961871b29f54] Running
I0923 12:06:52.889673 1858014 system_pods.go:89] "kube-controller-manager-addons-242108" [d537b4b5-3647-4ff0-9f22-04c8c940826a] Running
I0923 12:06:52.889682 1858014 system_pods.go:89] "kube-ingress-dns-minikube" [25084b92-7ad9-404c-b24f-33c66916dfeb] Running
I0923 12:06:52.889688 1858014 system_pods.go:89] "kube-proxy-ftvdd" [735274d1-19a4-4f6d-820e-46903e25a051] Running
I0923 12:06:52.889695 1858014 system_pods.go:89] "kube-scheduler-addons-242108" [eb270170-7bb5-4cce-a7d9-8816c80e0c1a] Running
I0923 12:06:52.889701 1858014 system_pods.go:89] "metrics-server-84c5f94fbc-p2k4n" [203e2b34-9ef4-4008-ae69-b3be67345944] Running
I0923 12:06:52.889710 1858014 system_pods.go:89] "nvidia-device-plugin-daemonset-gmznf" [f94de300-9fad-47d1-b746-f8ff890faea0] Running
I0923 12:06:52.889716 1858014 system_pods.go:89] "registry-66c9cd494c-tvxjt" [9c3d099a-ff8c-4fd5-8178-5ad9ed8d1f67] Running
I0923 12:06:52.889725 1858014 system_pods.go:89] "registry-proxy-xwflp" [a13fee98-c1a9-4518-844c-031a36bf17b4] Running
I0923 12:06:52.889737 1858014 system_pods.go:89] "snapshot-controller-56fcc65765-gd4hz" [d2e424c1-c7a8-4c86-95bf-e15060712d3c] Pending / Ready:ContainersNotReady (containers with unready status: [volume-snapshot-controller]) / ContainersReady:ContainersNotReady (containers with unready status: [volume-snapshot-controller])
I0923 12:06:52.889748 1858014 system_pods.go:89] "snapshot-controller-56fcc65765-qkv8c" [86b09707-775d-426a-b61d-35a3e2e1cab3] Pending / Ready:ContainersNotReady (containers with unready status: [volume-snapshot-controller]) / ContainersReady:ContainersNotReady (containers with unready status: [volume-snapshot-controller])
I0923 12:06:52.889758 1858014 system_pods.go:89] "storage-provisioner" [86af1e72-72ea-4790-8656-e8e6e4c06e40] Running
I0923 12:06:52.889768 1858014 system_pods.go:126] duration metric: took 205.607546ms to wait for k8s-apps to be running ...
I0923 12:06:52.889782 1858014 system_svc.go:44] waiting for kubelet service to be running ....
I0923 12:06:52.889837 1858014 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet
I0923 12:06:52.903655 1858014 system_svc.go:56] duration metric: took 13.862918ms WaitForService to wait for kubelet
I0923 12:06:52.903686 1858014 kubeadm.go:582] duration metric: took 42.452097338s to wait for: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
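The kubelet check above runs systemctl inside the node container over SSH. A minimal sketch of the same probe, assuming minikube ssh accepts a trailing command for the addons-242108 profile:

    minikube -p addons-242108 ssh -- systemctl is-active kubelet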
I0923 12:06:52.903710 1858014 node_conditions.go:102] verifying NodePressure condition ...
I0923 12:06:53.070281 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 12:06:53.084085 1858014 node_conditions.go:122] node storage ephemeral capacity is 304681132Ki
I0923 12:06:53.084110 1858014 node_conditions.go:123] node cpu capacity is 8
I0923 12:06:53.084123 1858014 node_conditions.go:105] duration metric: took 180.408209ms to run NodePressure ...
I0923 12:06:53.084137 1858014 start.go:241] waiting for startup goroutines ...
I0923 12:06:53.156344 1858014 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 12:06:53.571392 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 12:06:53.656817 1858014 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 12:06:54.070618 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 12:06:54.156502 1858014 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 12:06:54.571058 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 12:06:54.656002 1858014 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 12:06:55.073789 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 12:06:55.171642 1858014 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 12:06:55.571055 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 12:06:55.657068 1858014 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 12:06:56.071183 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 12:06:56.165935 1858014 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 12:06:56.570222 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 12:06:56.656361 1858014 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 12:06:57.070375 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 12:06:57.155639 1858014 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 12:06:57.571111 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 12:06:57.657913 1858014 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 12:06:58.072983 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 12:06:58.156583 1858014 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 12:06:58.570494 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 12:06:58.655896 1858014 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 12:06:59.070296 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 12:06:59.154895 1858014 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 12:06:59.570995 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 12:06:59.656553 1858014 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 12:07:00.071307 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 12:07:00.156021 1858014 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 12:07:00.570514 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 12:07:00.656786 1858014 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 12:07:01.069977 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 12:07:01.155669 1858014 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 12:07:01.570096 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 12:07:01.655648 1858014 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 12:07:02.071137 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 12:07:02.155963 1858014 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 12:07:02.571428 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 12:07:02.655537 1858014 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 12:07:03.070546 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 12:07:03.156717 1858014 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 12:07:03.571073 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 12:07:03.656702 1858014 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 12:07:04.070753 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 12:07:04.155797 1858014 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 12:07:04.570886 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 12:07:04.671881 1858014 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 12:07:05.070739 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 12:07:05.156283 1858014 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 12:07:05.570999 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 12:07:05.672345 1858014 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 12:07:06.070730 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 12:07:06.156451 1858014 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 12:07:06.571124 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 12:07:06.655795 1858014 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 12:07:07.069956 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 12:07:07.155715 1858014 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 12:07:07.570801 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 12:07:07.655990 1858014 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 12:07:08.070281 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 12:07:08.155817 1858014 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 12:07:08.571314 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 12:07:08.671804 1858014 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 12:07:09.070889 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 12:07:09.157180 1858014 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 12:07:09.571400 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 12:07:09.656637 1858014 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 12:07:10.070425 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 12:07:10.156251 1858014 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 12:07:10.581515 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 12:07:10.683104 1858014 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 12:07:11.070713 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 12:07:11.156693 1858014 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 12:07:11.570501 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 12:07:11.656009 1858014 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 12:07:12.070656 1858014 kapi.go:107] duration metric: took 47.504888946s to wait for kubernetes.io/minikube-addons=csi-hostpath-driver ...
I0923 12:07:12.155735 1858014 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 12:07:12.655841 1858014 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 12:07:13.156270 1858014 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 12:07:13.656340 1858014 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 12:07:14.155630 1858014 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 12:07:14.656482 1858014 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 12:07:15.156131 1858014 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 12:07:15.655710 1858014 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 12:07:16.155574 1858014 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 12:07:16.655622 1858014 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 12:07:17.156063 1858014 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 12:07:17.657323 1858014 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 12:07:18.155767 1858014 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 12:07:18.655678 1858014 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 12:07:19.156065 1858014 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 12:07:19.655737 1858014 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 12:07:20.156026 1858014 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 12:07:20.655730 1858014 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 12:07:21.155793 1858014 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 12:07:21.656073 1858014 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 12:07:22.156147 1858014 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 12:07:22.655811 1858014 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 12:07:23.155955 1858014 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 12:07:23.655874 1858014 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 12:07:24.155720 1858014 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 12:07:24.655636 1858014 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 12:07:25.156851 1858014 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 12:07:25.657336 1858014 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 12:07:26.157095 1858014 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 12:07:26.657221 1858014 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 12:07:27.157193 1858014 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 12:07:27.656314 1858014 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 12:07:28.156154 1858014 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 12:07:28.655938 1858014 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 12:07:29.189050 1858014 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 12:07:29.655831 1858014 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 12:07:30.155399 1858014 kapi.go:107] duration metric: took 1m9.503611733s to wait for app.kubernetes.io/name=ingress-nginx ...
I0923 12:07:48.886958 1858014 kapi.go:86] Found 1 Pods for label selector kubernetes.io/minikube-addons=gcp-auth
I0923 12:07:48.886980 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 12:07:49.386240 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 12:07:49.886789 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 12:07:50.386288 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 12:07:50.886520 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 12:07:51.385937 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 12:07:51.886232 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 12:07:52.386288 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 12:07:52.886196 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 12:07:53.386169 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 12:07:53.886525 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 12:07:54.385939 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 12:07:54.886233 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 12:07:55.386841 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 12:07:55.885710 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 12:07:56.385885 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 12:07:56.885793 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 12:07:57.386364 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 12:07:57.886715 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 12:07:58.385564 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 12:07:58.885734 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 12:07:59.386010 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 12:07:59.886263 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 12:08:00.386296 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 12:08:00.885996 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 12:08:01.386373 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 12:08:01.886239 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 12:08:02.386218 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 12:08:02.886251 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 12:08:03.386093 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 12:08:03.886393 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 12:08:04.386599 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 12:08:04.886049 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 12:08:05.386476 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 12:08:05.885583 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 12:08:06.386260 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 12:08:06.886434 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 12:08:07.386020 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 12:08:07.886406 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 12:08:08.386884 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 12:08:08.886558 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 12:08:09.386135 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 12:08:09.886531 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 12:08:10.386308 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 12:08:10.886451 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 12:08:11.386887 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 12:08:11.886242 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 12:08:12.386438 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 12:08:12.886415 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 12:08:13.386652 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 12:08:13.886211 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 12:08:14.386292 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 12:08:14.886885 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 12:08:15.386160 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 12:08:15.886688 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 12:08:16.385981 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 12:08:16.886585 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 12:08:17.386232 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 12:08:17.886672 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 12:08:18.385722 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 12:08:18.886054 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 12:08:19.386265 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 12:08:19.886420 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 12:08:20.386592 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 12:08:20.885836 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 12:08:21.386245 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 12:08:21.886431 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 12:08:22.386118 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 12:08:22.886266 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 12:08:23.386455 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 12:08:23.886679 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 12:08:24.385605 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 12:08:24.886218 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 12:08:25.386618 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 12:08:25.885610 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 12:08:26.385927 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 12:08:26.885964 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 12:08:27.386705 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 12:08:27.885710 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 12:08:28.385582 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 12:08:28.885969 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 12:08:29.386108 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 12:08:29.886779 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 12:08:30.386668 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 12:08:30.885889 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 12:08:31.385846 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 12:08:31.885969 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 12:08:32.386295 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 12:08:32.886520 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 12:08:33.386393 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 12:08:33.887013 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 12:08:34.386144 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 12:08:34.886870 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 12:08:35.386387 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 12:08:35.887061 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 12:08:36.386052 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 12:08:36.885635 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 12:08:37.386078 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 12:08:37.886201 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 12:08:38.386346 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 12:08:38.886802 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 12:08:39.386099 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 12:08:39.886086 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 12:08:40.386153 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 12:08:40.886813 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 12:08:41.386759 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 12:08:41.885879 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 12:08:42.386101 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 12:08:42.886107 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 12:08:43.386700 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 12:08:43.885851 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 12:08:44.385662 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 12:08:44.885921 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 12:08:45.386566 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 12:08:45.885848 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 12:08:46.385691 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 12:08:46.885810 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 12:08:47.386264 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 12:08:47.886416 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 12:08:48.386697 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 12:08:48.886237 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 12:08:49.386418 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 12:08:49.886487 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 12:08:50.386441 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 12:08:50.886192 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 12:08:51.386221 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 12:08:51.886174 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 12:08:52.386442 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 12:08:52.886986 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 12:08:53.386622 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 12:08:53.886215 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 12:08:54.386201 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 12:08:54.886854 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 12:08:55.388832 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 12:08:55.887560 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 12:08:56.386478 1858014 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 12:08:56.886917 1858014 kapi.go:107] duration metric: took 2m30.504316393s to wait for kubernetes.io/minikube-addons=gcp-auth ...
I0923 12:08:56.888429 1858014 out.go:177] * Your GCP credentials will now be mounted into every pod created in the addons-242108 cluster.
I0923 12:08:56.889561 1858014 out.go:177] * If you don't want your credentials mounted into a specific pod, add a label with the `gcp-auth-skip-secret` key to your pod configuration.
I0923 12:08:56.890550 1858014 out.go:177] * If you want existing pods to be mounted with credentials, either recreate them or rerun addons enable with --refresh.
I0923 12:08:56.891637 1858014 out.go:177] * Enabled addons: ingress-dns, nvidia-device-plugin, storage-provisioner-rancher, storage-provisioner, default-storageclass, cloud-spanner, metrics-server, inspektor-gadget, volcano, yakd, volumesnapshots, registry, csi-hostpath-driver, ingress, gcp-auth
I0923 12:08:56.892677 1858014 addons.go:510] duration metric: took 2m46.441007464s for enable addons: enabled=[ingress-dns nvidia-device-plugin storage-provisioner-rancher storage-provisioner default-storageclass cloud-spanner metrics-server inspektor-gadget volcano yakd volumesnapshots registry csi-hostpath-driver ingress gcp-auth]
I0923 12:08:56.892726 1858014 start.go:246] waiting for cluster config update ...
I0923 12:08:56.892748 1858014 start.go:255] writing updated cluster config ...
I0923 12:08:56.893020 1858014 ssh_runner.go:195] Run: rm -f paused
I0923 12:08:56.943536 1858014 start.go:600] kubectl: 1.31.1, cluster: 1.31.1 (minor skew: 0)
I0923 12:08:56.945214 1858014 out.go:177] * Done! kubectl is now configured to use "addons-242108" cluster and "default" namespace by default
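The gcp-auth notes above (credentials mounted into every pod, the `gcp-auth-skip-secret` label, and the --refresh hint) can be exercised directly against the cluster. The following is a minimal sketch, not part of the captured log, assuming the addons-242108 profile from this run is still up; the pod name no-creds, the label value "true", and the busybox:stable image are illustrative choices:

# A pod carrying the gcp-auth-skip-secret label should not get the credential file mounted:
kubectl --context addons-242108 run no-creds --image=busybox:stable \
  --labels=gcp-auth-skip-secret=true --restart=Never --rm -it -- \
  sh -c 'test -f /google-app-creds.json && echo "creds mounted" || echo "creds skipped"'

# To re-mount credentials into pods created before the addon finished, re-run the addon
# with --refresh as the message above suggests:
out/minikube-linux-amd64 -p addons-242108 addons enable gcp-auth --refresh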
==> Docker <==
Sep 23 12:18:16 addons-242108 cri-dockerd[1608]: time="2024-09-23T12:18:16Z" level=error msg="Set backoffDuration to : 1m0s for container ID '973ac73e91c79943073fa3a8d4b714869bb69e2c1068e674572393082a953701'"
Sep 23 12:18:16 addons-242108 cri-dockerd[1608]: time="2024-09-23T12:18:16Z" level=error msg="error getting RW layer size for container ID '2bcad0e834eba729b9cecacd9f5f9b3974409675efaa02c1cffa11a44162a1e0': Error response from daemon: No such container: 2bcad0e834eba729b9cecacd9f5f9b3974409675efaa02c1cffa11a44162a1e0"
Sep 23 12:18:16 addons-242108 cri-dockerd[1608]: time="2024-09-23T12:18:16Z" level=error msg="Set backoffDuration to : 1m0s for container ID '2bcad0e834eba729b9cecacd9f5f9b3974409675efaa02c1cffa11a44162a1e0'"
Sep 23 12:18:16 addons-242108 cri-dockerd[1608]: time="2024-09-23T12:18:16Z" level=error msg="error getting RW layer size for container ID 'e674bdc962a7c65bc53a8850712fca3f25d524940c089167649da8deb169c558': Error response from daemon: No such container: e674bdc962a7c65bc53a8850712fca3f25d524940c089167649da8deb169c558"
Sep 23 12:18:16 addons-242108 cri-dockerd[1608]: time="2024-09-23T12:18:16Z" level=error msg="Set backoffDuration to : 1m0s for container ID 'e674bdc962a7c65bc53a8850712fca3f25d524940c089167649da8deb169c558'"
Sep 23 12:18:18 addons-242108 cri-dockerd[1608]: time="2024-09-23T12:18:18Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/ee3b8f53b398609b96cdf1298534a4bf3a72658da7b56e1d4719f88835811866/resolv.conf as [nameserver 10.96.0.10 search default.svc.cluster.local svc.cluster.local cluster.local us-west1-a.c.k8s-minikube.internal c.k8s-minikube.internal google.internal options ndots:5]"
Sep 23 12:18:19 addons-242108 cri-dockerd[1608]: time="2024-09-23T12:18:19Z" level=info msg="Stop pulling image busybox:stable: Status: Downloaded newer image for busybox:stable"
Sep 23 12:18:19 addons-242108 dockerd[1343]: time="2024-09-23T12:18:19.745978695Z" level=info msg="ignoring event" container=252b1e79797b0e222ab59e1244579979bf5c49577a1940769d2de04ba3c86ab8 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Sep 23 12:18:21 addons-242108 dockerd[1343]: time="2024-09-23T12:18:21.676937270Z" level=info msg="ignoring event" container=9739e525d4e8ae6f261093dc142b7799b48a78afeed6e95ab7c6a4d4d23a9fc1 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Sep 23 12:18:21 addons-242108 dockerd[1343]: time="2024-09-23T12:18:21.678530786Z" level=info msg="ignoring event" container=9c57e8bc989f387788d2359dff48fb51e92e1fc17f280ff9156f4e28fa9f4e12 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Sep 23 12:18:21 addons-242108 dockerd[1343]: time="2024-09-23T12:18:21.778710486Z" level=info msg="ignoring event" container=ee3b8f53b398609b96cdf1298534a4bf3a72658da7b56e1d4719f88835811866 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Sep 23 12:18:21 addons-242108 dockerd[1343]: time="2024-09-23T12:18:21.885208801Z" level=info msg="ignoring event" container=ad7eaeb4a9fdfad4205127d3908adc09cad9ab3031c6741101f461d5781aebd8 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Sep 23 12:18:21 addons-242108 dockerd[1343]: time="2024-09-23T12:18:21.952758589Z" level=info msg="ignoring event" container=19b6fa44c244253181e5ab6af193d3433e50dde9bea2dcfeea294781d8f76251 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Sep 23 12:18:22 addons-242108 dockerd[1343]: time="2024-09-23T12:18:22.285513138Z" level=info msg="ignoring event" container=62299439d1cc1db8839b4ec834680afe1049f0e11d97525fca717faa6cf47793 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Sep 23 12:18:22 addons-242108 dockerd[1343]: time="2024-09-23T12:18:22.435589900Z" level=info msg="ignoring event" container=b4d537df4aa017f0001682a19ed8bace78fb6bc0a03692cdb30e75f0ef673f7a module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Sep 23 12:18:23 addons-242108 cri-dockerd[1608]: time="2024-09-23T12:18:23Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/14e35e13f0e3d2ccfdb1e491c3de6e8787e1a6a1f866b0b0b8c958265add8e94/resolv.conf as [nameserver 10.96.0.10 search local-path-storage.svc.cluster.local svc.cluster.local cluster.local us-west1-a.c.k8s-minikube.internal c.k8s-minikube.internal google.internal options ndots:5]"
Sep 23 12:18:23 addons-242108 dockerd[1343]: time="2024-09-23T12:18:23.779650968Z" level=info msg="ignoring event" container=4d6f5b355b6a2be6829bd1e79f35e47f47903819b1da9105f1ed84b737aa4581 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Sep 23 12:18:25 addons-242108 dockerd[1343]: time="2024-09-23T12:18:25.798992231Z" level=info msg="ignoring event" container=14e35e13f0e3d2ccfdb1e491c3de6e8787e1a6a1f866b0b0b8c958265add8e94 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Sep 23 12:18:29 addons-242108 dockerd[1343]: time="2024-09-23T12:18:29.698146183Z" level=info msg="Attempting next endpoint for pull after error: Head \"https://gcr.io/v2/k8s-minikube/busybox/manifests/latest\": unauthorized: authentication failed" spanID=24c074421ce982e4 traceID=5c8a869defcf6be1ebbbcb4ebb9ecc3d
Sep 23 12:18:29 addons-242108 dockerd[1343]: time="2024-09-23T12:18:29.700243550Z" level=error msg="Handler for POST /v1.43/images/create returned error: Head \"https://gcr.io/v2/k8s-minikube/busybox/manifests/latest\": unauthorized: authentication failed" spanID=24c074421ce982e4 traceID=5c8a869defcf6be1ebbbcb4ebb9ecc3d
Sep 23 12:18:49 addons-242108 dockerd[1343]: time="2024-09-23T12:18:49.206773822Z" level=info msg="ignoring event" container=87a5e54af9bf937d3d1ee14f4dda76c97873e490572c1e6a0b902365059e8fdc module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Sep 23 12:18:49 addons-242108 dockerd[1343]: time="2024-09-23T12:18:49.670588708Z" level=info msg="ignoring event" container=66f393a66b36a422b3146b2841c225e7ce3e239551e4422352f4a6aa103b0ef5 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Sep 23 12:18:49 addons-242108 dockerd[1343]: time="2024-09-23T12:18:49.744905630Z" level=info msg="ignoring event" container=2c1dd9e0fcc1a71950d08ca80b398a3229d5ff7c53e115f7883831b142f4c435 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Sep 23 12:18:49 addons-242108 dockerd[1343]: time="2024-09-23T12:18:49.807576578Z" level=info msg="ignoring event" container=a8f73b18ffd35cb94a9811ab7206f84ce4454192aefc86d5a0fda65ec5866f2f module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Sep 23 12:18:49 addons-242108 dockerd[1343]: time="2024-09-23T12:18:49.900014834Z" level=info msg="ignoring event" container=8ba1b49d8e1895e2d1d32fab8136d1509e673d96a9cfe96160537e4673b771cf module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
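The two dockerd entries at 12:18:29 above record the pull failure behind the registry-test timeout: the pod references gcr.io/k8s-minikube/busybox with no tag, so the daemon asks for :latest and gets "unauthorized" back from gcr.io. A hedged way to reproduce this outside the pod, assuming the node is still running, is to repeat the pull over minikube ssh; busybox:stable, which the log shows pulling successfully at 12:18:19, serves as a working comparison:

# Expected to fail with the same "unauthorized: authentication failed" error seen above:
out/minikube-linux-amd64 -p addons-242108 ssh -- docker pull gcr.io/k8s-minikube/busybox:latest

# Expected to succeed, matching the busybox:stable pull recorded earlier in this section:
out/minikube-linux-amd64 -p addons-242108 ssh -- docker pull busybox:stable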
==> container status <==
CONTAINER IMAGE CREATED STATE NAME ATTEMPT POD ID POD
4d6f5b355b6a2 a416a98b71e22 27 seconds ago Exited helper-pod 0 14e35e13f0e3d helper-pod-delete-pvc-f744a326-bb9b-48c7-a6ae-1eba57c6e796
252b1e79797b0 busybox@sha256:c230832bd3b0be59a6c47ed64294f9ce71e91b327957920b6929a0caa8353140 31 seconds ago Exited busybox 0 ee3b8f53b3986 test-local-path
6dece8442e187 busybox@sha256:3fbc632167424a6d997e74f52b878d7cc478225cffac6bc977eedfe51c7f4e79 36 seconds ago Exited helper-pod 0 24fde83762b27 helper-pod-create-pvc-f744a326-bb9b-48c7-a6ae-1eba57c6e796
70f5210702f96 kicbase/echo-server@sha256:127ac38a2bb9537b7f252addff209ea6801edcac8a92c8b1104dacd66a583ed6 41 seconds ago Running hello-world-app 0 b6dc2fa4dde86 hello-world-app-55bf9c44b4-9hz8m
7a030bd4d94b7 nginx@sha256:a5127daff3d6f4606be3100a252419bfa84fd6ee5cd74d0feaca1a5068f97dcf 51 seconds ago Running nginx 0 570a7690b9f3b nginx
fd300055d1102 gcr.io/k8s-minikube/gcp-auth-webhook@sha256:e6c5b3bc32072ea370d34c27836efd11b3519d25bd444c2a8efc339cff0e20fb 9 minutes ago Running gcp-auth 0 46325eeab6877 gcp-auth-89d5ffd79-rwktj
2694ee61dd4b2 ce263a8653f9c 11 minutes ago Exited patch 2 665b7fa3c8d3b ingress-nginx-admission-patch-gtrrn
f90c270cea853 registry.k8s.io/ingress-nginx/kube-webhook-certgen@sha256:a320a50cc91bd15fd2d6fa6de58bd98c1bd64b9a6f926ce23a600d87043455a3 11 minutes ago Exited create 0 ea3cf7cc1629f ingress-nginx-admission-create-nbjdm
7169af9e86965 rancher/local-path-provisioner@sha256:e34c88ae0affb1cdefbb874140d6339d4a27ec4ee420ae8199cd839997b05246 12 minutes ago Running local-path-provisioner 0 bab984d9f8b0a local-path-provisioner-86d989889c-ddcjw
9c4ac22159e6d 6e38f40d628db 12 minutes ago Running storage-provisioner 0 bc92d9331255c storage-provisioner
9d62c9fb817ee c69fa2e9cbf5f 12 minutes ago Running coredns 0 1f1c9757372b2 coredns-7c65d6cfc9-mv6mb
6335cb35db6c2 60c005f310ff3 12 minutes ago Running kube-proxy 0 a652a0d6d7f1c kube-proxy-ftvdd
fffe4f5c9485a 175ffd71cce3d 12 minutes ago Running kube-controller-manager 0 18eec7e86333b kube-controller-manager-addons-242108
f13313f58364c 9aa1fad941575 12 minutes ago Running kube-scheduler 0 7bd33725396e0 kube-scheduler-addons-242108
738958701cfa2 2e96e5913fc06 12 minutes ago Running etcd 0 5fe26f0e05801 etcd-addons-242108
cbb83f797f931 6bab7719df100 12 minutes ago Running kube-apiserver 0 37ef638b64351 kube-apiserver-addons-242108
==> coredns [9d62c9fb817e] <==
[INFO] 10.244.0.7:50708 - 59820 "A IN registry.kube-system.svc.cluster.local.svc.cluster.local. udp 74 false 512" NXDOMAIN qr,aa,rd 167 0.000250946s
[INFO] 10.244.0.7:46676 - 63966 "A IN registry.kube-system.svc.cluster.local.cluster.local. udp 70 false 512" NXDOMAIN qr,aa,rd 163 0.000129717s
[INFO] 10.244.0.7:46676 - 32476 "AAAA IN registry.kube-system.svc.cluster.local.cluster.local. udp 70 false 512" NXDOMAIN qr,aa,rd 163 0.000215126s
[INFO] 10.244.0.7:51210 - 4231 "AAAA IN registry.kube-system.svc.cluster.local.us-west1-a.c.k8s-minikube.internal. udp 91 false 512" NXDOMAIN qr,rd,ra 91 0.004380832s
[INFO] 10.244.0.7:51210 - 48260 "A IN registry.kube-system.svc.cluster.local.us-west1-a.c.k8s-minikube.internal. udp 91 false 512" NXDOMAIN qr,rd,ra 91 0.004430507s
[INFO] 10.244.0.7:55196 - 32493 "A IN registry.kube-system.svc.cluster.local.c.k8s-minikube.internal. udp 80 false 512" NXDOMAIN qr,rd,ra 80 0.00466985s
[INFO] 10.244.0.7:55196 - 16616 "AAAA IN registry.kube-system.svc.cluster.local.c.k8s-minikube.internal. udp 80 false 512" NXDOMAIN qr,rd,ra 80 0.006035977s
[INFO] 10.244.0.7:42963 - 56721 "AAAA IN registry.kube-system.svc.cluster.local.google.internal. udp 72 false 512" NXDOMAIN qr,rd,ra 72 0.004948853s
[INFO] 10.244.0.7:42963 - 34284 "A IN registry.kube-system.svc.cluster.local.google.internal. udp 72 false 512" NXDOMAIN qr,rd,ra 72 0.004908912s
[INFO] 10.244.0.7:45598 - 32009 "AAAA IN registry.kube-system.svc.cluster.local. udp 56 false 512" NOERROR qr,aa,rd 149 0.000095262s
[INFO] 10.244.0.7:45598 - 6155 "A IN registry.kube-system.svc.cluster.local. udp 56 false 512" NOERROR qr,aa,rd 110 0.000129173s
[INFO] 10.244.0.25:45363 - 51849 "AAAA IN storage.googleapis.com.gcp-auth.svc.cluster.local. udp 78 false 1232" NXDOMAIN qr,aa,rd 160 0.000404754s
[INFO] 10.244.0.25:44757 - 36905 "A IN storage.googleapis.com.gcp-auth.svc.cluster.local. udp 78 false 1232" NXDOMAIN qr,aa,rd 160 0.000417862s
[INFO] 10.244.0.25:39391 - 6527 "AAAA IN storage.googleapis.com.svc.cluster.local. udp 69 false 1232" NXDOMAIN qr,aa,rd 151 0.000132925s
[INFO] 10.244.0.25:50575 - 34209 "A IN storage.googleapis.com.svc.cluster.local. udp 69 false 1232" NXDOMAIN qr,aa,rd 151 0.000104286s
[INFO] 10.244.0.25:50002 - 4011 "A IN storage.googleapis.com.cluster.local. udp 65 false 1232" NXDOMAIN qr,aa,rd 147 0.000121936s
[INFO] 10.244.0.25:43498 - 5127 "AAAA IN storage.googleapis.com.cluster.local. udp 65 false 1232" NXDOMAIN qr,aa,rd 147 0.00012984s
[INFO] 10.244.0.25:35101 - 22284 "A IN storage.googleapis.com.us-west1-a.c.k8s-minikube.internal. udp 86 false 1232" NXDOMAIN qr,rd,ra 75 0.007837418s
[INFO] 10.244.0.25:60552 - 52996 "AAAA IN storage.googleapis.com.us-west1-a.c.k8s-minikube.internal. udp 86 false 1232" NXDOMAIN qr,rd,ra 75 0.0084815s
[INFO] 10.244.0.25:40402 - 17699 "AAAA IN storage.googleapis.com.c.k8s-minikube.internal. udp 75 false 1232" NXDOMAIN qr,rd,ra 64 0.006722285s
[INFO] 10.244.0.25:58131 - 21342 "A IN storage.googleapis.com.c.k8s-minikube.internal. udp 75 false 1232" NXDOMAIN qr,rd,ra 64 0.008780025s
[INFO] 10.244.0.25:40976 - 42114 "A IN storage.googleapis.com.google.internal. udp 67 false 1232" NXDOMAIN qr,rd,ra 56 0.005343227s
[INFO] 10.244.0.25:40644 - 55620 "AAAA IN storage.googleapis.com.google.internal. udp 67 false 1232" NXDOMAIN qr,rd,ra 56 0.005378817s
[INFO] 10.244.0.25:57052 - 6776 "A IN storage.googleapis.com. udp 51 false 1232" NOERROR qr,rd,ra 496 0.001933423s
[INFO] 10.244.0.25:39467 - 51151 "AAAA IN storage.googleapis.com. udp 51 false 1232" NOERROR qr,rd,ra 240 0.002111407s
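The NXDOMAIN/NOERROR pairs above are the pod's ndots:5 search list at work (the same list cri-dockerd writes into resolv.conf earlier in this log): a name with fewer than five dots is tried against each search domain (svc.cluster.local, cluster.local, and the GCE internal domains) before the bare name answers. A hypothetical way to observe the difference from inside the cluster is to query a fully qualified name with a trailing dot, which bypasses the search list; dns-check is an illustrative pod name, and kubernetes.default is used because the registry service is removed by the end of this run:

# The trailing dot marks the name as absolute, so no search-domain expansion occurs:
kubectl --context addons-242108 run dns-check --image=busybox:stable \
  --restart=Never --rm -it -- nslookup kubernetes.default.svc.cluster.local.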
==> describe nodes <==
Name: addons-242108
Roles: control-plane
Labels: beta.kubernetes.io/arch=amd64
beta.kubernetes.io/os=linux
kubernetes.io/arch=amd64
kubernetes.io/hostname=addons-242108
kubernetes.io/os=linux
minikube.k8s.io/commit=30f673d6edb6d12f8aba2f7e30667ea1b6d205d1
minikube.k8s.io/name=addons-242108
minikube.k8s.io/primary=true
minikube.k8s.io/updated_at=2024_09_23T12_06_06_0700
minikube.k8s.io/version=v1.34.0
node-role.kubernetes.io/control-plane=
node.kubernetes.io/exclude-from-external-load-balancers=
topology.hostpath.csi/node=addons-242108
Annotations: kubeadm.alpha.kubernetes.io/cri-socket: unix:///var/run/cri-dockerd.sock
node.alpha.kubernetes.io/ttl: 0
volumes.kubernetes.io/controller-managed-attach-detach: true
CreationTimestamp: Mon, 23 Sep 2024 12:06:03 +0000
Taints: <none>
Unschedulable: false
Lease:
HolderIdentity: addons-242108
AcquireTime: <unset>
RenewTime: Mon, 23 Sep 2024 12:18:50 +0000
Conditions:
Type Status LastHeartbeatTime LastTransitionTime Reason Message
---- ------ ----------------- ------------------ ------ -------
MemoryPressure False Mon, 23 Sep 2024 12:18:40 +0000 Mon, 23 Sep 2024 12:06:01 +0000 KubeletHasSufficientMemory kubelet has sufficient memory available
DiskPressure False Mon, 23 Sep 2024 12:18:40 +0000 Mon, 23 Sep 2024 12:06:01 +0000 KubeletHasNoDiskPressure kubelet has no disk pressure
PIDPressure False Mon, 23 Sep 2024 12:18:40 +0000 Mon, 23 Sep 2024 12:06:01 +0000 KubeletHasSufficientPID kubelet has sufficient PID available
Ready True Mon, 23 Sep 2024 12:18:40 +0000 Mon, 23 Sep 2024 12:06:03 +0000 KubeletReady kubelet is posting ready status
Addresses:
InternalIP: 192.168.49.2
Hostname: addons-242108
Capacity:
cpu: 8
ephemeral-storage: 304681132Ki
hugepages-1Gi: 0
hugepages-2Mi: 0
memory: 32859316Ki
pods: 110
Allocatable:
cpu: 8
ephemeral-storage: 304681132Ki
hugepages-1Gi: 0
hugepages-2Mi: 0
memory: 32859316Ki
pods: 110
System Info:
Machine ID: dd3dbbee0d064727bb85ed02413410df
System UUID: aa4456a4-646b-4a2e-a73d-669a478f207b
Boot ID: d656bf56-7ae7-4b68-8543-65e0db596769
Kernel Version: 5.15.0-1069-gcp
OS Image: Ubuntu 22.04.5 LTS
Operating System: linux
Architecture: amd64
Container Runtime Version: docker://27.3.0
Kubelet Version: v1.31.1
Kube-Proxy Version: v1.31.1
PodCIDR: 10.244.0.0/24
PodCIDRs: 10.244.0.0/24
Non-terminated Pods: (12 in total)
Namespace Name CPU Requests CPU Limits Memory Requests Memory Limits Age
--------- ---- ------------ ---------- --------------- ------------- ---
default busybox 0 (0%) 0 (0%) 0 (0%) 0 (0%) 9m14s
default hello-world-app-55bf9c44b4-9hz8m 0 (0%) 0 (0%) 0 (0%) 0 (0%) 43s
default nginx 0 (0%) 0 (0%) 0 (0%) 0 (0%) 55s
gcp-auth gcp-auth-89d5ffd79-rwktj 0 (0%) 0 (0%) 0 (0%) 0 (0%) 11m
kube-system coredns-7c65d6cfc9-mv6mb 100m (1%) 0 (0%) 70Mi (0%) 170Mi (0%) 12m
kube-system etcd-addons-242108 100m (1%) 0 (0%) 100Mi (0%) 0 (0%) 12m
kube-system kube-apiserver-addons-242108 250m (3%) 0 (0%) 0 (0%) 0 (0%) 12m
kube-system kube-controller-manager-addons-242108 200m (2%) 0 (0%) 0 (0%) 0 (0%) 12m
kube-system kube-proxy-ftvdd 0 (0%) 0 (0%) 0 (0%) 0 (0%) 12m
kube-system kube-scheduler-addons-242108 100m (1%) 0 (0%) 0 (0%) 0 (0%) 12m
kube-system storage-provisioner 0 (0%) 0 (0%) 0 (0%) 0 (0%) 12m
local-path-storage local-path-provisioner-86d989889c-ddcjw 0 (0%) 0 (0%) 0 (0%) 0 (0%) 12m
Allocated resources:
(Total limits may be over 100 percent, i.e., overcommitted.)
Resource Requests Limits
-------- -------- ------
cpu 750m (9%) 0 (0%)
memory 170Mi (0%) 170Mi (0%)
ephemeral-storage 0 (0%) 0 (0%)
hugepages-1Gi 0 (0%) 0 (0%)
hugepages-2Mi 0 (0%) 0 (0%)
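The totals above follow from the per-pod requests listed in the previous table: 100m (coredns) + 100m (etcd) + 250m (kube-apiserver) + 200m (kube-controller-manager) + 100m (kube-scheduler) = 750m, and 750m of the node's 8 CPUs (8000m) is about 9.4%, shown as 9%. Likewise the 170Mi memory request is coredns's 70Mi plus etcd's 100Mi, well under 1% of the 32859316Ki capacity, and the 170Mi limit is coredns's alone.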
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal Starting 12m kube-proxy
Normal Starting 12m kubelet Starting kubelet.
Warning CgroupV1 12m kubelet Cgroup v1 support is in maintenance mode, please migrate to Cgroup v2.
Normal NodeAllocatableEnforced 12m kubelet Updated Node Allocatable limit across pods
Normal NodeHasSufficientMemory 12m kubelet Node addons-242108 status is now: NodeHasSufficientMemory
Normal NodeHasNoDiskPressure 12m kubelet Node addons-242108 status is now: NodeHasNoDiskPressure
Normal NodeHasSufficientPID 12m kubelet Node addons-242108 status is now: NodeHasSufficientPID
Normal RegisteredNode 12m node-controller Node addons-242108 event: Registered Node addons-242108 in Controller
==> dmesg <==
[ +0.000007] ll header: 00000000: ff ff ff ff ff ff 02 82 9d 7f a1 c8 08 06
[ +1.795813] IPv4: martian source 10.244.0.1 from 10.244.0.15, on dev eth0
[ +0.000007] ll header: 00000000: ff ff ff ff ff ff d2 67 2f dc 8b b1 08 06
[ +2.761992] IPv4: martian source 10.244.0.1 from 10.244.0.16, on dev eth0
[ +0.000005] ll header: 00000000: ff ff ff ff ff ff a6 a4 e2 d4 8f 12 08 06
[ +1.824766] IPv4: martian source 10.244.0.1 from 10.244.0.17, on dev eth0
[ +0.000005] ll header: 00000000: ff ff ff ff ff ff 02 60 9c 61 21 50 08 06
[ +5.738963] IPv4: martian source 10.244.0.1 from 10.244.0.19, on dev eth0
[ +0.000007] ll header: 00000000: ff ff ff ff ff ff a6 e2 11 2a 7e c2 08 06
[ +0.033225] IPv4: martian source 10.244.0.1 from 10.244.0.20, on dev eth0
[ +0.000007] ll header: 00000000: ff ff ff ff ff ff de 2a 44 72 3f 9d 08 06
[ +0.169115] IPv4: martian source 10.244.0.1 from 10.244.0.18, on dev eth0
[ +0.000006] ll header: 00000000: ff ff ff ff ff ff 1e 6a d6 28 9d 14 08 06
[Sep23 12:07] IPv4: martian source 10.244.0.1 from 10.244.0.21, on dev eth0
[ +0.000005] ll header: 00000000: ff ff ff ff ff ff 76 ae b3 4a 7f 4a 08 06
[Sep23 12:08] IPv4: martian source 10.244.0.1 from 10.244.0.23, on dev eth0
[ +0.000006] ll header: 00000000: ff ff ff ff ff ff 12 82 c4 ff 6b c7 08 06
[ +0.081441] IPv4: martian source 10.244.0.1 from 10.244.0.24, on dev eth0
[ +0.000006] ll header: 00000000: ff ff ff ff ff ff de 0a ae f4 a2 17 08 06
[ +25.774793] IPv4: martian source 10.244.0.1 from 10.244.0.25, on dev eth0
[ +0.000007] ll header: 00000000: ff ff ff ff ff ff 7a 9f 9c eb 6c 92 08 06
[ +0.012614] IPv4: martian source 10.244.0.25 from 10.244.0.3, on dev eth0
[ +0.000005] ll header: 00000000: ff ff ff ff ff ff 7a 40 db 7b a7 9f 08 06
[Sep23 12:17] IPv4: martian source 10.244.0.31 from 10.244.0.21, on dev eth0
[ +0.000007] ll header: 00000000: ff ff ff ff ff ff 76 ae b3 4a 7f 4a 08 06
==> etcd [738958701cfa] <==
{"level":"info","ts":"2024-09-23T12:06:01.455765Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"aec36adc501070cc received MsgPreVoteResp from aec36adc501070cc at term 1"}
{"level":"info","ts":"2024-09-23T12:06:01.455798Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"aec36adc501070cc became candidate at term 2"}
{"level":"info","ts":"2024-09-23T12:06:01.455807Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"aec36adc501070cc received MsgVoteResp from aec36adc501070cc at term 2"}
{"level":"info","ts":"2024-09-23T12:06:01.455825Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"aec36adc501070cc became leader at term 2"}
{"level":"info","ts":"2024-09-23T12:06:01.455836Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"raft.node: aec36adc501070cc elected leader aec36adc501070cc at term 2"}
{"level":"info","ts":"2024-09-23T12:06:01.456707Z","caller":"etcdserver/server.go:2629","msg":"setting up initial cluster version using v2 API","cluster-version":"3.5"}
{"level":"info","ts":"2024-09-23T12:06:01.457395Z","caller":"embed/serve.go:103","msg":"ready to serve client requests"}
{"level":"info","ts":"2024-09-23T12:06:01.457394Z","caller":"etcdserver/server.go:2118","msg":"published local member to cluster through raft","local-member-id":"aec36adc501070cc","local-member-attributes":"{Name:addons-242108 ClientURLs:[https://192.168.49.2:2379]}","request-path":"/0/members/aec36adc501070cc/attributes","cluster-id":"fa54960ea34d58be","publish-timeout":"7s"}
{"level":"info","ts":"2024-09-23T12:06:01.457438Z","caller":"embed/serve.go:103","msg":"ready to serve client requests"}
{"level":"info","ts":"2024-09-23T12:06:01.457606Z","caller":"membership/cluster.go:584","msg":"set initial cluster version","cluster-id":"fa54960ea34d58be","local-member-id":"aec36adc501070cc","cluster-version":"3.5"}
{"level":"info","ts":"2024-09-23T12:06:01.457658Z","caller":"etcdmain/main.go:44","msg":"notifying init daemon"}
{"level":"info","ts":"2024-09-23T12:06:01.457677Z","caller":"etcdmain/main.go:50","msg":"successfully notified init daemon"}
{"level":"info","ts":"2024-09-23T12:06:01.457718Z","caller":"api/capability.go:75","msg":"enabled capabilities for version","cluster-version":"3.5"}
{"level":"info","ts":"2024-09-23T12:06:01.457749Z","caller":"etcdserver/server.go:2653","msg":"cluster version is updated","cluster-version":"3.5"}
{"level":"info","ts":"2024-09-23T12:06:01.458643Z","caller":"v3rpc/health.go:61","msg":"grpc service status changed","service":"","status":"SERVING"}
{"level":"info","ts":"2024-09-23T12:06:01.458725Z","caller":"v3rpc/health.go:61","msg":"grpc service status changed","service":"","status":"SERVING"}
{"level":"info","ts":"2024-09-23T12:06:01.459578Z","caller":"embed/serve.go:250","msg":"serving client traffic securely","traffic":"grpc+http","address":"192.168.49.2:2379"}
{"level":"info","ts":"2024-09-23T12:06:01.459851Z","caller":"embed/serve.go:250","msg":"serving client traffic securely","traffic":"grpc+http","address":"127.0.0.1:2379"}
{"level":"info","ts":"2024-09-23T12:06:18.257843Z","caller":"traceutil/trace.go:171","msg":"trace[921347969] linearizableReadLoop","detail":"{readStateIndex:580; appliedIndex:579; }","duration":"101.514517ms","start":"2024-09-23T12:06:18.156310Z","end":"2024-09-23T12:06:18.257824Z","steps":["trace[921347969] 'read index received' (duration: 4.094629ms)","trace[921347969] 'applied index is now lower than readState.Index' (duration: 97.419233ms)"],"step_count":2}
{"level":"info","ts":"2024-09-23T12:06:18.258105Z","caller":"traceutil/trace.go:171","msg":"trace[1506143601] transaction","detail":"{read_only:false; response_revision:566; number_of_response:1; }","duration":"103.330433ms","start":"2024-09-23T12:06:18.154759Z","end":"2024-09-23T12:06:18.258089Z","steps":["trace[1506143601] 'process raft request' (duration: 102.962295ms)"],"step_count":1}
{"level":"warn","ts":"2024-09-23T12:06:18.258324Z","caller":"etcdserver/util.go:170","msg":"apply request took too long","took":"101.992027ms","expected-duration":"100ms","prefix":"read-only range ","request":"key:\"/registry/health\" ","response":"range_response_count:0 size:5"}
{"level":"info","ts":"2024-09-23T12:06:18.258387Z","caller":"traceutil/trace.go:171","msg":"trace[732255825] range","detail":"{range_begin:/registry/health; range_end:; response_count:0; response_revision:566; }","duration":"102.066206ms","start":"2024-09-23T12:06:18.156305Z","end":"2024-09-23T12:06:18.258371Z","steps":["trace[732255825] 'agreement among raft nodes before linearized reading' (duration: 101.964684ms)"],"step_count":1}
{"level":"info","ts":"2024-09-23T12:16:01.777063Z","caller":"mvcc/index.go:214","msg":"compact tree index","revision":1856}
{"level":"info","ts":"2024-09-23T12:16:01.802307Z","caller":"mvcc/kvstore_compaction.go:69","msg":"finished scheduled compaction","compact-revision":1856,"took":"24.648191ms","hash":1199597141,"current-db-size-bytes":8941568,"current-db-size":"8.9 MB","current-db-size-in-use-bytes":4853760,"current-db-size-in-use":"4.9 MB"}
{"level":"info","ts":"2024-09-23T12:16:01.802363Z","caller":"mvcc/hash.go:137","msg":"storing new hash","hash":1199597141,"revision":1856,"compact-revision":-1}
==> gcp-auth [fd300055d110] <==
2024/09/23 12:09:36 Ready to write response ...
2024/09/23 12:09:36 Ready to marshal response ...
2024/09/23 12:09:36 Ready to write response ...
2024/09/23 12:17:39 Ready to marshal response ...
2024/09/23 12:17:39 Ready to write response ...
2024/09/23 12:17:39 Ready to marshal response ...
2024/09/23 12:17:39 Ready to write response ...
2024/09/23 12:17:39 Ready to marshal response ...
2024/09/23 12:17:39 Ready to write response ...
2024/09/23 12:17:48 Ready to marshal response ...
2024/09/23 12:17:48 Ready to write response ...
2024/09/23 12:17:49 Ready to marshal response ...
2024/09/23 12:17:49 Ready to write response ...
2024/09/23 12:17:55 Ready to marshal response ...
2024/09/23 12:17:55 Ready to write response ...
2024/09/23 12:18:05 Ready to marshal response ...
2024/09/23 12:18:05 Ready to write response ...
2024/09/23 12:18:07 Ready to marshal response ...
2024/09/23 12:18:07 Ready to write response ...
2024/09/23 12:18:12 Ready to marshal response ...
2024/09/23 12:18:12 Ready to write response ...
2024/09/23 12:18:12 Ready to marshal response ...
2024/09/23 12:18:12 Ready to write response ...
2024/09/23 12:18:23 Ready to marshal response ...
2024/09/23 12:18:23 Ready to write response ...
==> kernel <==
12:18:50 up 1 day, 20:00, 0 users, load average: 0.27, 0.47, 1.35
Linux addons-242108 5.15.0-1069-gcp #77~20.04.1-Ubuntu SMP Sun Sep 1 19:39:16 UTC 2024 x86_64 x86_64 x86_64 GNU/Linux
PRETTY_NAME="Ubuntu 22.04.5 LTS"
==> kube-apiserver [cbb83f797f93] <==
W0923 12:09:28.773880 1 cacher.go:171] Terminating all watchers from cacher jobs.batch.volcano.sh
W0923 12:09:29.075953 1 cacher.go:171] Terminating all watchers from cacher jobflows.flow.volcano.sh
W0923 12:09:29.479148 1 cacher.go:171] Terminating all watchers from cacher jobtemplates.flow.volcano.sh
I0923 12:17:39.572758 1 alloc.go:330] "allocated clusterIPs" service="headlamp/headlamp" clusterIPs={"IPv4":"10.108.98.227"}
I0923 12:17:55.790401 1 controller.go:615] quota admission added evaluator for: ingresses.networking.k8s.io
I0923 12:17:55.957693 1 alloc.go:330] "allocated clusterIPs" service="default/nginx" clusterIPs={"IPv4":"10.98.100.163"}
I0923 12:17:56.361819 1 controller.go:615] quota admission added evaluator for: volumesnapshots.snapshot.storage.k8s.io
I0923 12:18:07.194728 1 handler.go:286] Adding GroupVersion gadget.kinvolk.io v1alpha1 to ResourceManager
I0923 12:18:07.563085 1 alloc.go:330] "allocated clusterIPs" service="default/hello-world-app" clusterIPs={"IPv4":"10.109.61.205"}
W0923 12:18:08.208894 1 cacher.go:171] Terminating all watchers from cacher traces.gadget.kinvolk.io
I0923 12:18:21.528445 1 handler.go:286] Adding GroupVersion snapshot.storage.k8s.io v1 to ResourceManager
I0923 12:18:21.528512 1 handler.go:286] Adding GroupVersion snapshot.storage.k8s.io v1beta1 to ResourceManager
I0923 12:18:21.540638 1 handler.go:286] Adding GroupVersion snapshot.storage.k8s.io v1 to ResourceManager
I0923 12:18:21.540699 1 handler.go:286] Adding GroupVersion snapshot.storage.k8s.io v1beta1 to ResourceManager
I0923 12:18:21.542160 1 handler.go:286] Adding GroupVersion snapshot.storage.k8s.io v1 to ResourceManager
I0923 12:18:21.542205 1 handler.go:286] Adding GroupVersion snapshot.storage.k8s.io v1beta1 to ResourceManager
I0923 12:18:21.555535 1 handler.go:286] Adding GroupVersion snapshot.storage.k8s.io v1 to ResourceManager
I0923 12:18:21.555587 1 handler.go:286] Adding GroupVersion snapshot.storage.k8s.io v1beta1 to ResourceManager
I0923 12:18:21.575590 1 handler.go:286] Adding GroupVersion snapshot.storage.k8s.io v1 to ResourceManager
I0923 12:18:21.575634 1 handler.go:286] Adding GroupVersion snapshot.storage.k8s.io v1beta1 to ResourceManager
W0923 12:18:22.543116 1 cacher.go:171] Terminating all watchers from cacher volumesnapshotclasses.snapshot.storage.k8s.io
W0923 12:18:22.576580 1 cacher.go:171] Terminating all watchers from cacher volumesnapshots.snapshot.storage.k8s.io
W0923 12:18:22.664371 1 cacher.go:171] Terminating all watchers from cacher volumesnapshotcontents.snapshot.storage.k8s.io
E0923 12:18:39.131839 1 authentication.go:73] "Unable to authenticate the request" err="[invalid bearer token, serviceaccounts \"local-path-provisioner-service-account\" not found]"
I0923 12:18:43.804594 1 controller.go:129] OpenAPI AggregationController: action for item v1beta1.metrics.k8s.io: Nothing (removed from the queue).
==> kube-controller-manager [fffe4f5c9485] <==
E0923 12:18:29.436801 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError"
W0923 12:18:31.110182 1 reflector.go:561] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0923 12:18:31.110227 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError"
W0923 12:18:32.238797 1 reflector.go:561] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0923 12:18:32.238840 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError"
W0923 12:18:34.276946 1 reflector.go:561] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0923 12:18:34.276990 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError"
W0923 12:18:35.636744 1 reflector.go:561] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0923 12:18:35.636798 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError"
W0923 12:18:36.624570 1 reflector.go:561] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0923 12:18:36.624621 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError"
W0923 12:18:38.696560 1 reflector.go:561] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0923 12:18:38.696609 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError"
W0923 12:18:39.292000 1 reflector.go:561] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0923 12:18:39.292051 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError"
I0923 12:18:40.552158 1 shared_informer.go:313] Waiting for caches to sync for resource quota
I0923 12:18:40.552196 1 shared_informer.go:320] Caches are synced for resource quota
I0923 12:18:40.763569 1 shared_informer.go:313] Waiting for caches to sync for garbage collector
I0923 12:18:40.763611 1 shared_informer.go:320] Caches are synced for garbage collector
I0923 12:18:40.787516 1 range_allocator.go:241] "Successfully synced" logger="node-ipam-controller" key="addons-242108"
W0923 12:18:42.300421 1 reflector.go:561] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0923 12:18:42.300466 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError"
W0923 12:18:43.853573 1 reflector.go:561] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0923 12:18:43.853620 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError"
I0923 12:18:49.632747 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kube-system/registry-66c9cd494c" duration="4.698µs"
==> kube-proxy [6335cb35db6c] <==
I0923 12:06:13.956680 1 server_linux.go:66] "Using iptables proxy"
I0923 12:06:14.555437 1 server.go:677] "Successfully retrieved node IP(s)" IPs=["192.168.49.2"]
E0923 12:06:14.555515 1 server.go:234] "Kube-proxy configuration may be incomplete or incorrect" err="nodePortAddresses is unset; NodePort connections will be accepted on all local IPs. Consider using `--nodeport-addresses primary`"
I0923 12:06:14.867393 1 server.go:243] "kube-proxy running in dual-stack mode" primary ipFamily="IPv4"
I0923 12:06:14.867480 1 server_linux.go:169] "Using iptables Proxier"
I0923 12:06:14.953107 1 proxier.go:255] "Setting route_localnet=1 to allow node-ports on localhost; to change this either disable iptables.localhostNodePorts (--iptables-localhost-nodeports) or set nodePortAddresses (--nodeport-addresses) to filter loopback addresses" ipFamily="IPv4"
I0923 12:06:14.955461 1 server.go:483] "Version info" version="v1.31.1"
I0923 12:06:14.955492 1 server.go:485] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
I0923 12:06:14.958443 1 config.go:199] "Starting service config controller"
I0923 12:06:14.958480 1 shared_informer.go:313] Waiting for caches to sync for service config
I0923 12:06:15.053590 1 config.go:105] "Starting endpoint slice config controller"
I0923 12:06:15.053620 1 shared_informer.go:313] Waiting for caches to sync for endpoint slice config
I0923 12:06:15.061840 1 config.go:328] "Starting node config controller"
I0923 12:06:15.061865 1 shared_informer.go:313] Waiting for caches to sync for node config
I0923 12:06:15.151774 1 shared_informer.go:320] Caches are synced for service config
I0923 12:06:15.161730 1 shared_informer.go:320] Caches are synced for endpoint slice config
I0923 12:06:15.162073 1 shared_informer.go:320] Caches are synced for node config
==> kube-scheduler [f13313f58364] <==
W0923 12:06:03.260067 1 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.ReplicationController: replicationcontrollers is forbidden: User "system:kube-scheduler" cannot list resource "replicationcontrollers" in API group "" at the cluster scope
E0923 12:06:03.261045 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.ReplicationController: failed to list *v1.ReplicationController: replicationcontrollers is forbidden: User \"system:kube-scheduler\" cannot list resource \"replicationcontrollers\" in API group \"\" at the cluster scope" logger="UnhandledError"
W0923 12:06:03.260208 1 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumes" in API group "" at the cluster scope
E0923 12:06:03.261075 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.PersistentVolume: failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User \"system:kube-scheduler\" cannot list resource \"persistentvolumes\" in API group \"\" at the cluster scope" logger="UnhandledError"
W0923 12:06:03.260278 1 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: csidrivers.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csidrivers" in API group "storage.k8s.io" at the cluster scope
E0923 12:06:03.261102 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: csidrivers.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"csidrivers\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError"
W0923 12:06:03.266140 1 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: services is forbidden: User "system:kube-scheduler" cannot list resource "services" in API group "" at the cluster scope
W0923 12:06:03.266440 1 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Pod: pods is forbidden: User "system:kube-scheduler" cannot list resource "pods" in API group "" at the cluster scope
W0923 12:06:03.266550 1 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSINode: csinodes.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csinodes" in API group "storage.k8s.io" at the cluster scope
W0923 12:06:03.266174 1 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: nodes is forbidden: User "system:kube-scheduler" cannot list resource "nodes" in API group "" at the cluster scope
E0923 12:06:03.266714 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSINode: failed to list *v1.CSINode: csinodes.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"csinodes\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError"
E0923 12:06:03.266387 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: services is forbidden: User \"system:kube-scheduler\" cannot list resource \"services\" in API group \"\" at the cluster scope" logger="UnhandledError"
E0923 12:06:03.266836 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: nodes is forbidden: User \"system:kube-scheduler\" cannot list resource \"nodes\" in API group \"\" at the cluster scope" logger="UnhandledError"
W0923 12:06:03.266272 1 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.ReplicaSet: replicasets.apps is forbidden: User "system:kube-scheduler" cannot list resource "replicasets" in API group "apps" at the cluster scope
E0923 12:06:03.267224 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.ReplicaSet: failed to list *v1.ReplicaSet: replicasets.apps is forbidden: User \"system:kube-scheduler\" cannot list resource \"replicasets\" in API group \"apps\" at the cluster scope" logger="UnhandledError"
E0923 12:06:03.266585 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Pod: failed to list *v1.Pod: pods is forbidden: User \"system:kube-scheduler\" cannot list resource \"pods\" in API group \"\" at the cluster scope" logger="UnhandledError"
W0923 12:06:04.177579 1 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User "system:kube-scheduler" cannot list resource "poddisruptionbudgets" in API group "policy" at the cluster scope
E0923 12:06:04.177638 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.PodDisruptionBudget: failed to list *v1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User \"system:kube-scheduler\" cannot list resource \"poddisruptionbudgets\" in API group \"policy\" at the cluster scope" logger="UnhandledError"
W0923 12:06:04.210005 1 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.StatefulSet: statefulsets.apps is forbidden: User "system:kube-scheduler" cannot list resource "statefulsets" in API group "apps" at the cluster scope
E0923 12:06:04.210061 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.StatefulSet: failed to list *v1.StatefulSet: statefulsets.apps is forbidden: User \"system:kube-scheduler\" cannot list resource \"statefulsets\" in API group \"apps\" at the cluster scope" logger="UnhandledError"
W0923 12:06:04.302959 1 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Pod: pods is forbidden: User "system:kube-scheduler" cannot list resource "pods" in API group "" at the cluster scope
E0923 12:06:04.303000 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Pod: failed to list *v1.Pod: pods is forbidden: User \"system:kube-scheduler\" cannot list resource \"pods\" in API group \"\" at the cluster scope" logger="UnhandledError"
W0923 12:06:04.338456 1 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: csidrivers.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csidrivers" in API group "storage.k8s.io" at the cluster scope
E0923 12:06:04.338499 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: csidrivers.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"csidrivers\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError"
I0923 12:06:04.656658 1 shared_informer.go:320] Caches are synced for client-ca::kube-system::extension-apiserver-authentication::client-ca-file
==> kubelet <==
Sep 23 12:18:26 addons-242108 kubelet[2443]: I0923 12:18:26.717954 2443 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="14e35e13f0e3d2ccfdb1e491c3de6e8787e1a6a1f866b0b0b8c958265add8e94"
Sep 23 12:18:29 addons-242108 kubelet[2443]: I0923 12:18:29.565782 2443 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="41cc77f7-1c7e-4f22-9f84-665010b11656" path="/var/lib/kubelet/pods/41cc77f7-1c7e-4f22-9f84-665010b11656/volumes"
Sep 23 12:18:29 addons-242108 kubelet[2443]: E0923 12:18:29.700809 2443 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = Error response from daemon: Head \"https://gcr.io/v2/k8s-minikube/busybox/manifests/latest\": unauthorized: authentication failed" image="gcr.io/k8s-minikube/busybox:latest"
Sep 23 12:18:29 addons-242108 kubelet[2443]: E0923 12:18:29.700967 2443 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:registry-test,Image:gcr.io/k8s-minikube/busybox,Command:[],Args:[sh -c wget --spider -S http://registry.kube-system.svc.cluster.local],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:GOOGLE_APPLICATION_CREDENTIALS,Value:/google-app-creds.json,ValueFrom:nil,},EnvVar{Name:PROJECT_ID,Value:this_is_fake,ValueFrom:nil,},EnvVar{Name:GCP_PROJECT,Value:this_is_fake,ValueFrom:nil,},EnvVar{Name:GCLOUD_PROJECT,Value:this_is_fake,ValueFrom:nil,},EnvVar{Name:GOOGLE_CLOUD_PROJECT,Value:this_is_fake,ValueFrom:nil,},EnvVar{Name:CLOUDSDK_CORE_PROJECT,Value:this_is_fake,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-mtqh7,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:gcp-creds,ReadOnly:true,MountPath:/google-app-creds.json,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:nil,Stdin:true,StdinOnce:true,TTY:true,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod registry-test_default(abd52e85-cf29-4a0b-b91c-04d955ebe85c): ErrImagePull: Error response from daemon: Head \"https://gcr.io/v2/k8s-minikube/busybox/manifests/latest\": unauthorized: authentication failed" logger="UnhandledError"
Sep 23 12:18:29 addons-242108 kubelet[2443]: E0923 12:18:29.702086 2443 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"registry-test\" with ErrImagePull: \"Error response from daemon: Head \\\"https://gcr.io/v2/k8s-minikube/busybox/manifests/latest\\\": unauthorized: authentication failed\"" pod="default/registry-test" podUID="abd52e85-cf29-4a0b-b91c-04d955ebe85c"
Sep 23 12:18:30 addons-242108 kubelet[2443]: E0923 12:18:30.557773 2443 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"busybox\" with ImagePullBackOff: \"Back-off pulling image \\\"gcr.io/k8s-minikube/busybox:1.28.4-glibc\\\"\"" pod="default/busybox" podUID="185ee685-20ef-4456-9362-1ffca5abc5ce"
Sep 23 12:18:41 addons-242108 kubelet[2443]: E0923 12:18:41.560720 2443 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"registry-test\" with ImagePullBackOff: \"Back-off pulling image \\\"gcr.io/k8s-minikube/busybox\\\"\"" pod="default/registry-test" podUID="abd52e85-cf29-4a0b-b91c-04d955ebe85c"
Sep 23 12:18:44 addons-242108 kubelet[2443]: E0923 12:18:44.557422 2443 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"busybox\" with ImagePullBackOff: \"Back-off pulling image \\\"gcr.io/k8s-minikube/busybox:1.28.4-glibc\\\"\"" pod="default/busybox" podUID="185ee685-20ef-4456-9362-1ffca5abc5ce"
Sep 23 12:18:49 addons-242108 kubelet[2443]: I0923 12:18:49.403906 2443 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mtqh7\" (UniqueName: \"kubernetes.io/projected/abd52e85-cf29-4a0b-b91c-04d955ebe85c-kube-api-access-mtqh7\") pod \"abd52e85-cf29-4a0b-b91c-04d955ebe85c\" (UID: \"abd52e85-cf29-4a0b-b91c-04d955ebe85c\") "
Sep 23 12:18:49 addons-242108 kubelet[2443]: I0923 12:18:49.403958 2443 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"gcp-creds\" (UniqueName: \"kubernetes.io/host-path/abd52e85-cf29-4a0b-b91c-04d955ebe85c-gcp-creds\") pod \"abd52e85-cf29-4a0b-b91c-04d955ebe85c\" (UID: \"abd52e85-cf29-4a0b-b91c-04d955ebe85c\") "
Sep 23 12:18:49 addons-242108 kubelet[2443]: I0923 12:18:49.404062 2443 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/abd52e85-cf29-4a0b-b91c-04d955ebe85c-gcp-creds" (OuterVolumeSpecName: "gcp-creds") pod "abd52e85-cf29-4a0b-b91c-04d955ebe85c" (UID: "abd52e85-cf29-4a0b-b91c-04d955ebe85c"). InnerVolumeSpecName "gcp-creds". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Sep 23 12:18:49 addons-242108 kubelet[2443]: I0923 12:18:49.405783 2443 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/abd52e85-cf29-4a0b-b91c-04d955ebe85c-kube-api-access-mtqh7" (OuterVolumeSpecName: "kube-api-access-mtqh7") pod "abd52e85-cf29-4a0b-b91c-04d955ebe85c" (UID: "abd52e85-cf29-4a0b-b91c-04d955ebe85c"). InnerVolumeSpecName "kube-api-access-mtqh7". PluginName "kubernetes.io/projected", VolumeGidValue ""
Sep 23 12:18:49 addons-242108 kubelet[2443]: I0923 12:18:49.504285 2443 reconciler_common.go:288] "Volume detached for volume \"kube-api-access-mtqh7\" (UniqueName: \"kubernetes.io/projected/abd52e85-cf29-4a0b-b91c-04d955ebe85c-kube-api-access-mtqh7\") on node \"addons-242108\" DevicePath \"\""
Sep 23 12:18:49 addons-242108 kubelet[2443]: I0923 12:18:49.504322 2443 reconciler_common.go:288] "Volume detached for volume \"gcp-creds\" (UniqueName: \"kubernetes.io/host-path/abd52e85-cf29-4a0b-b91c-04d955ebe85c-gcp-creds\") on node \"addons-242108\" DevicePath \"\""
Sep 23 12:18:49 addons-242108 kubelet[2443]: I0923 12:18:49.975922 2443 scope.go:117] "RemoveContainer" containerID="66f393a66b36a422b3146b2841c225e7ce3e239551e4422352f4a6aa103b0ef5"
Sep 23 12:18:49 addons-242108 kubelet[2443]: I0923 12:18:49.996440 2443 scope.go:117] "RemoveContainer" containerID="66f393a66b36a422b3146b2841c225e7ce3e239551e4422352f4a6aa103b0ef5"
Sep 23 12:18:49 addons-242108 kubelet[2443]: E0923 12:18:49.997535 2443 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = Unknown desc = Error response from daemon: No such container: 66f393a66b36a422b3146b2841c225e7ce3e239551e4422352f4a6aa103b0ef5" containerID="66f393a66b36a422b3146b2841c225e7ce3e239551e4422352f4a6aa103b0ef5"
Sep 23 12:18:49 addons-242108 kubelet[2443]: I0923 12:18:49.997707 2443 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"docker","ID":"66f393a66b36a422b3146b2841c225e7ce3e239551e4422352f4a6aa103b0ef5"} err="failed to get container status \"66f393a66b36a422b3146b2841c225e7ce3e239551e4422352f4a6aa103b0ef5\": rpc error: code = Unknown desc = Error response from daemon: No such container: 66f393a66b36a422b3146b2841c225e7ce3e239551e4422352f4a6aa103b0ef5"
Sep 23 12:18:49 addons-242108 kubelet[2443]: I0923 12:18:49.997799 2443 scope.go:117] "RemoveContainer" containerID="2c1dd9e0fcc1a71950d08ca80b398a3229d5ff7c53e115f7883831b142f4c435"
Sep 23 12:18:50 addons-242108 kubelet[2443]: I0923 12:18:50.007433 2443 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gjrdx\" (UniqueName: \"kubernetes.io/projected/9c3d099a-ff8c-4fd5-8178-5ad9ed8d1f67-kube-api-access-gjrdx\") pod \"9c3d099a-ff8c-4fd5-8178-5ad9ed8d1f67\" (UID: \"9c3d099a-ff8c-4fd5-8178-5ad9ed8d1f67\") "
Sep 23 12:18:50 addons-242108 kubelet[2443]: I0923 12:18:50.009266 2443 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9c3d099a-ff8c-4fd5-8178-5ad9ed8d1f67-kube-api-access-gjrdx" (OuterVolumeSpecName: "kube-api-access-gjrdx") pod "9c3d099a-ff8c-4fd5-8178-5ad9ed8d1f67" (UID: "9c3d099a-ff8c-4fd5-8178-5ad9ed8d1f67"). InnerVolumeSpecName "kube-api-access-gjrdx". PluginName "kubernetes.io/projected", VolumeGidValue ""
Sep 23 12:18:50 addons-242108 kubelet[2443]: I0923 12:18:50.108743 2443 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xr6v8\" (UniqueName: \"kubernetes.io/projected/a13fee98-c1a9-4518-844c-031a36bf17b4-kube-api-access-xr6v8\") pod \"a13fee98-c1a9-4518-844c-031a36bf17b4\" (UID: \"a13fee98-c1a9-4518-844c-031a36bf17b4\") "
Sep 23 12:18:50 addons-242108 kubelet[2443]: I0923 12:18:50.108824 2443 reconciler_common.go:288] "Volume detached for volume \"kube-api-access-gjrdx\" (UniqueName: \"kubernetes.io/projected/9c3d099a-ff8c-4fd5-8178-5ad9ed8d1f67-kube-api-access-gjrdx\") on node \"addons-242108\" DevicePath \"\""
Sep 23 12:18:50 addons-242108 kubelet[2443]: I0923 12:18:50.110696 2443 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a13fee98-c1a9-4518-844c-031a36bf17b4-kube-api-access-xr6v8" (OuterVolumeSpecName: "kube-api-access-xr6v8") pod "a13fee98-c1a9-4518-844c-031a36bf17b4" (UID: "a13fee98-c1a9-4518-844c-031a36bf17b4"). InnerVolumeSpecName "kube-api-access-xr6v8". PluginName "kubernetes.io/projected", VolumeGidValue ""
Sep 23 12:18:50 addons-242108 kubelet[2443]: I0923 12:18:50.209263 2443 reconciler_common.go:288] "Volume detached for volume \"kube-api-access-xr6v8\" (UniqueName: \"kubernetes.io/projected/a13fee98-c1a9-4518-844c-031a36bf17b4-kube-api-access-xr6v8\") on node \"addons-242108\" DevicePath \"\""
==> storage-provisioner [9c4ac22159e6] <==
I0923 12:06:19.963801 1 storage_provisioner.go:116] Initializing the minikube storage provisioner...
I0923 12:06:20.147055 1 storage_provisioner.go:141] Storage provisioner initialized, now starting service!
I0923 12:06:20.147111 1 leaderelection.go:243] attempting to acquire leader lease kube-system/k8s.io-minikube-hostpath...
I0923 12:06:20.349946 1 leaderelection.go:253] successfully acquired lease kube-system/k8s.io-minikube-hostpath
I0923 12:06:20.350602 1 event.go:282] Event(v1.ObjectReference{Kind:"Endpoints", Namespace:"kube-system", Name:"k8s.io-minikube-hostpath", UID:"49777791-c22b-4c5c-a8b8-85cee0dfa538", APIVersion:"v1", ResourceVersion:"645", FieldPath:""}): type: 'Normal' reason: 'LeaderElection' addons-242108_8e2c7e47-7aa4-494c-b203-18de92fee909 became leader
I0923 12:06:20.350646 1 controller.go:835] Starting provisioner controller k8s.io/minikube-hostpath_addons-242108_8e2c7e47-7aa4-494c-b203-18de92fee909!
I0923 12:06:20.451428 1 controller.go:884] Started provisioner controller k8s.io/minikube-hostpath_addons-242108_8e2c7e47-7aa4-494c-b203-18de92fee909!
-- /stdout --
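Context note: the storage-provisioner log above shows the pod acquiring the kube-system/k8s.io-minikube-hostpath lock via leader election before starting its controller. For orientation only, here is a minimal client-go leader-election sketch; it is not the provisioner's actual source, and it uses a Lease lock rather than the Endpoints lock visible in the event above.

    package main

    import (
    	"context"
    	"os"
    	"time"

    	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    	"k8s.io/client-go/kubernetes"
    	"k8s.io/client-go/rest"
    	"k8s.io/client-go/tools/leaderelection"
    	"k8s.io/client-go/tools/leaderelection/resourcelock"
    )

    func main() {
    	cfg, err := rest.InClusterConfig() // assumes the process runs inside the cluster
    	if err != nil {
    		panic(err)
    	}
    	client := kubernetes.NewForConfigOrDie(cfg)

    	hostname, _ := os.Hostname()
    	lock := &resourcelock.LeaseLock{
    		LeaseMeta: metav1.ObjectMeta{
    			Name:      "k8s.io-minikube-hostpath", // lock name seen in the log above
    			Namespace: "kube-system",
    		},
    		Client:     client.CoordinationV1(),
    		LockConfig: resourcelock.ResourceLockConfig{Identity: hostname},
    	}

    	leaderelection.RunOrDie(context.Background(), leaderelection.LeaderElectionConfig{
    		Lock:          lock,
    		LeaseDuration: 15 * time.Second,
    		RenewDeadline: 10 * time.Second,
    		RetryPeriod:   2 * time.Second,
    		Callbacks: leaderelection.LeaderCallbacks{
    			OnStartedLeading: func(ctx context.Context) {
    				// Only the elected instance runs its controller loop here.
    			},
    			OnStoppedLeading: func() {
    				// Lost the lease; stop doing leader-only work.
    			},
    		},
    	})
    }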
helpers_test.go:254: (dbg) Run: out/minikube-linux-amd64 status --format={{.APIServer}} -p addons-242108 -n addons-242108
helpers_test.go:261: (dbg) Run: kubectl --context addons-242108 get po -o=jsonpath={.items[*].metadata.name} -A --field-selector=status.phase!=Running
helpers_test.go:272: non-running pods: busybox
helpers_test.go:274: ======> post-mortem[TestAddons/parallel/Registry]: describe non-running pods <======
helpers_test.go:277: (dbg) Run: kubectl --context addons-242108 describe pod busybox
helpers_test.go:282: (dbg) kubectl --context addons-242108 describe pod busybox:
-- stdout --
Name:             busybox
Namespace:        default
Priority:         0
Service Account:  default
Node:             addons-242108/192.168.49.2
Start Time:       Mon, 23 Sep 2024 12:09:36 +0000
Labels:           integration-test=busybox
Annotations:      <none>
Status:           Pending
IP:               10.244.0.27
IPs:
  IP:  10.244.0.27
Containers:
  busybox:
    Container ID:
    Image:          gcr.io/k8s-minikube/busybox:1.28.4-glibc
    Image ID:
    Port:           <none>
    Host Port:      <none>
    Command:
      sleep
      3600
    State:          Waiting
      Reason:       ImagePullBackOff
    Ready:          False
    Restart Count:  0
    Environment:
      GOOGLE_APPLICATION_CREDENTIALS:  /google-app-creds.json
      PROJECT_ID:                      this_is_fake
      GCP_PROJECT:                     this_is_fake
      GCLOUD_PROJECT:                  this_is_fake
      GOOGLE_CLOUD_PROJECT:            this_is_fake
      CLOUDSDK_CORE_PROJECT:           this_is_fake
    Mounts:
      /google-app-creds.json from gcp-creds (ro)
      /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-vpkxd (ro)
Conditions:
  Type                       Status
  PodReadyToStartContainers  True
  Initialized                True
  Ready                      False
  ContainersReady            False
  PodScheduled               True
Volumes:
  kube-api-access-vpkxd:
    Type:                    Projected (a volume that contains injected data from multiple sources)
    TokenExpirationSeconds:  3607
    ConfigMapName:           kube-root-ca.crt
    ConfigMapOptional:       <nil>
    DownwardAPI:             true
  gcp-creds:
    Type:          HostPath (bare host directory volume)
    Path:          /var/lib/minikube/google_application_credentials.json
    HostPathType:  File
QoS Class:         BestEffort
Node-Selectors:    <none>
Tolerations:       node.kubernetes.io/not-ready:NoExecute op=Exists for 300s
                   node.kubernetes.io/unreachable:NoExecute op=Exists for 300s
Events:
  Type     Reason     Age                    From               Message
  ----     ------     ----                   ----               -------
  Normal   Scheduled  9m15s                  default-scheduler  Successfully assigned default/busybox to addons-242108
  Normal   Pulling    7m55s (x4 over 9m14s)  kubelet            Pulling image "gcr.io/k8s-minikube/busybox:1.28.4-glibc"
  Warning  Failed     7m55s (x4 over 9m14s)  kubelet            Failed to pull image "gcr.io/k8s-minikube/busybox:1.28.4-glibc": Error response from daemon: Head "https://gcr.io/v2/k8s-minikube/busybox/manifests/1.28.4-glibc": unauthorized: authentication failed
  Warning  Failed     7m55s (x4 over 9m14s)  kubelet            Error: ErrImagePull
  Warning  Failed     7m31s (x6 over 9m14s)  kubelet            Error: ImagePullBackOff
  Normal   BackOff    4m1s (x21 over 9m14s)  kubelet            Back-off pulling image "gcr.io/k8s-minikube/busybox:1.28.4-glibc"
-- /stdout --
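Context note: the describe output above shows default/busybox stuck in ImagePullBackOff because the pull of gcr.io/k8s-minikube/busybox:1.28.4-glibc was rejected with "unauthorized: authentication failed". As a rough illustration of how such stuck pods can be surfaced programmatically, the hypothetical client-go helper below (not part of helpers_test.go) lists containers waiting on image pulls; kubeconfig loading via clientcmd.RecommendedHomeFile is an assumption.

    // Hypothetical helper: report containers stuck on image pulls, as busybox is above.
    package main

    import (
    	"context"
    	"fmt"

    	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    	"k8s.io/client-go/kubernetes"
    	"k8s.io/client-go/tools/clientcmd"
    )

    func main() {
    	// Load the local kubeconfig (~/.kube/config); assumed setup, not the test harness's own plumbing.
    	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
    	if err != nil {
    		panic(err)
    	}
    	client := kubernetes.NewForConfigOrDie(cfg)

    	pods, err := client.CoreV1().Pods("default").List(context.Background(), metav1.ListOptions{})
    	if err != nil {
    		panic(err)
    	}
    	for _, pod := range pods.Items {
    		for _, cs := range pod.Status.ContainerStatuses {
    			// A waiting state of ErrImagePull/ImagePullBackOff is what `kubectl describe` reports above.
    			if w := cs.State.Waiting; w != nil && (w.Reason == "ImagePullBackOff" || w.Reason == "ErrImagePull") {
    				fmt.Printf("%s/%s container %s: %s (%s)\n",
    					pod.Namespace, pod.Name, cs.Name, w.Reason, w.Message)
    			}
    		}
    	}
    }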
helpers_test.go:285: <<< TestAddons/parallel/Registry FAILED: end of post-mortem logs <<<
helpers_test.go:286: ---------------------/post-mortem---------------------------------
--- FAIL: TestAddons/parallel/Registry (72.48s)
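Context note: the proximate failure is the in-cluster wget against registry.kube-system.svc.cluster.local timing out (the registry-test pod uses the same gcr.io/k8s-minikube/busybox image that fails to pull above), after which the test probed http://192.168.49.2:5000 from the host. Purely as an illustrative out-of-band check, not part of the minikube suite, the registry's HTTP API can be probed directly at that node address; the 5-second timeout is an arbitrary assumption.

    // Illustrative only: probe the registry's /v2/ base endpoint at the node address the test logged.
    package main

    import (
    	"fmt"
    	"net/http"
    	"time"
    )

    func main() {
    	client := &http.Client{Timeout: 5 * time.Second}
    	resp, err := client.Get("http://192.168.49.2:5000/v2/") // Docker Registry HTTP API v2 base endpoint
    	if err != nil {
    		fmt.Println("registry unreachable:", err)
    		return
    	}
    	defer resp.Body.Close()
    	fmt.Println("registry responded:", resp.Status) // a healthy registry returns 200 OK here
    }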