=== RUN TestAddons/parallel/Ingress
=== PAUSE TestAddons/parallel/Ingress
=== CONT TestAddons/parallel/Ingress
addons_test.go:206: (dbg) Run: kubectl --context addons-946218 wait --for=condition=ready --namespace=ingress-nginx pod --selector=app.kubernetes.io/component=controller --timeout=90s
addons_test.go:231: (dbg) Run: kubectl --context addons-946218 replace --force -f testdata/nginx-ingress-v1.yaml
addons_test.go:244: (dbg) Run: kubectl --context addons-946218 replace --force -f testdata/nginx-pod-svc.yaml
addons_test.go:249: (dbg) TestAddons/parallel/Ingress: waiting 8m0s for pods matching "run=nginx" in namespace "default" ...
helpers_test.go:344: "nginx" [e5c19369-f547-463f-9bca-72592cfa8081] Pending / Ready:ContainersNotReady (containers with unready status: [nginx]) / ContainersReady:ContainersNotReady (containers with unready status: [nginx])
helpers_test.go:344: "nginx" [e5c19369-f547-463f-9bca-72592cfa8081] Running
addons_test.go:249: (dbg) TestAddons/parallel/Ingress: run=nginx healthy within 10.017678841s
addons_test.go:261: (dbg) Run: out/minikube-linux-arm64 -p addons-946218 ssh "curl -s http://127.0.0.1/ -H 'Host: nginx.example.com'"
addons_test.go:285: (dbg) Run: kubectl --context addons-946218 replace --force -f testdata/ingress-dns-example-v1.yaml
addons_test.go:290: (dbg) Run: out/minikube-linux-arm64 -p addons-946218 ip
addons_test.go:296: (dbg) Run: nslookup hello-john.test 192.168.49.2
addons_test.go:296: (dbg) Non-zero exit: nslookup hello-john.test 192.168.49.2: exit status 1 (15.056261302s)
-- stdout --
;; connection timed out; no servers could be reached
-- /stdout --
addons_test.go:298: failed to nslookup hello-john.test host. args "nslookup hello-john.test 192.168.49.2" : exit status 1
addons_test.go:302: unexpected output from nslookup. stdout: ;; connection timed out; no servers could be reached
stderr:
addons_test.go:305: (dbg) Run: out/minikube-linux-arm64 -p addons-946218 addons disable ingress-dns --alsologtostderr -v=1
addons_test.go:305: (dbg) Done: out/minikube-linux-arm64 -p addons-946218 addons disable ingress-dns --alsologtostderr -v=1: (1.314558899s)
addons_test.go:310: (dbg) Run: out/minikube-linux-arm64 -p addons-946218 addons disable ingress --alsologtostderr -v=1
addons_test.go:310: (dbg) Done: out/minikube-linux-arm64 -p addons-946218 addons disable ingress --alsologtostderr -v=1: (7.708580512s)
helpers_test.go:222: -----------------------post-mortem--------------------------------
helpers_test.go:230: ======> post-mortem[TestAddons/parallel/Ingress]: docker inspect <======
helpers_test.go:231: (dbg) Run: docker inspect addons-946218
helpers_test.go:235: (dbg) docker inspect addons-946218:
-- stdout --
[
{
"Id": "4063cc8e22dc4d243949f18baf15d11eebf2eadd93529a9a1ef302a27f6379fb",
"Created": "2023-12-07T20:02:14.422683765Z",
"Path": "/usr/local/bin/entrypoint",
"Args": [
"/sbin/init"
],
"State": {
"Status": "running",
"Running": true,
"Paused": false,
"Restarting": false,
"OOMKilled": false,
"Dead": false,
"Pid": 8660,
"ExitCode": 0,
"Error": "",
"StartedAt": "2023-12-07T20:02:14.778990063Z",
"FinishedAt": "0001-01-01T00:00:00Z"
},
"Image": "sha256:769b0b780370d646693e9d8a4170c38d193d2f33565406ee9066915c40e406d4",
"ResolvConfPath": "/var/lib/docker/containers/4063cc8e22dc4d243949f18baf15d11eebf2eadd93529a9a1ef302a27f6379fb/resolv.conf",
"HostnamePath": "/var/lib/docker/containers/4063cc8e22dc4d243949f18baf15d11eebf2eadd93529a9a1ef302a27f6379fb/hostname",
"HostsPath": "/var/lib/docker/containers/4063cc8e22dc4d243949f18baf15d11eebf2eadd93529a9a1ef302a27f6379fb/hosts",
"LogPath": "/var/lib/docker/containers/4063cc8e22dc4d243949f18baf15d11eebf2eadd93529a9a1ef302a27f6379fb/4063cc8e22dc4d243949f18baf15d11eebf2eadd93529a9a1ef302a27f6379fb-json.log",
"Name": "/addons-946218",
"RestartCount": 0,
"Driver": "overlay2",
"Platform": "linux",
"MountLabel": "",
"ProcessLabel": "",
"AppArmorProfile": "unconfined",
"ExecIDs": null,
"HostConfig": {
"Binds": [
"/lib/modules:/lib/modules:ro",
"addons-946218:/var"
],
"ContainerIDFile": "",
"LogConfig": {
"Type": "json-file",
"Config": {}
},
"NetworkMode": "addons-946218",
"PortBindings": {
"22/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"2376/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"32443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"5000/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"8443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
]
},
"RestartPolicy": {
"Name": "no",
"MaximumRetryCount": 0
},
"AutoRemove": false,
"VolumeDriver": "",
"VolumesFrom": null,
"ConsoleSize": [
0,
0
],
"CapAdd": null,
"CapDrop": null,
"CgroupnsMode": "host",
"Dns": [],
"DnsOptions": [],
"DnsSearch": [],
"ExtraHosts": null,
"GroupAdd": null,
"IpcMode": "private",
"Cgroup": "",
"Links": null,
"OomScoreAdj": 0,
"PidMode": "",
"Privileged": true,
"PublishAllPorts": false,
"ReadonlyRootfs": false,
"SecurityOpt": [
"seccomp=unconfined",
"apparmor=unconfined",
"label=disable"
],
"Tmpfs": {
"/run": "",
"/tmp": ""
},
"UTSMode": "",
"UsernsMode": "",
"ShmSize": 67108864,
"Runtime": "runc",
"Isolation": "",
"CpuShares": 0,
"Memory": 4194304000,
"NanoCpus": 2000000000,
"CgroupParent": "",
"BlkioWeight": 0,
"BlkioWeightDevice": [],
"BlkioDeviceReadBps": [],
"BlkioDeviceWriteBps": [],
"BlkioDeviceReadIOps": [],
"BlkioDeviceWriteIOps": [],
"CpuPeriod": 0,
"CpuQuota": 0,
"CpuRealtimePeriod": 0,
"CpuRealtimeRuntime": 0,
"CpusetCpus": "",
"CpusetMems": "",
"Devices": [],
"DeviceCgroupRules": null,
"DeviceRequests": null,
"MemoryReservation": 0,
"MemorySwap": 8388608000,
"MemorySwappiness": null,
"OomKillDisable": false,
"PidsLimit": null,
"Ulimits": null,
"CpuCount": 0,
"CpuPercent": 0,
"IOMaximumIOps": 0,
"IOMaximumBandwidth": 0,
"MaskedPaths": null,
"ReadonlyPaths": null
},
"GraphDriver": {
"Data": {
"LowerDir": "/var/lib/docker/overlay2/d31a8d799f33954b837d7af69167c943d49d340ae3cf3e44f45d2d95d64afaf2-init/diff:/var/lib/docker/overlay2/baac1057f1861dfdebb7423d9d7ad7a05f930e41cec62cfa33740325cb982d86/diff",
"MergedDir": "/var/lib/docker/overlay2/d31a8d799f33954b837d7af69167c943d49d340ae3cf3e44f45d2d95d64afaf2/merged",
"UpperDir": "/var/lib/docker/overlay2/d31a8d799f33954b837d7af69167c943d49d340ae3cf3e44f45d2d95d64afaf2/diff",
"WorkDir": "/var/lib/docker/overlay2/d31a8d799f33954b837d7af69167c943d49d340ae3cf3e44f45d2d95d64afaf2/work"
},
"Name": "overlay2"
},
"Mounts": [
{
"Type": "bind",
"Source": "/lib/modules",
"Destination": "/lib/modules",
"Mode": "ro",
"RW": false,
"Propagation": "rprivate"
},
{
"Type": "volume",
"Name": "addons-946218",
"Source": "/var/lib/docker/volumes/addons-946218/_data",
"Destination": "/var",
"Driver": "local",
"Mode": "z",
"RW": true,
"Propagation": ""
}
],
"Config": {
"Hostname": "addons-946218",
"Domainname": "",
"User": "",
"AttachStdin": false,
"AttachStdout": false,
"AttachStderr": false,
"ExposedPorts": {
"22/tcp": {},
"2376/tcp": {},
"32443/tcp": {},
"5000/tcp": {},
"8443/tcp": {}
},
"Tty": true,
"OpenStdin": false,
"StdinOnce": false,
"Env": [
"container=docker",
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
],
"Cmd": null,
"Image": "gcr.io/k8s-minikube/kicbase-builds:v0.0.42-1701974066-17719@sha256:cec630e7d143790c46e2dc54dbb8f39a22d8ede3e3c25e34638082e2c107a85c",
"Volumes": null,
"WorkingDir": "/",
"Entrypoint": [
"/usr/local/bin/entrypoint",
"/sbin/init"
],
"OnBuild": null,
"Labels": {
"created_by.minikube.sigs.k8s.io": "true",
"mode.minikube.sigs.k8s.io": "addons-946218",
"name.minikube.sigs.k8s.io": "addons-946218",
"role.minikube.sigs.k8s.io": ""
},
"StopSignal": "SIGRTMIN+3"
},
"NetworkSettings": {
"Bridge": "",
"SandboxID": "3a4eee3660c6fb02b5b5c447050b03765b57083007e8c5c7d189ee2c6410d7f4",
"HairpinMode": false,
"LinkLocalIPv6Address": "",
"LinkLocalIPv6PrefixLen": 0,
"Ports": {
"22/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "32772"
}
],
"2376/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "32771"
}
],
"32443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "32768"
}
],
"5000/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "32770"
}
],
"8443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "32769"
}
]
},
"SandboxKey": "/var/run/docker/netns/3a4eee3660c6",
"SecondaryIPAddresses": null,
"SecondaryIPv6Addresses": null,
"EndpointID": "",
"Gateway": "",
"GlobalIPv6Address": "",
"GlobalIPv6PrefixLen": 0,
"IPAddress": "",
"IPPrefixLen": 0,
"IPv6Gateway": "",
"MacAddress": "",
"Networks": {
"addons-946218": {
"IPAMConfig": {
"IPv4Address": "192.168.49.2"
},
"Links": null,
"Aliases": [
"4063cc8e22dc",
"addons-946218"
],
"NetworkID": "77b4e7bcb5771c3826c1a3f678a8465e2b8793781e244510ab152207e578ee18",
"EndpointID": "7bb83d5527c5428cf81001632da41f8ca7bacda0dd638bed9cc42cd12f448290",
"Gateway": "192.168.49.1",
"IPAddress": "192.168.49.2",
"IPPrefixLen": 24,
"IPv6Gateway": "",
"GlobalIPv6Address": "",
"GlobalIPv6PrefixLen": 0,
"MacAddress": "02:42:c0:a8:31:02",
"DriverOpts": null
}
}
}
}
]
-- /stdout --
helpers_test.go:239: (dbg) Run: out/minikube-linux-arm64 status --format={{.Host}} -p addons-946218 -n addons-946218
helpers_test.go:244: <<< TestAddons/parallel/Ingress FAILED: start of post-mortem logs <<<
helpers_test.go:245: ======> post-mortem[TestAddons/parallel/Ingress]: minikube logs <======
helpers_test.go:247: (dbg) Run: out/minikube-linux-arm64 -p addons-946218 logs -n 25
helpers_test.go:247: (dbg) Done: out/minikube-linux-arm64 -p addons-946218 logs -n 25: (1.174493456s)
helpers_test.go:252: TestAddons/parallel/Ingress logs:
-- stdout --
*
* ==> Audit <==
* |---------|---------------------------------------------------------------------------------------------|------------------------|---------|---------|---------------------|---------------------|
| Command | Args | Profile | User | Version | Start Time | End Time |
|---------|---------------------------------------------------------------------------------------------|------------------------|---------|---------|---------------------|---------------------|
| start | -o=json --download-only | download-only-482552 | jenkins | v1.32.0 | 07 Dec 23 20:01 UTC | |
| | -p download-only-482552 | | | | | |
| | --force --alsologtostderr | | | | | |
| | --kubernetes-version=v1.16.0 | | | | | |
| | --container-runtime=docker | | | | | |
| | --driver=docker | | | | | |
| | --container-runtime=docker | | | | | |
| start | -o=json --download-only | download-only-482552 | jenkins | v1.32.0 | 07 Dec 23 20:01 UTC | |
| | -p download-only-482552 | | | | | |
| | --force --alsologtostderr | | | | | |
| | --kubernetes-version=v1.28.4 | | | | | |
| | --container-runtime=docker | | | | | |
| | --driver=docker | | | | | |
| | --container-runtime=docker | | | | | |
| start | -o=json --download-only | download-only-482552 | jenkins | v1.32.0 | 07 Dec 23 20:01 UTC | |
| | -p download-only-482552 | | | | | |
| | --force --alsologtostderr | | | | | |
| | --kubernetes-version=v1.29.0-rc.1 | | | | | |
| | --container-runtime=docker | | | | | |
| | --driver=docker | | | | | |
| | --container-runtime=docker | | | | | |
| delete | --all | minikube | jenkins | v1.32.0 | 07 Dec 23 20:01 UTC | 07 Dec 23 20:01 UTC |
| delete | -p download-only-482552 | download-only-482552 | jenkins | v1.32.0 | 07 Dec 23 20:01 UTC | 07 Dec 23 20:01 UTC |
| delete | -p download-only-482552 | download-only-482552 | jenkins | v1.32.0 | 07 Dec 23 20:01 UTC | 07 Dec 23 20:01 UTC |
| start | --download-only -p | download-docker-220646 | jenkins | v1.32.0 | 07 Dec 23 20:01 UTC | |
| | download-docker-220646 | | | | | |
| | --alsologtostderr | | | | | |
| | --driver=docker | | | | | |
| | --container-runtime=docker | | | | | |
| delete | -p download-docker-220646 | download-docker-220646 | jenkins | v1.32.0 | 07 Dec 23 20:01 UTC | 07 Dec 23 20:01 UTC |
| start | --download-only -p | binary-mirror-439095 | jenkins | v1.32.0 | 07 Dec 23 20:01 UTC | |
| | binary-mirror-439095 | | | | | |
| | --alsologtostderr | | | | | |
| | --binary-mirror | | | | | |
| | http://127.0.0.1:35697 | | | | | |
| | --driver=docker | | | | | |
| | --container-runtime=docker | | | | | |
| delete | -p binary-mirror-439095 | binary-mirror-439095 | jenkins | v1.32.0 | 07 Dec 23 20:01 UTC | 07 Dec 23 20:01 UTC |
| addons | enable dashboard -p | addons-946218 | jenkins | v1.32.0 | 07 Dec 23 20:01 UTC | |
| | addons-946218 | | | | | |
| addons | disable dashboard -p | addons-946218 | jenkins | v1.32.0 | 07 Dec 23 20:01 UTC | |
| | addons-946218 | | | | | |
| start | -p addons-946218 --wait=true | addons-946218 | jenkins | v1.32.0 | 07 Dec 23 20:01 UTC | 07 Dec 23 20:04 UTC |
| | --memory=4000 --alsologtostderr | | | | | |
| | --addons=registry | | | | | |
| | --addons=metrics-server | | | | | |
| | --addons=volumesnapshots | | | | | |
| | --addons=csi-hostpath-driver | | | | | |
| | --addons=gcp-auth | | | | | |
| | --addons=cloud-spanner | | | | | |
| | --addons=inspektor-gadget | | | | | |
| | --addons=storage-provisioner-rancher | | | | | |
| | --addons=nvidia-device-plugin | | | | | |
| | --driver=docker | | | | | |
| | --container-runtime=docker | | | | | |
| | --addons=ingress | | | | | |
| | --addons=ingress-dns | | | | | |
| ip | addons-946218 ip | addons-946218 | jenkins | v1.32.0 | 07 Dec 23 20:04 UTC | 07 Dec 23 20:04 UTC |
| addons | addons-946218 addons disable | addons-946218 | jenkins | v1.32.0 | 07 Dec 23 20:04 UTC | 07 Dec 23 20:04 UTC |
| | registry --alsologtostderr | | | | | |
| | -v=1 | | | | | |
| addons | addons-946218 addons | addons-946218 | jenkins | v1.32.0 | 07 Dec 23 20:04 UTC | 07 Dec 23 20:04 UTC |
| | disable metrics-server | | | | | |
| | --alsologtostderr -v=1 | | | | | |
| addons | disable inspektor-gadget -p | addons-946218 | jenkins | v1.32.0 | 07 Dec 23 20:04 UTC | 07 Dec 23 20:04 UTC |
| | addons-946218 | | | | | |
| ssh | addons-946218 ssh curl -s | addons-946218 | jenkins | v1.32.0 | 07 Dec 23 20:04 UTC | 07 Dec 23 20:04 UTC |
| | http://127.0.0.1/ -H 'Host: | | | | | |
| | nginx.example.com' | | | | | |
| ip | addons-946218 ip | addons-946218 | jenkins | v1.32.0 | 07 Dec 23 20:05 UTC | 07 Dec 23 20:05 UTC |
| addons | addons-946218 addons | addons-946218 | jenkins | v1.32.0 | 07 Dec 23 20:05 UTC | 07 Dec 23 20:05 UTC |
| | disable csi-hostpath-driver | | | | | |
| | --alsologtostderr -v=1 | | | | | |
| addons | addons-946218 addons | addons-946218 | jenkins | v1.32.0 | 07 Dec 23 20:05 UTC | 07 Dec 23 20:05 UTC |
| | disable volumesnapshots | | | | | |
| | --alsologtostderr -v=1 | | | | | |
| addons | addons-946218 addons disable | addons-946218 | jenkins | v1.32.0 | 07 Dec 23 20:05 UTC | 07 Dec 23 20:05 UTC |
| | ingress-dns --alsologtostderr | | | | | |
| | -v=1 | | | | | |
| addons | addons-946218 addons disable | addons-946218 | jenkins | v1.32.0 | 07 Dec 23 20:05 UTC | 07 Dec 23 20:05 UTC |
| | ingress --alsologtostderr -v=1 | | | | | |
| ssh | addons-946218 ssh cat | addons-946218 | jenkins | v1.32.0 | 07 Dec 23 20:05 UTC | 07 Dec 23 20:05 UTC |
| | /opt/local-path-provisioner/pvc-6224022a-bf0c-43f9-b398-1fc2163a085b_default_test-pvc/file1 | | | | | |
| addons | addons-946218 addons disable | addons-946218 | jenkins | v1.32.0 | 07 Dec 23 20:05 UTC | |
| | storage-provisioner-rancher | | | | | |
| | --alsologtostderr -v=1 | | | | | |
|---------|---------------------------------------------------------------------------------------------|------------------------|---------|---------|---------------------|---------------------|
*
* ==> Last Start <==
* Log file created at: 2023/12/07 20:01:50
Running on machine: ip-172-31-29-130
Binary: Built with gc go1.21.4 for linux/arm64
Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg
I1207 20:01:50.911042 8179 out.go:296] Setting OutFile to fd 1 ...
I1207 20:01:50.911181 8179 out.go:343] TERM=,COLORTERM=, which probably does not support color
I1207 20:01:50.911188 8179 out.go:309] Setting ErrFile to fd 2...
I1207 20:01:50.911194 8179 out.go:343] TERM=,COLORTERM=, which probably does not support color
I1207 20:01:50.911443 8179 root.go:338] Updating PATH: /home/jenkins/minikube-integration/17719-2292/.minikube/bin
I1207 20:01:50.911888 8179 out.go:303] Setting JSON to false
I1207 20:01:50.912619 8179 start.go:128] hostinfo: {"hostname":"ip-172-31-29-130","uptime":2654,"bootTime":1701976657,"procs":146,"os":"linux","platform":"ubuntu","platformFamily":"debian","platformVersion":"20.04","kernelVersion":"5.15.0-1050-aws","kernelArch":"aarch64","virtualizationSystem":"","virtualizationRole":"","hostId":"36adf542-ef4f-4e2d-a0c8-6868d1383ff9"}
I1207 20:01:50.912688 8179 start.go:138] virtualization:
I1207 20:01:50.915347 8179 out.go:177] * [addons-946218] minikube v1.32.0 on Ubuntu 20.04 (arm64)
I1207 20:01:50.917207 8179 out.go:177] - MINIKUBE_LOCATION=17719
I1207 20:01:50.919178 8179 out.go:177] - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
I1207 20:01:50.917312 8179 notify.go:220] Checking for updates...
I1207 20:01:50.923232 8179 out.go:177] - KUBECONFIG=/home/jenkins/minikube-integration/17719-2292/kubeconfig
I1207 20:01:50.925256 8179 out.go:177] - MINIKUBE_HOME=/home/jenkins/minikube-integration/17719-2292/.minikube
I1207 20:01:50.926706 8179 out.go:177] - MINIKUBE_BIN=out/minikube-linux-arm64
I1207 20:01:50.928573 8179 out.go:177] - MINIKUBE_FORCE_SYSTEMD=
I1207 20:01:50.930464 8179 driver.go:392] Setting default libvirt URI to qemu:///system
I1207 20:01:50.954408 8179 docker.go:122] docker version: linux-24.0.7:Docker Engine - Community
I1207 20:01:50.954522 8179 cli_runner.go:164] Run: docker system info --format "{{json .}}"
I1207 20:01:51.037651 8179 info.go:266] docker info: {ID:U5VK:ZNT5:35M3:FHLW:Q7TL:ELFX:BNAG:AV4T:UD2H:SK5L:SEJV:SJJL Containers:0 ContainersRunning:0 ContainersPaused:0 ContainersStopped:0 Images:1 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local logentries splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:true BridgeNfIP6Tables:true Debug:false NFd:29 OomKillDisable:true NGoroutines:40 SystemTime:2023-12-07 20:01:51.027163355 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1050-aws OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Architecture:aarch64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:2 MemTotal:8215044096 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ip-172-31-29-130 Labels:[] ExperimentalBuild:false ServerVersion:24.0.7 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:d8f198a4ed8892c764191ef7b3b06d8a2eeb5c7f Expected:d8f198a4ed8892c764191ef7b3b06d8a2eeb5c7f} RuncCommit:{ID:v1.1.10-0-g18a0cb0 Expected:v1.1.10-0-g18a0cb0} InitCommit:{ID:de40ad0 Expected:de40ad0} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.11.2] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.21.0]] Warnings:<nil>}}
I1207 20:01:51.037758 8179 docker.go:295] overlay module found
I1207 20:01:51.039791 8179 out.go:177] * Using the docker driver based on user configuration
I1207 20:01:51.041311 8179 start.go:298] selected driver: docker
I1207 20:01:51.041331 8179 start.go:902] validating driver "docker" against <nil>
I1207 20:01:51.041345 8179 start.go:913] status for docker: {Installed:true Healthy:true Running:false NeedsImprovement:false Error:<nil> Reason: Fix: Doc: Version:}
I1207 20:01:51.042062 8179 cli_runner.go:164] Run: docker system info --format "{{json .}}"
I1207 20:01:51.117603 8179 info.go:266] docker info: {ID:U5VK:ZNT5:35M3:FHLW:Q7TL:ELFX:BNAG:AV4T:UD2H:SK5L:SEJV:SJJL Containers:0 ContainersRunning:0 ContainersPaused:0 ContainersStopped:0 Images:1 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local logentries splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:true BridgeNfIP6Tables:true Debug:false NFd:29 OomKillDisable:true NGoroutines:40 SystemTime:2023-12-07 20:01:51.107644708 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1050-aws OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Architecture:aarch64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:2 MemTotal:8215044096 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ip-172-31-29-130 Labels:[] ExperimentalBuild:false ServerVersion:24.0.7 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:d8f198a4ed8892c764191ef7b3b06d8a2eeb5c7f Expected:d8f198a4ed8892c764191ef7b3b06d8a2eeb5c7f} RuncCommit:{ID:v1.1.10-0-g18a0cb0 Expected:v1.1.10-0-g18a0cb0} InitCommit:{ID:de40ad0 Expected:de40ad0} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.11.2] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.21.0]] Warnings:<nil>}}
I1207 20:01:51.117774 8179 start_flags.go:309] no existing cluster config was found, will generate one from the flags
I1207 20:01:51.118020 8179 start_flags.go:931] Waiting for all components: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
I1207 20:01:51.119812 8179 out.go:177] * Using Docker driver with root privileges
I1207 20:01:51.121644 8179 cni.go:84] Creating CNI manager for ""
I1207 20:01:51.121678 8179 cni.go:158] "docker" driver + "docker" container runtime found on kubernetes v1.24+, recommending bridge
I1207 20:01:51.121690 8179 start_flags.go:318] Found "bridge CNI" CNI - setting NetworkPlugin=cni
I1207 20:01:51.121706 8179 start_flags.go:323] config:
{Name:addons-946218 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.42-1701974066-17719@sha256:cec630e7d143790c46e2dc54dbb8f39a22d8ede3e3c25e34638082e2c107a85c Memory:4000 CPUs:2 DiskSize:20000 VMDriver: Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:0 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.28.4 ClusterName:addons-946218 Namespace:default APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI: NodeIP: NodePort:8443 NodeName:} Nodes:[] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 AutoPauseInterval:1m0s GPUs:}
I1207 20:01:51.124104 8179 out.go:177] * Starting control plane node addons-946218 in cluster addons-946218
I1207 20:01:51.125801 8179 cache.go:121] Beginning downloading kic base image for docker with docker
I1207 20:01:51.127340 8179 out.go:177] * Pulling base image ...
I1207 20:01:51.128951 8179 preload.go:132] Checking if preload exists for k8s version v1.28.4 and runtime docker
I1207 20:01:51.129006 8179 preload.go:148] Found local preload: /home/jenkins/minikube-integration/17719-2292/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.28.4-docker-overlay2-arm64.tar.lz4
I1207 20:01:51.129030 8179 cache.go:56] Caching tarball of preloaded images
I1207 20:01:51.129054 8179 image.go:79] Checking for gcr.io/k8s-minikube/kicbase-builds:v0.0.42-1701974066-17719@sha256:cec630e7d143790c46e2dc54dbb8f39a22d8ede3e3c25e34638082e2c107a85c in local docker daemon
I1207 20:01:51.129112 8179 preload.go:174] Found /home/jenkins/minikube-integration/17719-2292/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.28.4-docker-overlay2-arm64.tar.lz4 in cache, skipping download
I1207 20:01:51.129123 8179 cache.go:59] Finished verifying existence of preloaded tar for v1.28.4 on docker
I1207 20:01:51.129476 8179 profile.go:148] Saving config to /home/jenkins/minikube-integration/17719-2292/.minikube/profiles/addons-946218/config.json ...
I1207 20:01:51.129505 8179 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/17719-2292/.minikube/profiles/addons-946218/config.json: {Name:mk65069aa441c08498f81925dabee829bb67bbf3 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1207 20:01:51.149899 8179 cache.go:149] Downloading gcr.io/k8s-minikube/kicbase-builds:v0.0.42-1701974066-17719@sha256:cec630e7d143790c46e2dc54dbb8f39a22d8ede3e3c25e34638082e2c107a85c to local cache
I1207 20:01:51.150027 8179 image.go:63] Checking for gcr.io/k8s-minikube/kicbase-builds:v0.0.42-1701974066-17719@sha256:cec630e7d143790c46e2dc54dbb8f39a22d8ede3e3c25e34638082e2c107a85c in local cache directory
I1207 20:01:51.150048 8179 image.go:66] Found gcr.io/k8s-minikube/kicbase-builds:v0.0.42-1701974066-17719@sha256:cec630e7d143790c46e2dc54dbb8f39a22d8ede3e3c25e34638082e2c107a85c in local cache directory, skipping pull
I1207 20:01:51.150054 8179 image.go:105] gcr.io/k8s-minikube/kicbase-builds:v0.0.42-1701974066-17719@sha256:cec630e7d143790c46e2dc54dbb8f39a22d8ede3e3c25e34638082e2c107a85c exists in cache, skipping pull
I1207 20:01:51.150062 8179 cache.go:152] successfully saved gcr.io/k8s-minikube/kicbase-builds:v0.0.42-1701974066-17719@sha256:cec630e7d143790c46e2dc54dbb8f39a22d8ede3e3c25e34638082e2c107a85c as a tarball
I1207 20:01:51.150068 8179 cache.go:162] Loading gcr.io/k8s-minikube/kicbase-builds:v0.0.42-1701974066-17719@sha256:cec630e7d143790c46e2dc54dbb8f39a22d8ede3e3c25e34638082e2c107a85c from local cache
I1207 20:02:07.120398 8179 cache.go:164] successfully loaded and using gcr.io/k8s-minikube/kicbase-builds:v0.0.42-1701974066-17719@sha256:cec630e7d143790c46e2dc54dbb8f39a22d8ede3e3c25e34638082e2c107a85c from cached tarball
I1207 20:02:07.120440 8179 cache.go:194] Successfully downloaded all kic artifacts
I1207 20:02:07.120492 8179 start.go:365] acquiring machines lock for addons-946218: {Name:mk3e8ea9a98cede806ec856689b0f5b5eaa03b4b Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I1207 20:02:07.120623 8179 start.go:369] acquired machines lock for "addons-946218" in 109.168µs
I1207 20:02:07.120657 8179 start.go:93] Provisioning new machine with config: &{Name:addons-946218 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.42-1701974066-17719@sha256:cec630e7d143790c46e2dc54dbb8f39a22d8ede3e3c25e34638082e2c107a85c Memory:4000 CPUs:2 DiskSize:20000 VMDriver: Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:0 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.28.4 ClusterName:addons-946218 Namespace:default APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI: NodeIP: NodePort:8443 NodeName:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.28.4 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 AutoPauseInterval:1m0s GPUs:} &{Name: IP: Port:8443 KubernetesVersion:v1.28.4 ContainerRuntime:docker ControlPlane:true Worker:true}
I1207 20:02:07.120756 8179 start.go:125] createHost starting for "" (driver="docker")
I1207 20:02:07.122937 8179 out.go:204] * Creating docker container (CPUs=2, Memory=4000MB) ...
I1207 20:02:07.123185 8179 start.go:159] libmachine.API.Create for "addons-946218" (driver="docker")
I1207 20:02:07.123216 8179 client.go:168] LocalClient.Create starting
I1207 20:02:07.123359 8179 main.go:141] libmachine: Creating CA: /home/jenkins/minikube-integration/17719-2292/.minikube/certs/ca.pem
I1207 20:02:07.498270 8179 main.go:141] libmachine: Creating client certificate: /home/jenkins/minikube-integration/17719-2292/.minikube/certs/cert.pem
I1207 20:02:08.130332 8179 cli_runner.go:164] Run: docker network inspect addons-946218 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
W1207 20:02:08.147431 8179 cli_runner.go:211] docker network inspect addons-946218 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}" returned with exit code 1
I1207 20:02:08.147512 8179 network_create.go:281] running [docker network inspect addons-946218] to gather additional debugging logs...
I1207 20:02:08.147533 8179 cli_runner.go:164] Run: docker network inspect addons-946218
W1207 20:02:08.165776 8179 cli_runner.go:211] docker network inspect addons-946218 returned with exit code 1
I1207 20:02:08.165801 8179 network_create.go:284] error running [docker network inspect addons-946218]: docker network inspect addons-946218: exit status 1
stdout:
[]
stderr:
Error response from daemon: network addons-946218 not found
I1207 20:02:08.165814 8179 network_create.go:286] output of [docker network inspect addons-946218]: -- stdout --
[]
-- /stdout --
** stderr **
Error response from daemon: network addons-946218 not found
** /stderr **
I1207 20:02:08.165906 8179 cli_runner.go:164] Run: docker network inspect bridge --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I1207 20:02:08.184977 8179 network.go:209] using free private subnet 192.168.49.0/24: &{IP:192.168.49.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.49.0/24 Gateway:192.168.49.1 ClientMin:192.168.49.2 ClientMax:192.168.49.254 Broadcast:192.168.49.255 IsPrivate:true Interface:{IfaceName: IfaceIPv4: IfaceMTU:0 IfaceMAC:} reservation:0x40024feea0}
I1207 20:02:08.185022 8179 network_create.go:124] attempt to create docker network addons-946218 192.168.49.0/24 with gateway 192.168.49.1 and MTU of 1500 ...
I1207 20:02:08.185085 8179 cli_runner.go:164] Run: docker network create --driver=bridge --subnet=192.168.49.0/24 --gateway=192.168.49.1 -o --ip-masq -o --icc -o com.docker.network.driver.mtu=1500 --label=created_by.minikube.sigs.k8s.io=true --label=name.minikube.sigs.k8s.io=addons-946218 addons-946218
I1207 20:02:08.255850 8179 network_create.go:108] docker network addons-946218 192.168.49.0/24 created
I1207 20:02:08.255891 8179 kic.go:121] calculated static IP "192.168.49.2" for the "addons-946218" container
I1207 20:02:08.255968 8179 cli_runner.go:164] Run: docker ps -a --format {{.Names}}
I1207 20:02:08.272787 8179 cli_runner.go:164] Run: docker volume create addons-946218 --label name.minikube.sigs.k8s.io=addons-946218 --label created_by.minikube.sigs.k8s.io=true
I1207 20:02:08.291574 8179 oci.go:103] Successfully created a docker volume addons-946218
I1207 20:02:08.291663 8179 cli_runner.go:164] Run: docker run --rm --name addons-946218-preload-sidecar --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=addons-946218 --entrypoint /usr/bin/test -v addons-946218:/var gcr.io/k8s-minikube/kicbase-builds:v0.0.42-1701974066-17719@sha256:cec630e7d143790c46e2dc54dbb8f39a22d8ede3e3c25e34638082e2c107a85c -d /var/lib
I1207 20:02:10.319625 8179 cli_runner.go:217] Completed: docker run --rm --name addons-946218-preload-sidecar --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=addons-946218 --entrypoint /usr/bin/test -v addons-946218:/var gcr.io/k8s-minikube/kicbase-builds:v0.0.42-1701974066-17719@sha256:cec630e7d143790c46e2dc54dbb8f39a22d8ede3e3c25e34638082e2c107a85c -d /var/lib: (2.027922891s)
I1207 20:02:10.319654 8179 oci.go:107] Successfully prepared a docker volume addons-946218
I1207 20:02:10.319692 8179 preload.go:132] Checking if preload exists for k8s version v1.28.4 and runtime docker
I1207 20:02:10.319716 8179 kic.go:194] Starting extracting preloaded images to volume ...
I1207 20:02:10.319798 8179 cli_runner.go:164] Run: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/17719-2292/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.28.4-docker-overlay2-arm64.tar.lz4:/preloaded.tar:ro -v addons-946218:/extractDir gcr.io/k8s-minikube/kicbase-builds:v0.0.42-1701974066-17719@sha256:cec630e7d143790c46e2dc54dbb8f39a22d8ede3e3c25e34638082e2c107a85c -I lz4 -xf /preloaded.tar -C /extractDir
I1207 20:02:14.337286 8179 cli_runner.go:217] Completed: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/17719-2292/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.28.4-docker-overlay2-arm64.tar.lz4:/preloaded.tar:ro -v addons-946218:/extractDir gcr.io/k8s-minikube/kicbase-builds:v0.0.42-1701974066-17719@sha256:cec630e7d143790c46e2dc54dbb8f39a22d8ede3e3c25e34638082e2c107a85c -I lz4 -xf /preloaded.tar -C /extractDir: (4.017446428s)
I1207 20:02:14.337316 8179 kic.go:203] duration metric: took 4.017598 seconds to extract preloaded images to volume
W1207 20:02:14.337517 8179 cgroups_linux.go:77] Your kernel does not support swap limit capabilities or the cgroup is not mounted.
I1207 20:02:14.337632 8179 cli_runner.go:164] Run: docker info --format "'{{json .SecurityOptions}}'"
I1207 20:02:14.406577 8179 cli_runner.go:164] Run: docker run -d -t --privileged --security-opt seccomp=unconfined --tmpfs /tmp --tmpfs /run -v /lib/modules:/lib/modules:ro --hostname addons-946218 --name addons-946218 --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=addons-946218 --label role.minikube.sigs.k8s.io= --label mode.minikube.sigs.k8s.io=addons-946218 --network addons-946218 --ip 192.168.49.2 --volume addons-946218:/var --security-opt apparmor=unconfined --memory=4000mb --cpus=2 -e container=docker --expose 8443 --publish=127.0.0.1::8443 --publish=127.0.0.1::22 --publish=127.0.0.1::2376 --publish=127.0.0.1::5000 --publish=127.0.0.1::32443 gcr.io/k8s-minikube/kicbase-builds:v0.0.42-1701974066-17719@sha256:cec630e7d143790c46e2dc54dbb8f39a22d8ede3e3c25e34638082e2c107a85c
I1207 20:02:14.788026 8179 cli_runner.go:164] Run: docker container inspect addons-946218 --format={{.State.Running}}
I1207 20:02:14.809075 8179 cli_runner.go:164] Run: docker container inspect addons-946218 --format={{.State.Status}}
I1207 20:02:14.833186 8179 cli_runner.go:164] Run: docker exec addons-946218 stat /var/lib/dpkg/alternatives/iptables
I1207 20:02:14.901028 8179 oci.go:144] the created container "addons-946218" has a running status.
I1207 20:02:14.901053 8179 kic.go:225] Creating ssh key for kic: /home/jenkins/minikube-integration/17719-2292/.minikube/machines/addons-946218/id_rsa...
I1207 20:02:15.616907 8179 kic_runner.go:191] docker (temp): /home/jenkins/minikube-integration/17719-2292/.minikube/machines/addons-946218/id_rsa.pub --> /home/docker/.ssh/authorized_keys (381 bytes)
I1207 20:02:15.646402 8179 cli_runner.go:164] Run: docker container inspect addons-946218 --format={{.State.Status}}
I1207 20:02:15.671961 8179 kic_runner.go:93] Run: chown docker:docker /home/docker/.ssh/authorized_keys
I1207 20:02:15.671980 8179 kic_runner.go:114] Args: [docker exec --privileged addons-946218 chown docker:docker /home/docker/.ssh/authorized_keys]
I1207 20:02:15.749039 8179 cli_runner.go:164] Run: docker container inspect addons-946218 --format={{.State.Status}}
I1207 20:02:15.780976 8179 machine.go:88] provisioning docker machine ...
I1207 20:02:15.781004 8179 ubuntu.go:169] provisioning hostname "addons-946218"
I1207 20:02:15.781068 8179 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-946218
I1207 20:02:15.810069 8179 main.go:141] libmachine: Using SSH client type: native
I1207 20:02:15.810485 8179 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x3be600] 0x3c0d70 <nil> [] 0s} 127.0.0.1 32772 <nil> <nil>}
I1207 20:02:15.810497 8179 main.go:141] libmachine: About to run SSH command:
sudo hostname addons-946218 && echo "addons-946218" | sudo tee /etc/hostname
I1207 20:02:15.986914 8179 main.go:141] libmachine: SSH cmd err, output: <nil>: addons-946218
I1207 20:02:15.986986 8179 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-946218
I1207 20:02:16.014428 8179 main.go:141] libmachine: Using SSH client type: native
I1207 20:02:16.014834 8179 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x3be600] 0x3c0d70 <nil> [] 0s} 127.0.0.1 32772 <nil> <nil>}
I1207 20:02:16.014853 8179 main.go:141] libmachine: About to run SSH command:
if ! grep -xq '.*\saddons-946218' /etc/hosts; then
if grep -xq '127.0.1.1\s.*' /etc/hosts; then
sudo sed -i 's/^127.0.1.1\s.*/127.0.1.1 addons-946218/g' /etc/hosts;
else
echo '127.0.1.1 addons-946218' | sudo tee -a /etc/hosts;
fi
fi
I1207 20:02:16.149914 8179 main.go:141] libmachine: SSH cmd err, output: <nil>:
I1207 20:02:16.149948 8179 ubuntu.go:175] set auth options {CertDir:/home/jenkins/minikube-integration/17719-2292/.minikube CaCertPath:/home/jenkins/minikube-integration/17719-2292/.minikube/certs/ca.pem CaPrivateKeyPath:/home/jenkins/minikube-integration/17719-2292/.minikube/certs/ca-key.pem CaCertRemotePath:/etc/docker/ca.pem ServerCertPath:/home/jenkins/minikube-integration/17719-2292/.minikube/machines/server.pem ServerKeyPath:/home/jenkins/minikube-integration/17719-2292/.minikube/machines/server-key.pem ClientKeyPath:/home/jenkins/minikube-integration/17719-2292/.minikube/certs/key.pem ServerCertRemotePath:/etc/docker/server.pem ServerKeyRemotePath:/etc/docker/server-key.pem ClientCertPath:/home/jenkins/minikube-integration/17719-2292/.minikube/certs/cert.pem ServerCertSANs:[] StorePath:/home/jenkins/minikube-integration/17719-2292/.minikube}
I1207 20:02:16.149974 8179 ubuntu.go:177] setting up certificates
I1207 20:02:16.149981 8179 provision.go:83] configureAuth start
I1207 20:02:16.150038 8179 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" addons-946218
I1207 20:02:16.169311 8179 provision.go:138] copyHostCerts
I1207 20:02:16.169392 8179 exec_runner.go:151] cp: /home/jenkins/minikube-integration/17719-2292/.minikube/certs/ca.pem --> /home/jenkins/minikube-integration/17719-2292/.minikube/ca.pem (1078 bytes)
I1207 20:02:16.169523 8179 exec_runner.go:151] cp: /home/jenkins/minikube-integration/17719-2292/.minikube/certs/cert.pem --> /home/jenkins/minikube-integration/17719-2292/.minikube/cert.pem (1123 bytes)
I1207 20:02:16.169596 8179 exec_runner.go:151] cp: /home/jenkins/minikube-integration/17719-2292/.minikube/certs/key.pem --> /home/jenkins/minikube-integration/17719-2292/.minikube/key.pem (1679 bytes)
I1207 20:02:16.169657 8179 provision.go:112] generating server cert: /home/jenkins/minikube-integration/17719-2292/.minikube/machines/server.pem ca-key=/home/jenkins/minikube-integration/17719-2292/.minikube/certs/ca.pem private-key=/home/jenkins/minikube-integration/17719-2292/.minikube/certs/ca-key.pem org=jenkins.addons-946218 san=[192.168.49.2 127.0.0.1 localhost 127.0.0.1 minikube addons-946218]
I1207 20:02:16.907931 8179 provision.go:172] copyRemoteCerts
I1207 20:02:16.908023 8179 ssh_runner.go:195] Run: sudo mkdir -p /etc/docker /etc/docker /etc/docker
I1207 20:02:16.908065 8179 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-946218
I1207 20:02:16.925598 8179 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32772 SSHKeyPath:/home/jenkins/minikube-integration/17719-2292/.minikube/machines/addons-946218/id_rsa Username:docker}
I1207 20:02:17.019243 8179 ssh_runner.go:362] scp /home/jenkins/minikube-integration/17719-2292/.minikube/certs/ca.pem --> /etc/docker/ca.pem (1078 bytes)
I1207 20:02:17.047500 8179 ssh_runner.go:362] scp /home/jenkins/minikube-integration/17719-2292/.minikube/machines/server.pem --> /etc/docker/server.pem (1216 bytes)
I1207 20:02:17.074397 8179 ssh_runner.go:362] scp /home/jenkins/minikube-integration/17719-2292/.minikube/machines/server-key.pem --> /etc/docker/server-key.pem (1675 bytes)
I1207 20:02:17.101962 8179 provision.go:86] duration metric: configureAuth took 951.96757ms
I1207 20:02:17.101987 8179 ubuntu.go:193] setting minikube options for container-runtime
I1207 20:02:17.102171 8179 config.go:182] Loaded profile config "addons-946218": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.28.4
I1207 20:02:17.102227 8179 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-946218
I1207 20:02:17.120182 8179 main.go:141] libmachine: Using SSH client type: native
I1207 20:02:17.120578 8179 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x3be600] 0x3c0d70 <nil> [] 0s} 127.0.0.1 32772 <nil> <nil>}
I1207 20:02:17.120594 8179 main.go:141] libmachine: About to run SSH command:
df --output=fstype / | tail -n 1
I1207 20:02:17.246397 8179 main.go:141] libmachine: SSH cmd err, output: <nil>: overlay
I1207 20:02:17.246419 8179 ubuntu.go:71] root file system type: overlay
I1207 20:02:17.246524 8179 provision.go:309] Updating docker unit: /lib/systemd/system/docker.service ...
I1207 20:02:17.246613 8179 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-946218
I1207 20:02:17.265802 8179 main.go:141] libmachine: Using SSH client type: native
I1207 20:02:17.266213 8179 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x3be600] 0x3c0d70 <nil> [] 0s} 127.0.0.1 32772 <nil> <nil>}
I1207 20:02:17.266301 8179 main.go:141] libmachine: About to run SSH command:
sudo mkdir -p /lib/systemd/system && printf %!s(MISSING) "[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
BindsTo=containerd.service
After=network-online.target firewalld.service containerd.service
Wants=network-online.target
Requires=docker.socket
StartLimitBurst=3
StartLimitIntervalSec=60
[Service]
Type=notify
Restart=on-failure
# This file is a systemd drop-in unit that inherits from the base dockerd configuration.
# The base configuration already specifies an 'ExecStart=...' command. The first directive
# here is to clear out that command inherited from the base configuration. Without this,
# the command from the base configuration and the command specified here are treated as
# a sequence of commands, which is not the desired behavior, nor is it valid -- systemd
# will catch this invalid input and refuse to start the service with an error like:
# Service has more than one ExecStart= setting, which is only allowed for Type=oneshot services.
# NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other
# container runtimes. If left unlimited, it may result in OOM issues with MySQL.
ExecStart=
ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=docker --insecure-registry 10.96.0.0/12
ExecReload=/bin/kill -s HUP \$MAINPID
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
# Uncomment TasksMax if your systemd version supports it.
# Only systemd 226 and above support this version.
TasksMax=infinity
TimeoutStartSec=0
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process
[Install]
WantedBy=multi-user.target
" | sudo tee /lib/systemd/system/docker.service.new
I1207 20:02:17.407760 8179 main.go:141] libmachine: SSH cmd err, output: <nil>: [Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
BindsTo=containerd.service
After=network-online.target firewalld.service containerd.service
Wants=network-online.target
Requires=docker.socket
StartLimitBurst=3
StartLimitIntervalSec=60
[Service]
Type=notify
Restart=on-failure
# This file is a systemd drop-in unit that inherits from the base dockerd configuration.
# The base configuration already specifies an 'ExecStart=...' command. The first directive
# here is to clear out that command inherited from the base configuration. Without this,
# the command from the base configuration and the command specified here are treated as
# a sequence of commands, which is not the desired behavior, nor is it valid -- systemd
# will catch this invalid input and refuse to start the service with an error like:
# Service has more than one ExecStart= setting, which is only allowed for Type=oneshot services.
# NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other
# container runtimes. If left unlimited, it may result in OOM issues with MySQL.
ExecStart=
ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=docker --insecure-registry 10.96.0.0/12
ExecReload=/bin/kill -s HUP $MAINPID
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
# Uncomment TasksMax if your systemd version supports it.
# Only systemd 226 and above support this version.
TasksMax=infinity
TimeoutStartSec=0
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process
[Install]
WantedBy=multi-user.target
I1207 20:02:17.407848 8179 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-946218
I1207 20:02:17.427552 8179 main.go:141] libmachine: Using SSH client type: native
I1207 20:02:17.427975 8179 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x3be600] 0x3c0d70 <nil> [] 0s} 127.0.0.1 32772 <nil> <nil>}
I1207 20:02:17.427994 8179 main.go:141] libmachine: About to run SSH command:
sudo diff -u /lib/systemd/system/docker.service /lib/systemd/system/docker.service.new || { sudo mv /lib/systemd/system/docker.service.new /lib/systemd/system/docker.service; sudo systemctl -f daemon-reload && sudo systemctl -f enable docker && sudo systemctl -f restart docker; }
I1207 20:02:18.263783 8179 main.go:141] libmachine: SSH cmd err, output: <nil>: --- /lib/systemd/system/docker.service 2023-10-26 09:06:20.000000000 +0000
+++ /lib/systemd/system/docker.service.new 2023-12-07 20:02:17.404474703 +0000
@@ -1,30 +1,32 @@
[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
-After=network-online.target docker.socket firewalld.service containerd.service time-set.target
-Wants=network-online.target containerd.service
+BindsTo=containerd.service
+After=network-online.target firewalld.service containerd.service
+Wants=network-online.target
Requires=docker.socket
+StartLimitBurst=3
+StartLimitIntervalSec=60
[Service]
Type=notify
-# the default is not to use systemd for cgroups because the delegate issues still
-# exists and systemd currently does not support the cgroup feature set required
-# for containers run by docker
-ExecStart=/usr/bin/dockerd -H fd:// --containerd=/run/containerd/containerd.sock
-ExecReload=/bin/kill -s HUP $MAINPID
-TimeoutStartSec=0
-RestartSec=2
-Restart=always
+Restart=on-failure
-# Note that StartLimit* options were moved from "Service" to "Unit" in systemd 229.
-# Both the old, and new location are accepted by systemd 229 and up, so using the old location
-# to make them work for either version of systemd.
-StartLimitBurst=3
-# Note that StartLimitInterval was renamed to StartLimitIntervalSec in systemd 230.
-# Both the old, and new name are accepted by systemd 230 and up, so using the old name to make
-# this option work for either version of systemd.
-StartLimitInterval=60s
+
+# This file is a systemd drop-in unit that inherits from the base dockerd configuration.
+# The base configuration already specifies an 'ExecStart=...' command. The first directive
+# here is to clear out that command inherited from the base configuration. Without this,
+# the command from the base configuration and the command specified here are treated as
+# a sequence of commands, which is not the desired behavior, nor is it valid -- systemd
+# will catch this invalid input and refuse to start the service with an error like:
+# Service has more than one ExecStart= setting, which is only allowed for Type=oneshot services.
+
+# NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other
+# container runtimes. If left unlimited, it may result in OOM issues with MySQL.
+ExecStart=
+ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=docker --insecure-registry 10.96.0.0/12
+ExecReload=/bin/kill -s HUP $MAINPID
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
@@ -32,16 +34,16 @@
LimitNPROC=infinity
LimitCORE=infinity
-# Comment TasksMax if your systemd version does not support it.
-# Only systemd 226 and above support this option.
+# Uncomment TasksMax if your systemd version supports it.
+# Only systemd 226 and above support this version.
TasksMax=infinity
+TimeoutStartSec=0
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process
-OOMScoreAdjust=-500
[Install]
WantedBy=multi-user.target
Synchronizing state of docker.service with SysV service script with /lib/systemd/systemd-sysv-install.
Executing: /lib/systemd/systemd-sysv-install enable docker
I1207 20:02:18.263816 8179 machine.go:91] provisioned docker machine in 2.482822833s
I1207 20:02:18.263828 8179 client.go:171] LocalClient.Create took 11.140602132s
I1207 20:02:18.263840 8179 start.go:167] duration metric: libmachine.API.Create for "addons-946218" took 11.140655269s
I1207 20:02:18.263847 8179 start.go:300] post-start starting for "addons-946218" (driver="docker")
I1207 20:02:18.263856 8179 start.go:329] creating required directories: [/etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs]
I1207 20:02:18.263921 8179 ssh_runner.go:195] Run: sudo mkdir -p /etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs
I1207 20:02:18.263968 8179 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-946218
I1207 20:02:18.282561 8179 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32772 SSHKeyPath:/home/jenkins/minikube-integration/17719-2292/.minikube/machines/addons-946218/id_rsa Username:docker}
I1207 20:02:18.375747 8179 ssh_runner.go:195] Run: cat /etc/os-release
I1207 20:02:18.379856 8179 main.go:141] libmachine: Couldn't set key VERSION_CODENAME, no corresponding struct field found
I1207 20:02:18.379893 8179 main.go:141] libmachine: Couldn't set key PRIVACY_POLICY_URL, no corresponding struct field found
I1207 20:02:18.379905 8179 main.go:141] libmachine: Couldn't set key UBUNTU_CODENAME, no corresponding struct field found
I1207 20:02:18.379912 8179 info.go:137] Remote host: Ubuntu 22.04.3 LTS
I1207 20:02:18.379924 8179 filesync.go:126] Scanning /home/jenkins/minikube-integration/17719-2292/.minikube/addons for local assets ...
I1207 20:02:18.380001 8179 filesync.go:126] Scanning /home/jenkins/minikube-integration/17719-2292/.minikube/files for local assets ...
I1207 20:02:18.380030 8179 start.go:303] post-start completed in 116.177631ms
I1207 20:02:18.380328 8179 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" addons-946218
I1207 20:02:18.398380 8179 profile.go:148] Saving config to /home/jenkins/minikube-integration/17719-2292/.minikube/profiles/addons-946218/config.json ...
I1207 20:02:18.398658 8179 ssh_runner.go:195] Run: sh -c "df -h /var | awk 'NR==2{print $5}'"
I1207 20:02:18.398707 8179 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-946218
I1207 20:02:18.416869 8179 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32772 SSHKeyPath:/home/jenkins/minikube-integration/17719-2292/.minikube/machines/addons-946218/id_rsa Username:docker}
I1207 20:02:18.506730 8179 ssh_runner.go:195] Run: sh -c "df -BG /var | awk 'NR==2{print $4}'"
I1207 20:02:18.512471 8179 start.go:128] duration metric: createHost completed in 11.391696166s
I1207 20:02:18.512496 8179 start.go:83] releasing machines lock for "addons-946218", held for 11.391858224s
I1207 20:02:18.512563 8179 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" addons-946218
I1207 20:02:18.534650 8179 ssh_runner.go:195] Run: cat /version.json
I1207 20:02:18.534708 8179 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-946218
I1207 20:02:18.534953 8179 ssh_runner.go:195] Run: curl -sS -m 2 https://registry.k8s.io/
I1207 20:02:18.535014 8179 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-946218
I1207 20:02:18.562640 8179 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32772 SSHKeyPath:/home/jenkins/minikube-integration/17719-2292/.minikube/machines/addons-946218/id_rsa Username:docker}
I1207 20:02:18.574238 8179 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32772 SSHKeyPath:/home/jenkins/minikube-integration/17719-2292/.minikube/machines/addons-946218/id_rsa Username:docker}
I1207 20:02:18.779594 8179 ssh_runner.go:195] Run: systemctl --version
I1207 20:02:18.785255 8179 ssh_runner.go:195] Run: sh -c "stat /etc/cni/net.d/*loopback.conf*"
I1207 20:02:18.790921 8179 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f -name *loopback.conf* -not -name *.mk_disabled -exec sh -c "grep -q loopback {} && ( grep -q name {} || sudo sed -i '/"type": "loopback"/i \ \ \ \ "name": "loopback",' {} ) && sudo sed -i 's|"cniVersion": ".*"|"cniVersion": "1.0.0"|g' {}" ;
I1207 20:02:18.822176 8179 cni.go:230] loopback cni configuration patched: "/etc/cni/net.d/*loopback.conf*" found
I1207 20:02:18.822259 8179 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f ( ( -name *bridge* -or -name *podman* ) -and -not -name *.mk_disabled ) -printf "%p, " -exec sh -c "sudo mv {} {}.mk_disabled" ;
I1207 20:02:18.857621 8179 cni.go:262] disabled [/etc/cni/net.d/87-podman-bridge.conflist, /etc/cni/net.d/100-crio-bridge.conf] bridge cni config(s)
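The two find commands above patch any loopback CNI config in place (adding a "name" field when missing and pinning cniVersion to 1.0.0) and rename bridge/podman configs to *.mk_disabled so the runtime ignores them. A rough idea of what a patched loopback file ends up looking like, assuming a typical /etc/cni/net.d/*loopback.conf on the node, is sketched below; the exact file may differ.

# Illustration only: a loopback CNI config after the patch shown above.
cat <<'EOF' > /tmp/loopback.conf.example
{
    "cniVersion": "1.0.0",
    "name": "loopback",
    "type": "loopback"
}
EOF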
I1207 20:02:18.857688 8179 start.go:475] detecting cgroup driver to use...
I1207 20:02:18.857727 8179 detect.go:196] detected "cgroupfs" cgroup driver on host os
I1207 20:02:18.857844 8179 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///run/containerd/containerd.sock
" | sudo tee /etc/crictl.yaml"
I1207 20:02:18.877629 8179 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)sandbox_image = .*$|\1sandbox_image = "registry.k8s.io/pause:3.9"|' /etc/containerd/config.toml"
I1207 20:02:18.889551 8179 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)restrict_oom_score_adj = .*$|\1restrict_oom_score_adj = false|' /etc/containerd/config.toml"
I1207 20:02:18.901430 8179 containerd.go:145] configuring containerd to use "cgroupfs" as cgroup driver...
I1207 20:02:18.901538 8179 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)SystemdCgroup = .*$|\1SystemdCgroup = false|g' /etc/containerd/config.toml"
I1207 20:02:18.913289 8179 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runtime.v1.linux"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I1207 20:02:18.924952 8179 ssh_runner.go:195] Run: sh -c "sudo sed -i '/systemd_cgroup/d' /etc/containerd/config.toml"
I1207 20:02:18.936795 8179 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runc.v1"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I1207 20:02:18.948979 8179 ssh_runner.go:195] Run: sh -c "sudo rm -rf /etc/cni/net.mk"
I1207 20:02:18.960222 8179 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)conf_dir = .*$|\1conf_dir = "/etc/cni/net.d"|g' /etc/containerd/config.toml"
I1207 20:02:18.972286 8179 ssh_runner.go:195] Run: sudo sysctl net.bridge.bridge-nf-call-iptables
I1207 20:02:18.982904 8179 ssh_runner.go:195] Run: sudo sh -c "echo 1 > /proc/sys/net/ipv4/ip_forward"
I1207 20:02:18.993210 8179 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1207 20:02:19.090376 8179 ssh_runner.go:195] Run: sudo systemctl restart containerd
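Taken together, the sed edits above pin the sandbox image to registry.k8s.io/pause:3.9, force SystemdCgroup = false to match the cgroupfs driver detected on the host, switch any runc v1 / io.containerd.runtime.v1.linux references to io.containerd.runc.v2, and point conf_dir at /etc/cni/net.d before containerd is restarted. A quick way to spot-check the result after the restart (a sketch, not part of the original run) is:

# Verify the rewritten containerd config took effect (read-only checks).
grep -E 'sandbox_image|SystemdCgroup|conf_dir' /etc/containerd/config.toml
sudo systemctl is-active containerd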
I1207 20:02:19.209266 8179 start.go:475] detecting cgroup driver to use...
I1207 20:02:19.209320 8179 detect.go:196] detected "cgroupfs" cgroup driver on host os
I1207 20:02:19.209391 8179 ssh_runner.go:195] Run: sudo systemctl cat docker.service
I1207 20:02:19.227314 8179 cruntime.go:279] skipping containerd shutdown because we are bound to it
I1207 20:02:19.227392 8179 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service crio
I1207 20:02:19.243046 8179 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///var/run/cri-dockerd.sock
" | sudo tee /etc/crictl.yaml"
I1207 20:02:19.264725 8179 ssh_runner.go:195] Run: which cri-dockerd
I1207 20:02:19.269966 8179 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/cri-docker.service.d
I1207 20:02:19.281382 8179 ssh_runner.go:362] scp memory --> /etc/systemd/system/cri-docker.service.d/10-cni.conf (189 bytes)
I1207 20:02:19.305959 8179 ssh_runner.go:195] Run: sudo systemctl unmask docker.service
I1207 20:02:19.420400 8179 ssh_runner.go:195] Run: sudo systemctl enable docker.socket
I1207 20:02:19.532828 8179 docker.go:560] configuring docker to use "cgroupfs" as cgroup driver...
I1207 20:02:19.532958 8179 ssh_runner.go:362] scp memory --> /etc/docker/daemon.json (130 bytes)
I1207 20:02:19.555845 8179 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1207 20:02:19.659381 8179 ssh_runner.go:195] Run: sudo systemctl restart docker
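The 130-byte /etc/docker/daemon.json written just above is not echoed in the log; its purpose is to align Docker's cgroup driver with the cgroupfs driver chosen for the node. A plausible minimal file, given purely as an assumption about its contents, is:

# Assumed illustration; the actual 130-byte file minikube writes is not shown in this log.
cat <<'EOF' > /tmp/daemon.json.example
{
  "exec-opts": ["native.cgroupdriver=cgroupfs"]
}
EOF
# After the restart above, the effective driver can be confirmed with:
docker info --format '{{.CgroupDriver}}'   # expect: cgroupfs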
I1207 20:02:19.945509 8179 ssh_runner.go:195] Run: sudo systemctl enable cri-docker.socket
I1207 20:02:20.047016 8179 ssh_runner.go:195] Run: sudo systemctl unmask cri-docker.socket
I1207 20:02:20.152061 8179 ssh_runner.go:195] Run: sudo systemctl enable cri-docker.socket
I1207 20:02:20.265329 8179 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1207 20:02:20.369873 8179 ssh_runner.go:195] Run: sudo systemctl restart cri-docker.socket
I1207 20:02:20.386401 8179 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1207 20:02:20.479121 8179 ssh_runner.go:195] Run: sudo systemctl restart cri-docker
I1207 20:02:20.565710 8179 start.go:522] Will wait 60s for socket path /var/run/cri-dockerd.sock
I1207 20:02:20.565869 8179 ssh_runner.go:195] Run: stat /var/run/cri-dockerd.sock
I1207 20:02:20.571517 8179 start.go:543] Will wait 60s for crictl version
I1207 20:02:20.571624 8179 ssh_runner.go:195] Run: which crictl
I1207 20:02:20.576310 8179 ssh_runner.go:195] Run: sudo /usr/bin/crictl version
I1207 20:02:20.631331 8179 start.go:559] Version: 0.1.0
RuntimeName: docker
RuntimeVersion: 24.0.7
RuntimeApiVersion: v1
I1207 20:02:20.631469 8179 ssh_runner.go:195] Run: docker version --format {{.Server.Version}}
I1207 20:02:20.658222 8179 ssh_runner.go:195] Run: docker version --format {{.Server.Version}}
I1207 20:02:20.693007 8179 out.go:204] * Preparing Kubernetes v1.28.4 on Docker 24.0.7 ...
I1207 20:02:20.693137 8179 cli_runner.go:164] Run: docker network inspect addons-946218 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I1207 20:02:20.711671 8179 ssh_runner.go:195] Run: grep 192.168.49.1 host.minikube.internal$ /etc/hosts
I1207 20:02:20.717395 8179 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\thost.minikube.internal$' "/etc/hosts"; echo "192.168.49.1 host.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
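The one-liner above is an idempotent /etc/hosts update: it filters out any existing host.minikube.internal line, appends the fresh mapping, writes the result to a temp file, and copies it back over /etc/hosts so the entry never duplicates. The same pattern is used again later for control-plane.minikube.internal. Written out as a small helper for readability (a sketch; the IP and hostname are the ones from this run):

# Sketch of the idempotent /etc/hosts update pattern used above.
add_hosts_entry() {
  local ip="$1" name="$2"
  # Drop any stale line for this name, then append the current mapping.
  { grep -v $'\t'"${name}"'$' /etc/hosts; printf '%s\t%s\n' "${ip}" "${name}"; } > "/tmp/hosts.$$"
  sudo cp "/tmp/hosts.$$" /etc/hosts
}
add_hosts_entry 192.168.49.1 host.minikube.internal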
I1207 20:02:20.730459 8179 preload.go:132] Checking if preload exists for k8s version v1.28.4 and runtime docker
I1207 20:02:20.730534 8179 ssh_runner.go:195] Run: docker images --format {{.Repository}}:{{.Tag}}
I1207 20:02:20.750806 8179 docker.go:671] Got preloaded images: -- stdout --
registry.k8s.io/kube-apiserver:v1.28.4
registry.k8s.io/kube-scheduler:v1.28.4
registry.k8s.io/kube-controller-manager:v1.28.4
registry.k8s.io/kube-proxy:v1.28.4
registry.k8s.io/etcd:3.5.9-0
registry.k8s.io/coredns/coredns:v1.10.1
registry.k8s.io/pause:3.9
gcr.io/k8s-minikube/storage-provisioner:v5
-- /stdout --
I1207 20:02:20.750828 8179 docker.go:601] Images already preloaded, skipping extraction
I1207 20:02:20.750893 8179 ssh_runner.go:195] Run: docker images --format {{.Repository}}:{{.Tag}}
I1207 20:02:20.770859 8179 docker.go:671] Got preloaded images: -- stdout --
registry.k8s.io/kube-apiserver:v1.28.4
registry.k8s.io/kube-controller-manager:v1.28.4
registry.k8s.io/kube-scheduler:v1.28.4
registry.k8s.io/kube-proxy:v1.28.4
registry.k8s.io/etcd:3.5.9-0
registry.k8s.io/coredns/coredns:v1.10.1
registry.k8s.io/pause:3.9
gcr.io/k8s-minikube/storage-provisioner:v5
-- /stdout --
I1207 20:02:20.770879 8179 cache_images.go:84] Images are preloaded, skipping loading
I1207 20:02:20.770936 8179 ssh_runner.go:195] Run: docker info --format {{.CgroupDriver}}
I1207 20:02:20.837307 8179 cni.go:84] Creating CNI manager for ""
I1207 20:02:20.837332 8179 cni.go:158] "docker" driver + "docker" container runtime found on kubernetes v1.24+, recommending bridge
I1207 20:02:20.837363 8179 kubeadm.go:87] Using pod CIDR: 10.244.0.0/16
I1207 20:02:20.837382 8179 kubeadm.go:176] kubeadm options: {CertDir:/var/lib/minikube/certs ServiceCIDR:10.96.0.0/12 PodSubnet:10.244.0.0/16 AdvertiseAddress:192.168.49.2 APIServerPort:8443 KubernetesVersion:v1.28.4 EtcdDataDir:/var/lib/minikube/etcd EtcdExtraArgs:map[] ClusterName:addons-946218 NodeName:addons-946218 DNSDomain:cluster.local CRISocket:/var/run/cri-dockerd.sock ImageRepository: ComponentOptions:[{Component:apiServer ExtraArgs:map[enable-admission-plugins:NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota] Pairs:map[certSANs:["127.0.0.1", "localhost", "192.168.49.2"]]} {Component:controllerManager ExtraArgs:map[allocate-node-cidrs:true leader-elect:false] Pairs:map[]} {Component:scheduler ExtraArgs:map[leader-elect:false] Pairs:map[]}] FeatureArgs:map[] NodeIP:192.168.49.2 CgroupDriver:cgroupfs ClientCAFile:/var/lib/minikube/certs/ca.crt StaticPodPath:/etc/kuber
netes/manifests ControlPlaneAddress:control-plane.minikube.internal KubeProxyOptions:map[] ResolvConfSearchRegression:false KubeletConfigOpts:map[hairpinMode:hairpin-veth runtimeRequestTimeout:15m] PrependCriSocketUnix:true}
I1207 20:02:20.837520 8179 kubeadm.go:181] kubeadm config:
apiVersion: kubeadm.k8s.io/v1beta3
kind: InitConfiguration
localAPIEndpoint:
advertiseAddress: 192.168.49.2
bindPort: 8443
bootstrapTokens:
- groups:
- system:bootstrappers:kubeadm:default-node-token
ttl: 24h0m0s
usages:
- signing
- authentication
nodeRegistration:
criSocket: unix:///var/run/cri-dockerd.sock
name: "addons-946218"
kubeletExtraArgs:
node-ip: 192.168.49.2
taints: []
---
apiVersion: kubeadm.k8s.io/v1beta3
kind: ClusterConfiguration
apiServer:
certSANs: ["127.0.0.1", "localhost", "192.168.49.2"]
extraArgs:
enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
controllerManager:
extraArgs:
allocate-node-cidrs: "true"
leader-elect: "false"
scheduler:
extraArgs:
leader-elect: "false"
certificatesDir: /var/lib/minikube/certs
clusterName: mk
controlPlaneEndpoint: control-plane.minikube.internal:8443
etcd:
local:
dataDir: /var/lib/minikube/etcd
extraArgs:
proxy-refresh-interval: "70000"
kubernetesVersion: v1.28.4
networking:
dnsDomain: cluster.local
podSubnet: "10.244.0.0/16"
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
authentication:
x509:
clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: cgroupfs
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"
nodefs.inodesFree: "0%"
imagefs.available: "0%"
failSwapOn: false
staticPodPath: /etc/kubernetes/manifests
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
maxPerCore: 0
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_established"
tcpEstablishedTimeout: 0s
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_close"
tcpCloseWaitTimeout: 0s
I1207 20:02:20.837580 8179 kubeadm.go:976] kubelet [Unit]
Wants=docker.socket
[Service]
ExecStart=
ExecStart=/var/lib/minikube/binaries/v1.28.4/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --config=/var/lib/kubelet/config.yaml --container-runtime-endpoint=unix:///var/run/cri-dockerd.sock --hostname-override=addons-946218 --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.49.2
[Install]
config:
{KubernetesVersion:v1.28.4 ClusterName:addons-946218 Namespace:default APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI: NodeIP: NodePort:8443 NodeName:}
I1207 20:02:20.837645 8179 ssh_runner.go:195] Run: sudo ls /var/lib/minikube/binaries/v1.28.4
I1207 20:02:20.848265 8179 binaries.go:44] Found k8s binaries, skipping transfer
I1207 20:02:20.848335 8179 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/kubelet.service.d /lib/systemd/system /var/tmp/minikube
I1207 20:02:20.858677 8179 ssh_runner.go:362] scp memory --> /etc/systemd/system/kubelet.service.d/10-kubeadm.conf (374 bytes)
I1207 20:02:20.879501 8179 ssh_runner.go:362] scp memory --> /lib/systemd/system/kubelet.service (352 bytes)
I1207 20:02:20.900528 8179 ssh_runner.go:362] scp memory --> /var/tmp/minikube/kubeadm.yaml.new (2096 bytes)
I1207 20:02:20.923940 8179 ssh_runner.go:195] Run: grep 192.168.49.2 control-plane.minikube.internal$ /etc/hosts
I1207 20:02:20.929099 8179 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\tcontrol-plane.minikube.internal$' "/etc/hosts"; echo "192.168.49.2 control-plane.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I1207 20:02:20.942761 8179 certs.go:56] Setting up /home/jenkins/minikube-integration/17719-2292/.minikube/profiles/addons-946218 for IP: 192.168.49.2
I1207 20:02:20.942791 8179 certs.go:190] acquiring lock for shared ca certs: {Name:mkf0aeb9e21068cbc2b0de52461bf1fef9a8e437 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1207 20:02:20.942964 8179 certs.go:204] generating minikubeCA CA: /home/jenkins/minikube-integration/17719-2292/.minikube/ca.key
I1207 20:02:21.380265 8179 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/17719-2292/.minikube/ca.crt ...
I1207 20:02:21.380296 8179 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/17719-2292/.minikube/ca.crt: {Name:mk7ceea1db1bd78d8ff3cd83388dfb039d50578e Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1207 20:02:21.380526 8179 crypto.go:164] Writing key to /home/jenkins/minikube-integration/17719-2292/.minikube/ca.key ...
I1207 20:02:21.380540 8179 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/17719-2292/.minikube/ca.key: {Name:mkc3769f3f69c6769c51828d730055861365cdd5 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1207 20:02:21.380667 8179 certs.go:204] generating proxyClientCA CA: /home/jenkins/minikube-integration/17719-2292/.minikube/proxy-client-ca.key
I1207 20:02:22.095716 8179 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/17719-2292/.minikube/proxy-client-ca.crt ...
I1207 20:02:22.095747 8179 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/17719-2292/.minikube/proxy-client-ca.crt: {Name:mk696882be0b9995b623d2694c88b096a79390bf Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1207 20:02:22.095930 8179 crypto.go:164] Writing key to /home/jenkins/minikube-integration/17719-2292/.minikube/proxy-client-ca.key ...
I1207 20:02:22.095942 8179 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/17719-2292/.minikube/proxy-client-ca.key: {Name:mk48e3e2b08aef7f4d025a730522ceaed8b9f0fa Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1207 20:02:22.096063 8179 certs.go:319] generating minikube-user signed cert: /home/jenkins/minikube-integration/17719-2292/.minikube/profiles/addons-946218/client.key
I1207 20:02:22.096082 8179 crypto.go:68] Generating cert /home/jenkins/minikube-integration/17719-2292/.minikube/profiles/addons-946218/client.crt with IP's: []
I1207 20:02:22.997954 8179 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/17719-2292/.minikube/profiles/addons-946218/client.crt ...
I1207 20:02:22.997985 8179 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/17719-2292/.minikube/profiles/addons-946218/client.crt: {Name:mk9ccbb531a4a93d5f2c07b671553d9c617c73f1 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1207 20:02:22.998170 8179 crypto.go:164] Writing key to /home/jenkins/minikube-integration/17719-2292/.minikube/profiles/addons-946218/client.key ...
I1207 20:02:22.998183 8179 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/17719-2292/.minikube/profiles/addons-946218/client.key: {Name:mk4681cc26d44551b42b7fc6818bbc6210d48707 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1207 20:02:22.998261 8179 certs.go:319] generating minikube signed cert: /home/jenkins/minikube-integration/17719-2292/.minikube/profiles/addons-946218/apiserver.key.dd3b5fb2
I1207 20:02:22.998282 8179 crypto.go:68] Generating cert /home/jenkins/minikube-integration/17719-2292/.minikube/profiles/addons-946218/apiserver.crt.dd3b5fb2 with IP's: [192.168.49.2 10.96.0.1 127.0.0.1 10.0.0.1]
I1207 20:02:23.233452 8179 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/17719-2292/.minikube/profiles/addons-946218/apiserver.crt.dd3b5fb2 ...
I1207 20:02:23.233481 8179 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/17719-2292/.minikube/profiles/addons-946218/apiserver.crt.dd3b5fb2: {Name:mkcd59d7086ebabbdb32ca5bbf6fc57296fe3fb1 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1207 20:02:23.233656 8179 crypto.go:164] Writing key to /home/jenkins/minikube-integration/17719-2292/.minikube/profiles/addons-946218/apiserver.key.dd3b5fb2 ...
I1207 20:02:23.233670 8179 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/17719-2292/.minikube/profiles/addons-946218/apiserver.key.dd3b5fb2: {Name:mke22dec4fec3cb0156b8f784fed6b2c7207d372 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1207 20:02:23.233751 8179 certs.go:337] copying /home/jenkins/minikube-integration/17719-2292/.minikube/profiles/addons-946218/apiserver.crt.dd3b5fb2 -> /home/jenkins/minikube-integration/17719-2292/.minikube/profiles/addons-946218/apiserver.crt
I1207 20:02:23.233828 8179 certs.go:341] copying /home/jenkins/minikube-integration/17719-2292/.minikube/profiles/addons-946218/apiserver.key.dd3b5fb2 -> /home/jenkins/minikube-integration/17719-2292/.minikube/profiles/addons-946218/apiserver.key
I1207 20:02:23.233877 8179 certs.go:319] generating aggregator signed cert: /home/jenkins/minikube-integration/17719-2292/.minikube/profiles/addons-946218/proxy-client.key
I1207 20:02:23.233895 8179 crypto.go:68] Generating cert /home/jenkins/minikube-integration/17719-2292/.minikube/profiles/addons-946218/proxy-client.crt with IP's: []
I1207 20:02:23.834707 8179 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/17719-2292/.minikube/profiles/addons-946218/proxy-client.crt ...
I1207 20:02:23.834737 8179 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/17719-2292/.minikube/profiles/addons-946218/proxy-client.crt: {Name:mk879aecf0537d0eb357029d6bec319cf746a098 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1207 20:02:23.834914 8179 crypto.go:164] Writing key to /home/jenkins/minikube-integration/17719-2292/.minikube/profiles/addons-946218/proxy-client.key ...
I1207 20:02:23.834925 8179 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/17719-2292/.minikube/profiles/addons-946218/proxy-client.key: {Name:mk04ea429673542cd5a29ed3d278e8c240fadb62 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1207 20:02:23.835119 8179 certs.go:437] found cert: /home/jenkins/minikube-integration/17719-2292/.minikube/certs/home/jenkins/minikube-integration/17719-2292/.minikube/certs/ca-key.pem (1675 bytes)
I1207 20:02:23.835159 8179 certs.go:437] found cert: /home/jenkins/minikube-integration/17719-2292/.minikube/certs/home/jenkins/minikube-integration/17719-2292/.minikube/certs/ca.pem (1078 bytes)
I1207 20:02:23.835189 8179 certs.go:437] found cert: /home/jenkins/minikube-integration/17719-2292/.minikube/certs/home/jenkins/minikube-integration/17719-2292/.minikube/certs/cert.pem (1123 bytes)
I1207 20:02:23.835224 8179 certs.go:437] found cert: /home/jenkins/minikube-integration/17719-2292/.minikube/certs/home/jenkins/minikube-integration/17719-2292/.minikube/certs/key.pem (1679 bytes)
I1207 20:02:23.835862 8179 ssh_runner.go:362] scp /home/jenkins/minikube-integration/17719-2292/.minikube/profiles/addons-946218/apiserver.crt --> /var/lib/minikube/certs/apiserver.crt (1399 bytes)
I1207 20:02:23.865274 8179 ssh_runner.go:362] scp /home/jenkins/minikube-integration/17719-2292/.minikube/profiles/addons-946218/apiserver.key --> /var/lib/minikube/certs/apiserver.key (1679 bytes)
I1207 20:02:23.892902 8179 ssh_runner.go:362] scp /home/jenkins/minikube-integration/17719-2292/.minikube/profiles/addons-946218/proxy-client.crt --> /var/lib/minikube/certs/proxy-client.crt (1147 bytes)
I1207 20:02:23.920516 8179 ssh_runner.go:362] scp /home/jenkins/minikube-integration/17719-2292/.minikube/profiles/addons-946218/proxy-client.key --> /var/lib/minikube/certs/proxy-client.key (1679 bytes)
I1207 20:02:23.948664 8179 ssh_runner.go:362] scp /home/jenkins/minikube-integration/17719-2292/.minikube/ca.crt --> /var/lib/minikube/certs/ca.crt (1111 bytes)
I1207 20:02:23.975980 8179 ssh_runner.go:362] scp /home/jenkins/minikube-integration/17719-2292/.minikube/ca.key --> /var/lib/minikube/certs/ca.key (1679 bytes)
I1207 20:02:24.005605 8179 ssh_runner.go:362] scp /home/jenkins/minikube-integration/17719-2292/.minikube/proxy-client-ca.crt --> /var/lib/minikube/certs/proxy-client-ca.crt (1119 bytes)
I1207 20:02:24.036946 8179 ssh_runner.go:362] scp /home/jenkins/minikube-integration/17719-2292/.minikube/proxy-client-ca.key --> /var/lib/minikube/certs/proxy-client-ca.key (1679 bytes)
I1207 20:02:24.067904 8179 ssh_runner.go:362] scp /home/jenkins/minikube-integration/17719-2292/.minikube/ca.crt --> /usr/share/ca-certificates/minikubeCA.pem (1111 bytes)
I1207 20:02:24.098948 8179 ssh_runner.go:362] scp memory --> /var/lib/minikube/kubeconfig (738 bytes)
I1207 20:02:24.121826 8179 ssh_runner.go:195] Run: openssl version
I1207 20:02:24.129227 8179 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/minikubeCA.pem && ln -fs /usr/share/ca-certificates/minikubeCA.pem /etc/ssl/certs/minikubeCA.pem"
I1207 20:02:24.142052 8179 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/minikubeCA.pem
I1207 20:02:24.147107 8179 certs.go:480] hashing: -rw-r--r-- 1 root root 1111 Dec 7 20:02 /usr/share/ca-certificates/minikubeCA.pem
I1207 20:02:24.147224 8179 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem
I1207 20:02:24.157744 8179 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/b5213941.0 || ln -fs /etc/ssl/certs/minikubeCA.pem /etc/ssl/certs/b5213941.0"
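The openssl x509 -hash call above prints the subject-name hash that OpenSSL uses to look up CAs in /etc/ssl/certs, and the following ln -fs publishes minikubeCA.pem under that hash (b5213941.0 here). The same two steps as a standalone sketch, ending with a verification that the CA is now trusted via the hash directory:

# Derive the subject hash and expose the CA under the name OpenSSL expects (sketch).
hash=$(openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem)
sudo ln -fs /etc/ssl/certs/minikubeCA.pem "/etc/ssl/certs/${hash}.0"
openssl verify -CApath /etc/ssl/certs /usr/share/ca-certificates/minikubeCA.pem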
I1207 20:02:24.169925 8179 ssh_runner.go:195] Run: ls /var/lib/minikube/certs/etcd
I1207 20:02:24.174611 8179 certs.go:353] certs directory doesn't exist, likely first start: ls /var/lib/minikube/certs/etcd: Process exited with status 2
stdout:
stderr:
ls: cannot access '/var/lib/minikube/certs/etcd': No such file or directory
I1207 20:02:24.174714 8179 kubeadm.go:404] StartCluster: {Name:addons-946218 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.42-1701974066-17719@sha256:cec630e7d143790c46e2dc54dbb8f39a22d8ede3e3c25e34638082e2c107a85c Memory:4000 CPUs:2 DiskSize:20000 VMDriver: Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:0 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.28.4 ClusterName:addons-946218 Namespace:default APIServerName:minikubeCA APIServerNames:[] APISe
rverIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI: NodeIP: NodePort:8443 NodeName:} Nodes:[{Name: IP:192.168.49.2 Port:8443 KubernetesVersion:v1.28.4 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetri
cs:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 AutoPauseInterval:1m0s GPUs:}
I1207 20:02:24.174868 8179 ssh_runner.go:195] Run: docker ps --filter status=paused --filter=name=k8s_.*_(kube-system)_ --format={{.ID}}
I1207 20:02:24.196638 8179 ssh_runner.go:195] Run: sudo ls /var/lib/kubelet/kubeadm-flags.env /var/lib/kubelet/config.yaml /var/lib/minikube/etcd
I1207 20:02:24.207710 8179 ssh_runner.go:195] Run: sudo cp /var/tmp/minikube/kubeadm.yaml.new /var/tmp/minikube/kubeadm.yaml
I1207 20:02:24.218532 8179 kubeadm.go:226] ignoring SystemVerification for kubeadm because of docker driver
I1207 20:02:24.218622 8179 ssh_runner.go:195] Run: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf
I1207 20:02:24.229151 8179 kubeadm.go:152] config check failed, skipping stale config cleanup: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
ls: cannot access '/etc/kubernetes/admin.conf': No such file or directory
ls: cannot access '/etc/kubernetes/kubelet.conf': No such file or directory
ls: cannot access '/etc/kubernetes/controller-manager.conf': No such file or directory
ls: cannot access '/etc/kubernetes/scheduler.conf': No such file or directory
I1207 20:02:24.229199 8179 ssh_runner.go:286] Start: /bin/bash -c "sudo env PATH="/var/lib/minikube/binaries/v1.28.4:$PATH" kubeadm init --config /var/tmp/minikube/kubeadm.yaml --ignore-preflight-errors=DirAvailable--etc-kubernetes-manifests,DirAvailable--var-lib-minikube,DirAvailable--var-lib-minikube-etcd,FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml,FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml,FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml,FileAvailable--etc-kubernetes-manifests-etcd.yaml,Port-10250,Swap,NumCPU,Mem,SystemVerification,FileContent--proc-sys-net-bridge-bridge-nf-call-iptables"
I1207 20:02:24.279389 8179 kubeadm.go:322] [init] Using Kubernetes version: v1.28.4
I1207 20:02:24.279618 8179 kubeadm.go:322] [preflight] Running pre-flight checks
I1207 20:02:24.337918 8179 kubeadm.go:322] [preflight] The system verification failed. Printing the output from the verification:
I1207 20:02:24.337987 8179 kubeadm.go:322] KERNEL_VERSION: 5.15.0-1050-aws
I1207 20:02:24.338025 8179 kubeadm.go:322] OS: Linux
I1207 20:02:24.338072 8179 kubeadm.go:322] CGROUPS_CPU: enabled
I1207 20:02:24.338122 8179 kubeadm.go:322] CGROUPS_CPUACCT: enabled
I1207 20:02:24.338170 8179 kubeadm.go:322] CGROUPS_CPUSET: enabled
I1207 20:02:24.338219 8179 kubeadm.go:322] CGROUPS_DEVICES: enabled
I1207 20:02:24.338271 8179 kubeadm.go:322] CGROUPS_FREEZER: enabled
I1207 20:02:24.338328 8179 kubeadm.go:322] CGROUPS_MEMORY: enabled
I1207 20:02:24.338375 8179 kubeadm.go:322] CGROUPS_PIDS: enabled
I1207 20:02:24.338424 8179 kubeadm.go:322] CGROUPS_HUGETLB: enabled
I1207 20:02:24.338472 8179 kubeadm.go:322] CGROUPS_BLKIO: enabled
I1207 20:02:24.415348 8179 kubeadm.go:322] [preflight] Pulling images required for setting up a Kubernetes cluster
I1207 20:02:24.415455 8179 kubeadm.go:322] [preflight] This might take a minute or two, depending on the speed of your internet connection
I1207 20:02:24.415546 8179 kubeadm.go:322] [preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
I1207 20:02:24.756552 8179 kubeadm.go:322] [certs] Using certificateDir folder "/var/lib/minikube/certs"
I1207 20:02:24.759274 8179 out.go:204] - Generating certificates and keys ...
I1207 20:02:24.759398 8179 kubeadm.go:322] [certs] Using existing ca certificate authority
I1207 20:02:24.759466 8179 kubeadm.go:322] [certs] Using existing apiserver certificate and key on disk
I1207 20:02:26.344397 8179 kubeadm.go:322] [certs] Generating "apiserver-kubelet-client" certificate and key
I1207 20:02:26.737039 8179 kubeadm.go:322] [certs] Generating "front-proxy-ca" certificate and key
I1207 20:02:26.989631 8179 kubeadm.go:322] [certs] Generating "front-proxy-client" certificate and key
I1207 20:02:27.640224 8179 kubeadm.go:322] [certs] Generating "etcd/ca" certificate and key
I1207 20:02:28.090802 8179 kubeadm.go:322] [certs] Generating "etcd/server" certificate and key
I1207 20:02:28.090951 8179 kubeadm.go:322] [certs] etcd/server serving cert is signed for DNS names [addons-946218 localhost] and IPs [192.168.49.2 127.0.0.1 ::1]
I1207 20:02:28.853386 8179 kubeadm.go:322] [certs] Generating "etcd/peer" certificate and key
I1207 20:02:28.853772 8179 kubeadm.go:322] [certs] etcd/peer serving cert is signed for DNS names [addons-946218 localhost] and IPs [192.168.49.2 127.0.0.1 ::1]
I1207 20:02:29.103437 8179 kubeadm.go:322] [certs] Generating "etcd/healthcheck-client" certificate and key
I1207 20:02:29.692360 8179 kubeadm.go:322] [certs] Generating "apiserver-etcd-client" certificate and key
I1207 20:02:30.391858 8179 kubeadm.go:322] [certs] Generating "sa" key and public key
I1207 20:02:30.392165 8179 kubeadm.go:322] [kubeconfig] Using kubeconfig folder "/etc/kubernetes"
I1207 20:02:30.662054 8179 kubeadm.go:322] [kubeconfig] Writing "admin.conf" kubeconfig file
I1207 20:02:31.349247 8179 kubeadm.go:322] [kubeconfig] Writing "kubelet.conf" kubeconfig file
I1207 20:02:31.810084 8179 kubeadm.go:322] [kubeconfig] Writing "controller-manager.conf" kubeconfig file
I1207 20:02:31.977168 8179 kubeadm.go:322] [kubeconfig] Writing "scheduler.conf" kubeconfig file
I1207 20:02:31.977943 8179 kubeadm.go:322] [etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
I1207 20:02:31.980762 8179 kubeadm.go:322] [control-plane] Using manifest folder "/etc/kubernetes/manifests"
I1207 20:02:31.983512 8179 out.go:204] - Booting up control plane ...
I1207 20:02:31.983642 8179 kubeadm.go:322] [control-plane] Creating static Pod manifest for "kube-apiserver"
I1207 20:02:31.983715 8179 kubeadm.go:322] [control-plane] Creating static Pod manifest for "kube-controller-manager"
I1207 20:02:31.983785 8179 kubeadm.go:322] [control-plane] Creating static Pod manifest for "kube-scheduler"
I1207 20:02:32.000369 8179 kubeadm.go:322] [kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
I1207 20:02:32.005226 8179 kubeadm.go:322] [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
I1207 20:02:32.005280 8179 kubeadm.go:322] [kubelet-start] Starting the kubelet
I1207 20:02:32.121913 8179 kubeadm.go:322] [wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
I1207 20:02:41.124212 8179 kubeadm.go:322] [apiclient] All control plane components are healthy after 9.002319 seconds
I1207 20:02:41.124505 8179 kubeadm.go:322] [upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
I1207 20:02:41.139604 8179 kubeadm.go:322] [kubelet] Creating a ConfigMap "kubelet-config" in namespace kube-system with the configuration for the kubelets in the cluster
I1207 20:02:41.663151 8179 kubeadm.go:322] [upload-certs] Skipping phase. Please see --upload-certs
I1207 20:02:41.663494 8179 kubeadm.go:322] [mark-control-plane] Marking the node addons-946218 as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]
I1207 20:02:42.180999 8179 kubeadm.go:322] [bootstrap-token] Using token: czr00f.6ft80texim7ay2mo
I1207 20:02:42.184115 8179 out.go:204] - Configuring RBAC rules ...
I1207 20:02:42.184253 8179 kubeadm.go:322] [bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
I1207 20:02:42.191094 8179 kubeadm.go:322] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes
I1207 20:02:42.201910 8179 kubeadm.go:322] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
I1207 20:02:42.208223 8179 kubeadm.go:322] [bootstrap-token] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
I1207 20:02:42.213081 8179 kubeadm.go:322] [bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
I1207 20:02:42.227199 8179 kubeadm.go:322] [bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
I1207 20:02:42.244850 8179 kubeadm.go:322] [kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
I1207 20:02:42.498721 8179 kubeadm.go:322] [addons] Applied essential addon: CoreDNS
I1207 20:02:42.599656 8179 kubeadm.go:322] [addons] Applied essential addon: kube-proxy
I1207 20:02:42.601296 8179 kubeadm.go:322]
I1207 20:02:42.601364 8179 kubeadm.go:322] Your Kubernetes control-plane has initialized successfully!
I1207 20:02:42.601370 8179 kubeadm.go:322]
I1207 20:02:42.601441 8179 kubeadm.go:322] To start using your cluster, you need to run the following as a regular user:
I1207 20:02:42.601446 8179 kubeadm.go:322]
I1207 20:02:42.601470 8179 kubeadm.go:322] mkdir -p $HOME/.kube
I1207 20:02:42.602196 8179 kubeadm.go:322] sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
I1207 20:02:42.602250 8179 kubeadm.go:322] sudo chown $(id -u):$(id -g) $HOME/.kube/config
I1207 20:02:42.602255 8179 kubeadm.go:322]
I1207 20:02:42.602314 8179 kubeadm.go:322] Alternatively, if you are the root user, you can run:
I1207 20:02:42.602320 8179 kubeadm.go:322]
I1207 20:02:42.602364 8179 kubeadm.go:322] export KUBECONFIG=/etc/kubernetes/admin.conf
I1207 20:02:42.602369 8179 kubeadm.go:322]
I1207 20:02:42.602417 8179 kubeadm.go:322] You should now deploy a pod network to the cluster.
I1207 20:02:42.602500 8179 kubeadm.go:322] Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
I1207 20:02:42.602564 8179 kubeadm.go:322] https://kubernetes.io/docs/concepts/cluster-administration/addons/
I1207 20:02:42.602569 8179 kubeadm.go:322]
I1207 20:02:42.602958 8179 kubeadm.go:322] You can now join any number of control-plane nodes by copying certificate authorities
I1207 20:02:42.603036 8179 kubeadm.go:322] and service account keys on each node and then running the following as root:
I1207 20:02:42.603041 8179 kubeadm.go:322]
I1207 20:02:42.603385 8179 kubeadm.go:322] kubeadm join control-plane.minikube.internal:8443 --token czr00f.6ft80texim7ay2mo \
I1207 20:02:42.603486 8179 kubeadm.go:322] --discovery-token-ca-cert-hash sha256:bf03bebb018fea717c072634f3af28c80686bb1a7a8d0c481a3a9bb717d143b1 \
I1207 20:02:42.603757 8179 kubeadm.go:322] --control-plane
I1207 20:02:42.603769 8179 kubeadm.go:322]
I1207 20:02:42.604127 8179 kubeadm.go:322] Then you can join any number of worker nodes by running the following on each as root:
I1207 20:02:42.604137 8179 kubeadm.go:322]
I1207 20:02:42.605112 8179 kubeadm.go:322] kubeadm join control-plane.minikube.internal:8443 --token czr00f.6ft80texim7ay2mo \
I1207 20:02:42.605499 8179 kubeadm.go:322] --discovery-token-ca-cert-hash sha256:bf03bebb018fea717c072634f3af28c80686bb1a7a8d0c481a3a9bb717d143b1
I1207 20:02:42.608806 8179 kubeadm.go:322] [WARNING SystemVerification]: failed to parse kernel config: unable to load kernel module: "configs", output: "modprobe: FATAL: Module configs not found in directory /lib/modules/5.15.0-1050-aws\n", err: exit status 1
I1207 20:02:42.608917 8179 kubeadm.go:322] [WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'
I1207 20:02:42.608936 8179 cni.go:84] Creating CNI manager for ""
I1207 20:02:42.608952 8179 cni.go:158] "docker" driver + "docker" container runtime found on kubernetes v1.24+, recommending bridge
I1207 20:02:42.611019 8179 out.go:177] * Configuring bridge CNI (Container Networking Interface) ...
I1207 20:02:42.612655 8179 ssh_runner.go:195] Run: sudo mkdir -p /etc/cni/net.d
I1207 20:02:42.643919 8179 ssh_runner.go:362] scp memory --> /etc/cni/net.d/1-k8s.conflist (457 bytes)
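The 457-byte /etc/cni/net.d/1-k8s.conflist pushed above is the bridge CNI configuration announced by the "Configuring bridge CNI" message; its exact contents are not printed in the log. A typical bridge conflist for the 10.244.0.0/16 pod CIDR used by this cluster, given purely as an assumed illustration, looks like:

# Assumed example of a bridge CNI conflist; the file minikube actually writes may differ.
cat <<'EOF' > /tmp/1-k8s.conflist.example
{
  "cniVersion": "1.0.0",
  "name": "bridge",
  "plugins": [
    {
      "type": "bridge",
      "bridge": "bridge",
      "isDefaultGateway": true,
      "ipMasq": true,
      "hairpinMode": true,
      "ipam": {
        "type": "host-local",
        "subnet": "10.244.0.0/16"
      }
    },
    {
      "type": "portmap",
      "capabilities": { "portMappings": true }
    }
  ]
}
EOF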
I1207 20:02:42.689707 8179 ssh_runner.go:195] Run: /bin/bash -c "cat /proc/$(pgrep kube-apiserver)/oom_adj"
I1207 20:02:42.689829 8179 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.4/kubectl create clusterrolebinding minikube-rbac --clusterrole=cluster-admin --serviceaccount=kube-system:default --kubeconfig=/var/lib/minikube/kubeconfig
I1207 20:02:42.689914 8179 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.4/kubectl label nodes minikube.k8s.io/version=v1.32.0 minikube.k8s.io/commit=e9ef2cce417fa3e029706bd52eaf40ea89608b2c minikube.k8s.io/name=addons-946218 minikube.k8s.io/updated_at=2023_12_07T20_02_42_0700 minikube.k8s.io/primary=true --all --overwrite --kubeconfig=/var/lib/minikube/kubeconfig
I1207 20:02:43.019442 8179 ops.go:34] apiserver oom_adj: -16
I1207 20:02:43.019536 8179 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.4/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1207 20:02:43.116861 8179 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.4/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1207 20:02:43.711218 8179 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.4/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1207 20:02:44.211311 8179 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.4/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1207 20:02:44.710674 8179 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.4/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1207 20:02:45.211529 8179 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.4/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1207 20:02:45.711382 8179 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.4/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1207 20:02:46.210649 8179 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.4/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1207 20:02:46.711192 8179 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.4/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1207 20:02:47.210710 8179 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.4/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1207 20:02:47.711296 8179 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.4/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1207 20:02:48.210661 8179 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.4/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1207 20:02:48.711398 8179 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.4/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1207 20:02:49.210703 8179 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.4/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1207 20:02:49.711088 8179 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.4/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1207 20:02:50.211641 8179 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.4/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1207 20:02:50.711167 8179 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.4/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1207 20:02:51.210848 8179 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.4/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1207 20:02:51.711423 8179 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.4/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1207 20:02:52.211572 8179 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.4/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1207 20:02:52.711162 8179 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.4/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1207 20:02:53.211197 8179 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.4/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1207 20:02:53.710638 8179 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.4/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1207 20:02:54.211550 8179 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.4/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1207 20:02:54.710963 8179 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.4/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1207 20:02:55.210623 8179 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.4/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1207 20:02:55.711208 8179 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.4/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1207 20:02:55.819316 8179 kubeadm.go:1088] duration metric: took 13.129530032s to wait for elevateKubeSystemPrivileges.
I1207 20:02:55.819342 8179 kubeadm.go:406] StartCluster complete in 31.64463978s
I1207 20:02:55.819357 8179 settings.go:142] acquiring lock: {Name:mk4e1ad85078db32f53ce2cb878f95b1dc79d720 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1207 20:02:55.819467 8179 settings.go:150] Updating kubeconfig: /home/jenkins/minikube-integration/17719-2292/kubeconfig
I1207 20:02:55.820154 8179 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/17719-2292/kubeconfig: {Name:mkb58bbc3586feb84db8c4c89653a5136ccfc407 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1207 20:02:55.820449 8179 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.28.4/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml"
I1207 20:02:55.820847 8179 config.go:182] Loaded profile config "addons-946218": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.28.4
I1207 20:02:55.821030 8179 addons.go:499] enable addons start: toEnable=map[ambassador:false auto-pause:false cloud-spanner:true csi-hostpath-driver:true dashboard:false default-storageclass:true efk:false freshpod:false gcp-auth:true gvisor:false headlamp:false helm-tiller:false inaccel:false ingress:true ingress-dns:true inspektor-gadget:true istio:false istio-provisioner:false kong:false kubeflow:false kubevirt:false logviewer:false metallb:false metrics-server:true nvidia-device-plugin:true nvidia-driver-installer:false nvidia-gpu-device-plugin:false olm:false pod-security-policy:false portainer:false registry:true registry-aliases:false registry-creds:false storage-provisioner:true storage-provisioner-gluster:false storage-provisioner-rancher:true volumesnapshots:true]
I1207 20:02:55.821148 8179 addons.go:69] Setting volumesnapshots=true in profile "addons-946218"
I1207 20:02:55.821164 8179 addons.go:231] Setting addon volumesnapshots=true in "addons-946218"
I1207 20:02:55.821223 8179 host.go:66] Checking if "addons-946218" exists ...
I1207 20:02:55.821777 8179 cli_runner.go:164] Run: docker container inspect addons-946218 --format={{.State.Status}}
I1207 20:02:55.822939 8179 addons.go:69] Setting inspektor-gadget=true in profile "addons-946218"
I1207 20:02:55.822962 8179 addons.go:231] Setting addon inspektor-gadget=true in "addons-946218"
I1207 20:02:55.822996 8179 host.go:66] Checking if "addons-946218" exists ...
I1207 20:02:55.823491 8179 cli_runner.go:164] Run: docker container inspect addons-946218 --format={{.State.Status}}
I1207 20:02:55.825594 8179 addons.go:69] Setting cloud-spanner=true in profile "addons-946218"
I1207 20:02:55.825623 8179 addons.go:231] Setting addon cloud-spanner=true in "addons-946218"
I1207 20:02:55.825668 8179 host.go:66] Checking if "addons-946218" exists ...
I1207 20:02:55.826163 8179 cli_runner.go:164] Run: docker container inspect addons-946218 --format={{.State.Status}}
I1207 20:02:55.826408 8179 addons.go:69] Setting metrics-server=true in profile "addons-946218"
I1207 20:02:55.826426 8179 addons.go:231] Setting addon metrics-server=true in "addons-946218"
I1207 20:02:55.826467 8179 host.go:66] Checking if "addons-946218" exists ...
I1207 20:02:55.826943 8179 cli_runner.go:164] Run: docker container inspect addons-946218 --format={{.State.Status}}
I1207 20:02:55.837649 8179 addons.go:69] Setting csi-hostpath-driver=true in profile "addons-946218"
I1207 20:02:55.837704 8179 addons.go:231] Setting addon csi-hostpath-driver=true in "addons-946218"
I1207 20:02:55.837753 8179 host.go:66] Checking if "addons-946218" exists ...
I1207 20:02:55.838276 8179 cli_runner.go:164] Run: docker container inspect addons-946218 --format={{.State.Status}}
I1207 20:02:55.843915 8179 addons.go:69] Setting default-storageclass=true in profile "addons-946218"
I1207 20:02:55.843943 8179 addons_storage_classes.go:33] enableOrDisableStorageClasses default-storageclass=true on "addons-946218"
I1207 20:02:55.844310 8179 cli_runner.go:164] Run: docker container inspect addons-946218 --format={{.State.Status}}
I1207 20:02:55.853216 8179 addons.go:69] Setting gcp-auth=true in profile "addons-946218"
I1207 20:02:55.853246 8179 mustload.go:65] Loading cluster: addons-946218
I1207 20:02:55.853485 8179 config.go:182] Loaded profile config "addons-946218": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.28.4
I1207 20:02:55.856687 8179 addons.go:69] Setting ingress=true in profile "addons-946218"
I1207 20:02:55.858993 8179 addons.go:231] Setting addon ingress=true in "addons-946218"
I1207 20:02:55.859059 8179 host.go:66] Checking if "addons-946218" exists ...
I1207 20:02:55.859488 8179 cli_runner.go:164] Run: docker container inspect addons-946218 --format={{.State.Status}}
I1207 20:02:55.856762 8179 addons.go:69] Setting nvidia-device-plugin=true in profile "addons-946218"
I1207 20:02:55.874738 8179 addons.go:231] Setting addon nvidia-device-plugin=true in "addons-946218"
I1207 20:02:55.874796 8179 host.go:66] Checking if "addons-946218" exists ...
I1207 20:02:55.875235 8179 cli_runner.go:164] Run: docker container inspect addons-946218 --format={{.State.Status}}
I1207 20:02:55.884026 8179 cli_runner.go:164] Run: docker container inspect addons-946218 --format={{.State.Status}}
I1207 20:02:55.856769 8179 addons.go:69] Setting registry=true in profile "addons-946218"
I1207 20:02:55.897150 8179 addons.go:231] Setting addon registry=true in "addons-946218"
I1207 20:02:55.897208 8179 host.go:66] Checking if "addons-946218" exists ...
I1207 20:02:55.897661 8179 cli_runner.go:164] Run: docker container inspect addons-946218 --format={{.State.Status}}
I1207 20:02:55.856780 8179 addons.go:69] Setting storage-provisioner=true in profile "addons-946218"
I1207 20:02:55.915585 8179 addons.go:231] Setting addon storage-provisioner=true in "addons-946218"
I1207 20:02:55.915643 8179 host.go:66] Checking if "addons-946218" exists ...
I1207 20:02:55.916098 8179 cli_runner.go:164] Run: docker container inspect addons-946218 --format={{.State.Status}}
I1207 20:02:55.856787 8179 addons.go:69] Setting storage-provisioner-rancher=true in profile "addons-946218"
I1207 20:02:55.949637 8179 addons_storage_classes.go:33] enableOrDisableStorageClasses storage-provisioner-rancher=true on "addons-946218"
I1207 20:02:55.949996 8179 cli_runner.go:164] Run: docker container inspect addons-946218 --format={{.State.Status}}
I1207 20:02:55.972784 8179 addons.go:69] Setting ingress-dns=true in profile "addons-946218"
I1207 20:02:55.972817 8179 addons.go:231] Setting addon ingress-dns=true in "addons-946218"
I1207 20:02:55.972878 8179 host.go:66] Checking if "addons-946218" exists ...
I1207 20:02:55.973447 8179 cli_runner.go:164] Run: docker container inspect addons-946218 --format={{.State.Status}}
I1207 20:02:56.073867 8179 out.go:177] - Using image ghcr.io/inspektor-gadget/inspektor-gadget:v0.23.1
I1207 20:02:56.076347 8179 addons.go:423] installing /etc/kubernetes/addons/ig-namespace.yaml
I1207 20:02:56.076366 8179 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/ig-namespace.yaml (55 bytes)
I1207 20:02:56.113485 8179 out.go:177] - Using image registry.k8s.io/metrics-server/metrics-server:v0.6.4
I1207 20:02:56.115810 8179 out.go:177] - Using image gcr.io/cloud-spanner-emulator/emulator:1.5.12
I1207 20:02:56.120728 8179 addons.go:423] installing /etc/kubernetes/addons/deployment.yaml
I1207 20:02:56.120749 8179 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/deployment.yaml (1004 bytes)
I1207 20:02:56.120813 8179 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-946218
I1207 20:02:56.129369 8179 out.go:177] - Using image registry.k8s.io/ingress-nginx/controller:v1.9.4
I1207 20:02:56.131275 8179 out.go:177] - Using image registry.k8s.io/ingress-nginx/kube-webhook-certgen:v20231011-8b53cabe0
I1207 20:02:56.133107 8179 out.go:177] - Using image registry.k8s.io/ingress-nginx/kube-webhook-certgen:v20231011-8b53cabe0
I1207 20:02:56.115906 8179 addons.go:423] installing /etc/kubernetes/addons/metrics-apiservice.yaml
I1207 20:02:56.073823 8179 addons.go:231] Setting addon default-storageclass=true in "addons-946218"
I1207 20:02:56.077256 8179 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-946218
I1207 20:02:56.139250 8179 host.go:66] Checking if "addons-946218" exists ...
I1207 20:02:56.148814 8179 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/metrics-apiservice.yaml (424 bytes)
I1207 20:02:56.160829 8179 out.go:177] - Using image registry.k8s.io/sig-storage/snapshot-controller:v6.1.0
I1207 20:02:56.160871 8179 host.go:66] Checking if "addons-946218" exists ...
I1207 20:02:56.164850 8179 out.go:177] - Using image registry.k8s.io/sig-storage/csi-external-health-monitor-controller:v0.7.0
I1207 20:02:56.170337 8179 kapi.go:248] "coredns" deployment in "kube-system" namespace and "addons-946218" context rescaled to 1 replicas
I1207 20:02:56.172796 8179 addons.go:423] installing /etc/kubernetes/addons/ingress-deploy.yaml
I1207 20:02:56.172867 8179 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-946218
I1207 20:02:56.179048 8179 addons.go:423] installing /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml
I1207 20:02:56.179071 8179 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml (934 bytes)
I1207 20:02:56.179129 8179 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-946218
I1207 20:02:56.192224 8179 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.28.4/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml | sed -e '/^ forward . \/etc\/resolv.conf.*/i \ hosts {\n 192.168.49.1 host.minikube.internal\n fallthrough\n }' -e '/^ errors *$/i \ log' | sudo /var/lib/minikube/binaries/v1.28.4/kubectl --kubeconfig=/var/lib/minikube/kubeconfig replace -f -"
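The long pipeline above fetches the coredns ConfigMap, injects a hosts block ahead of the forward directive so host.minikube.internal resolves to the gateway IP from inside the cluster, enables query logging, and replaces the ConfigMap. The resulting Corefile fragment, reconstructed from the sed expressions (an approximation, not copied from the cluster), is:

# Approximate Corefile fragment produced by the sed pipeline above.
cat <<'EOF' > /tmp/coredns-corefile.example
.:53 {
    log
    errors
    hosts {
       192.168.49.1 host.minikube.internal
       fallthrough
    }
    forward . /etc/resolv.conf
}
EOF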
I1207 20:02:56.176898 8179 out.go:177] - Using image nvcr.io/nvidia/k8s-device-plugin:v0.14.3
I1207 20:02:56.177372 8179 cli_runner.go:164] Run: docker container inspect addons-946218 --format={{.State.Status}}
I1207 20:02:56.177393 8179 start.go:223] Will wait 6m0s for node &{Name: IP:192.168.49.2 Port:8443 KubernetesVersion:v1.28.4 ContainerRuntime:docker ControlPlane:true Worker:true}
I1207 20:02:56.177410 8179 out.go:177] - Using image docker.io/registry:2.8.3
I1207 20:02:56.177424 8179 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/ingress-deploy.yaml (16103 bytes)
I1207 20:02:56.196679 8179 addons.go:423] installing /etc/kubernetes/addons/nvidia-device-plugin.yaml
I1207 20:02:56.197790 8179 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-946218
I1207 20:02:56.206327 8179 out.go:177] * Verifying Kubernetes components...
I1207 20:02:56.206376 8179 out.go:177] - Using image gcr.io/k8s-minikube/storage-provisioner:v5
I1207 20:02:56.206395 8179 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/nvidia-device-plugin.yaml (1966 bytes)
I1207 20:02:56.213074 8179 out.go:177] - Using image registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.6.0
I1207 20:02:56.214569 8179 out.go:177] - Using image gcr.io/k8s-minikube/kube-registry-proxy:0.0.5
I1207 20:02:56.214650 8179 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-946218
I1207 20:02:56.216249 8179 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet
I1207 20:02:56.217921 8179 addons.go:423] installing /etc/kubernetes/addons/storage-provisioner.yaml
I1207 20:02:56.241376 8179 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/storage-provisioner.yaml (2676 bytes)
I1207 20:02:56.241442 8179 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-946218
I1207 20:02:56.269996 8179 addons.go:231] Setting addon storage-provisioner-rancher=true in "addons-946218"
I1207 20:02:56.270036 8179 host.go:66] Checking if "addons-946218" exists ...
I1207 20:02:56.270467 8179 cli_runner.go:164] Run: docker container inspect addons-946218 --format={{.State.Status}}
I1207 20:02:56.282022 8179 out.go:177] - Using image gcr.io/k8s-minikube/minikube-ingress-dns:0.0.2
I1207 20:02:56.283679 8179 addons.go:423] installing /etc/kubernetes/addons/ingress-dns-pod.yaml
I1207 20:02:56.283697 8179 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/ingress-dns-pod.yaml (2442 bytes)
I1207 20:02:56.283757 8179 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-946218
I1207 20:02:56.281953 8179 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32772 SSHKeyPath:/home/jenkins/minikube-integration/17719-2292/.minikube/machines/addons-946218/id_rsa Username:docker}
I1207 20:02:56.292955 8179 out.go:177] - Using image registry.k8s.io/sig-storage/hostpathplugin:v1.9.0
I1207 20:02:56.282381 8179 addons.go:423] installing /etc/kubernetes/addons/registry-rc.yaml
I1207 20:02:56.294790 8179 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/registry-rc.yaml (798 bytes)
I1207 20:02:56.294865 8179 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-946218
I1207 20:02:56.325995 8179 out.go:177] - Using image registry.k8s.io/sig-storage/livenessprobe:v2.8.0
I1207 20:02:56.330243 8179 out.go:177] - Using image registry.k8s.io/sig-storage/csi-resizer:v1.6.0
I1207 20:02:56.335920 8179 out.go:177] - Using image registry.k8s.io/sig-storage/csi-snapshotter:v6.1.0
I1207 20:02:56.337704 8179 out.go:177] - Using image registry.k8s.io/sig-storage/csi-provisioner:v3.3.0
I1207 20:02:56.341472 8179 out.go:177] - Using image registry.k8s.io/sig-storage/csi-attacher:v4.0.0
I1207 20:02:56.344150 8179 addons.go:423] installing /etc/kubernetes/addons/rbac-external-attacher.yaml
I1207 20:02:56.344172 8179 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/rbac-external-attacher.yaml (3073 bytes)
I1207 20:02:56.344236 8179 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-946218
I1207 20:02:56.350867 8179 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32772 SSHKeyPath:/home/jenkins/minikube-integration/17719-2292/.minikube/machines/addons-946218/id_rsa Username:docker}
I1207 20:02:56.360653 8179 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32772 SSHKeyPath:/home/jenkins/minikube-integration/17719-2292/.minikube/machines/addons-946218/id_rsa Username:docker}
I1207 20:02:56.419417 8179 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32772 SSHKeyPath:/home/jenkins/minikube-integration/17719-2292/.minikube/machines/addons-946218/id_rsa Username:docker}
I1207 20:02:56.449240 8179 addons.go:423] installing /etc/kubernetes/addons/storageclass.yaml
I1207 20:02:56.449260 8179 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/storageclass.yaml (271 bytes)
I1207 20:02:56.449321 8179 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-946218
I1207 20:02:56.485693 8179 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32772 SSHKeyPath:/home/jenkins/minikube-integration/17719-2292/.minikube/machines/addons-946218/id_rsa Username:docker}
I1207 20:02:56.503904 8179 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32772 SSHKeyPath:/home/jenkins/minikube-integration/17719-2292/.minikube/machines/addons-946218/id_rsa Username:docker}
I1207 20:02:56.507464 8179 out.go:177] - Using image docker.io/rancher/local-path-provisioner:v0.0.22
I1207 20:02:56.510117 8179 out.go:177] - Using image docker.io/busybox:stable
I1207 20:02:56.505167 8179 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32772 SSHKeyPath:/home/jenkins/minikube-integration/17719-2292/.minikube/machines/addons-946218/id_rsa Username:docker}
I1207 20:02:56.514331 8179 addons.go:423] installing /etc/kubernetes/addons/storage-provisioner-rancher.yaml
I1207 20:02:56.514348 8179 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/storage-provisioner-rancher.yaml (3113 bytes)
I1207 20:02:56.514418 8179 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-946218
I1207 20:02:56.529590 8179 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32772 SSHKeyPath:/home/jenkins/minikube-integration/17719-2292/.minikube/machines/addons-946218/id_rsa Username:docker}
I1207 20:02:56.540102 8179 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32772 SSHKeyPath:/home/jenkins/minikube-integration/17719-2292/.minikube/machines/addons-946218/id_rsa Username:docker}
I1207 20:02:56.556100 8179 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32772 SSHKeyPath:/home/jenkins/minikube-integration/17719-2292/.minikube/machines/addons-946218/id_rsa Username:docker}
I1207 20:02:56.590150 8179 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32772 SSHKeyPath:/home/jenkins/minikube-integration/17719-2292/.minikube/machines/addons-946218/id_rsa Username:docker}
I1207 20:02:56.590935 8179 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32772 SSHKeyPath:/home/jenkins/minikube-integration/17719-2292/.minikube/machines/addons-946218/id_rsa Username:docker}
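[editor's note] The sshutil.go:53 lines above show minikube opening SSH sessions to the docker-driver node at 127.0.0.1:32772 (the host port mapped to the container's 22/tcp, as queried by the cli_runner lines); the "scp memory -->" steps then push each addon manifest over one of these sessions. A rough manual equivalent, reusing the port, username, and key path reported in this log (my-addon.yaml is a hypothetical local file, and /tmp is used because /etc/kubernetes/addons needs sudo on the node), might be:

  scp -P 32772 -i /home/jenkins/minikube-integration/17719-2292/.minikube/machines/addons-946218/id_rsa \
      ./my-addon.yaml docker@127.0.0.1:/tmp/my-addon.yaml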
I1207 20:02:57.053579 8179 addons.go:423] installing /etc/kubernetes/addons/rbac-hostpath.yaml
I1207 20:02:57.053603 8179 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/rbac-hostpath.yaml (4266 bytes)
I1207 20:02:57.071621 8179 addons.go:423] installing /etc/kubernetes/addons/registry-svc.yaml
I1207 20:02:57.071648 8179 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/registry-svc.yaml (398 bytes)
I1207 20:02:57.221478 8179 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.4/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml
I1207 20:02:57.243666 8179 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.4/kubectl apply -f /etc/kubernetes/addons/storage-provisioner-rancher.yaml
I1207 20:02:57.388043 8179 addons.go:423] installing /etc/kubernetes/addons/rbac-external-health-monitor-controller.yaml
I1207 20:02:57.388113 8179 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/rbac-external-health-monitor-controller.yaml (3038 bytes)
I1207 20:02:57.395211 8179 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.4/kubectl apply -f /etc/kubernetes/addons/deployment.yaml
I1207 20:02:57.399848 8179 addons.go:423] installing /etc/kubernetes/addons/metrics-server-deployment.yaml
I1207 20:02:57.399872 8179 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/metrics-server-deployment.yaml (1907 bytes)
I1207 20:02:57.425016 8179 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.4/kubectl apply -f /etc/kubernetes/addons/ingress-dns-pod.yaml
I1207 20:02:57.572200 8179 addons.go:423] installing /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml
I1207 20:02:57.572226 8179 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml (6471 bytes)
I1207 20:02:57.606937 8179 addons.go:423] installing /etc/kubernetes/addons/registry-proxy.yaml
I1207 20:02:57.606960 8179 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/registry-proxy.yaml (947 bytes)
I1207 20:02:57.667604 8179 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.4/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml
I1207 20:02:57.693856 8179 addons.go:423] installing /etc/kubernetes/addons/ig-serviceaccount.yaml
I1207 20:02:57.693880 8179 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/ig-serviceaccount.yaml (80 bytes)
I1207 20:02:57.694353 8179 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.4/kubectl apply -f /etc/kubernetes/addons/nvidia-device-plugin.yaml
I1207 20:02:57.701956 8179 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.4/kubectl apply -f /etc/kubernetes/addons/ingress-deploy.yaml
I1207 20:02:57.752545 8179 addons.go:423] installing /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml
I1207 20:02:57.752617 8179 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml (23126 bytes)
I1207 20:02:57.813141 8179 addons.go:423] installing /etc/kubernetes/addons/metrics-server-rbac.yaml
I1207 20:02:57.813215 8179 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/metrics-server-rbac.yaml (2175 bytes)
I1207 20:02:57.816617 8179 addons.go:423] installing /etc/kubernetes/addons/rbac-external-provisioner.yaml
I1207 20:02:57.816687 8179 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/rbac-external-provisioner.yaml (4442 bytes)
I1207 20:02:57.874570 8179 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.4/kubectl apply -f /etc/kubernetes/addons/registry-rc.yaml -f /etc/kubernetes/addons/registry-svc.yaml -f /etc/kubernetes/addons/registry-proxy.yaml
I1207 20:02:58.135310 8179 addons.go:423] installing /etc/kubernetes/addons/ig-role.yaml
I1207 20:02:58.135382 8179 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/ig-role.yaml (210 bytes)
I1207 20:02:58.189665 8179 addons.go:423] installing /etc/kubernetes/addons/metrics-server-service.yaml
I1207 20:02:58.189738 8179 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/metrics-server-service.yaml (446 bytes)
I1207 20:02:58.241407 8179 addons.go:423] installing /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml
I1207 20:02:58.241479 8179 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml (19582 bytes)
I1207 20:02:58.294413 8179 addons.go:423] installing /etc/kubernetes/addons/rbac-external-resizer.yaml
I1207 20:02:58.294484 8179 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/rbac-external-resizer.yaml (2943 bytes)
I1207 20:02:58.401220 8179 addons.go:423] installing /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml
I1207 20:02:58.401245 8179 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml (3545 bytes)
I1207 20:02:58.404581 8179 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.4/kubectl apply -f /etc/kubernetes/addons/metrics-apiservice.yaml -f /etc/kubernetes/addons/metrics-server-deployment.yaml -f /etc/kubernetes/addons/metrics-server-rbac.yaml -f /etc/kubernetes/addons/metrics-server-service.yaml
I1207 20:02:58.436094 8179 addons.go:423] installing /etc/kubernetes/addons/ig-rolebinding.yaml
I1207 20:02:58.436120 8179 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/ig-rolebinding.yaml (244 bytes)
I1207 20:02:58.507497 8179 addons.go:423] installing /etc/kubernetes/addons/rbac-external-snapshotter.yaml
I1207 20:02:58.507522 8179 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/rbac-external-snapshotter.yaml (3149 bytes)
I1207 20:02:58.542717 8179 addons.go:423] installing /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml
I1207 20:02:58.542751 8179 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml (1475 bytes)
I1207 20:02:58.592609 8179 addons.go:423] installing /etc/kubernetes/addons/ig-clusterrole.yaml
I1207 20:02:58.592631 8179 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/ig-clusterrole.yaml (1485 bytes)
I1207 20:02:58.715487 8179 addons.go:423] installing /etc/kubernetes/addons/ig-clusterrolebinding.yaml
I1207 20:02:58.715511 8179 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/ig-clusterrolebinding.yaml (274 bytes)
I1207 20:02:58.736283 8179 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.4/kubectl apply -f /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml -f /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml -f /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml
I1207 20:02:58.751426 8179 addons.go:423] installing /etc/kubernetes/addons/csi-hostpath-attacher.yaml
I1207 20:02:58.751457 8179 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/csi-hostpath-attacher.yaml (2143 bytes)
I1207 20:02:58.910820 8179 addons.go:423] installing /etc/kubernetes/addons/ig-crd.yaml
I1207 20:02:58.910845 8179 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/ig-crd.yaml (5216 bytes)
I1207 20:02:59.064174 8179 addons.go:423] installing /etc/kubernetes/addons/ig-daemonset.yaml
I1207 20:02:59.064249 8179 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/ig-daemonset.yaml (7735 bytes)
I1207 20:02:59.156273 8179 addons.go:423] installing /etc/kubernetes/addons/csi-hostpath-driverinfo.yaml
I1207 20:02:59.156345 8179 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/csi-hostpath-driverinfo.yaml (1274 bytes)
I1207 20:02:59.270418 8179 ssh_runner.go:235] Completed: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.28.4/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml | sed -e '/^ forward . \/etc\/resolv.conf.*/i \ hosts {\n 192.168.49.1 host.minikube.internal\n fallthrough\n }' -e '/^ errors *$/i \ log' | sudo /var/lib/minikube/binaries/v1.28.4/kubectl --kubeconfig=/var/lib/minikube/kubeconfig replace -f -": (3.078158032s)
I1207 20:02:59.270517 8179 ssh_runner.go:235] Completed: sudo systemctl is-active --quiet service kubelet: (3.013566939s)
I1207 20:02:59.271341 8179 node_ready.go:35] waiting up to 6m0s for node "addons-946218" to be "Ready" ...
I1207 20:02:59.271570 8179 start.go:929] {"host.minikube.internal": 192.168.49.1} host record injected into CoreDNS's ConfigMap
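[editor's note] This host record is the result of the sed pipeline that completed at 20:02:59.270418: it splices a hosts block mapping 192.168.49.1 to host.minikube.internal into the coredns ConfigMap before running kubectl replace. One way to confirm the rewrite landed, assuming kubectl access to this cluster, is:

  kubectl --context addons-946218 -n kube-system get configmap coredns -o yaml | grep -A3 "hosts {"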
I1207 20:02:59.274678 8179 node_ready.go:49] node "addons-946218" has status "Ready":"True"
I1207 20:02:59.274741 8179 node_ready.go:38] duration metric: took 3.382282ms waiting for node "addons-946218" to be "Ready" ...
I1207 20:02:59.274765 8179 pod_ready.go:35] extra waiting up to 6m0s for all system-critical pods including labels [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] to be "Ready" ...
I1207 20:02:59.281889 8179 pod_ready.go:78] waiting up to 6m0s for pod "coredns-5dd5756b68-87sl4" in "kube-system" namespace to be "Ready" ...
I1207 20:02:59.386812 8179 addons.go:423] installing /etc/kubernetes/addons/csi-hostpath-plugin.yaml
I1207 20:02:59.386880 8179 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/csi-hostpath-plugin.yaml (8201 bytes)
I1207 20:02:59.423455 8179 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.4/kubectl apply -f /etc/kubernetes/addons/ig-namespace.yaml -f /etc/kubernetes/addons/ig-serviceaccount.yaml -f /etc/kubernetes/addons/ig-role.yaml -f /etc/kubernetes/addons/ig-rolebinding.yaml -f /etc/kubernetes/addons/ig-clusterrole.yaml -f /etc/kubernetes/addons/ig-clusterrolebinding.yaml -f /etc/kubernetes/addons/ig-crd.yaml -f /etc/kubernetes/addons/ig-daemonset.yaml
I1207 20:02:59.521689 8179 addons.go:423] installing /etc/kubernetes/addons/csi-hostpath-resizer.yaml
I1207 20:02:59.521710 8179 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/csi-hostpath-resizer.yaml (2191 bytes)
I1207 20:02:59.629453 8179 addons.go:423] installing /etc/kubernetes/addons/csi-hostpath-storageclass.yaml
I1207 20:02:59.629475 8179 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/csi-hostpath-storageclass.yaml (846 bytes)
I1207 20:02:59.830754 8179 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.4/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml: (2.609237817s)
I1207 20:02:59.922857 8179 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.4/kubectl apply -f /etc/kubernetes/addons/rbac-external-attacher.yaml -f /etc/kubernetes/addons/rbac-hostpath.yaml -f /etc/kubernetes/addons/rbac-external-health-monitor-controller.yaml -f /etc/kubernetes/addons/rbac-external-provisioner.yaml -f /etc/kubernetes/addons/rbac-external-resizer.yaml -f /etc/kubernetes/addons/rbac-external-snapshotter.yaml -f /etc/kubernetes/addons/csi-hostpath-attacher.yaml -f /etc/kubernetes/addons/csi-hostpath-driverinfo.yaml -f /etc/kubernetes/addons/csi-hostpath-plugin.yaml -f /etc/kubernetes/addons/csi-hostpath-resizer.yaml -f /etc/kubernetes/addons/csi-hostpath-storageclass.yaml
I1207 20:03:01.300827 8179 pod_ready.go:102] pod "coredns-5dd5756b68-87sl4" in "kube-system" namespace has status "Ready":"False"
I1207 20:03:02.593418 8179 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.4/kubectl apply -f /etc/kubernetes/addons/storage-provisioner-rancher.yaml: (5.349715377s)
I1207 20:03:02.593632 8179 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.4/kubectl apply -f /etc/kubernetes/addons/deployment.yaml: (5.19839646s)
I1207 20:03:02.593704 8179 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.4/kubectl apply -f /etc/kubernetes/addons/ingress-dns-pod.yaml: (5.168663834s)
I1207 20:03:02.593759 8179 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.4/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml: (4.926131884s)
I1207 20:03:02.593796 8179 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.4/kubectl apply -f /etc/kubernetes/addons/nvidia-device-plugin.yaml: (4.899423553s)
I1207 20:03:02.787682 8179 ssh_runner.go:362] scp memory --> /var/lib/minikube/google_application_credentials.json (162 bytes)
I1207 20:03:02.787766 8179 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-946218
I1207 20:03:02.822441 8179 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32772 SSHKeyPath:/home/jenkins/minikube-integration/17719-2292/.minikube/machines/addons-946218/id_rsa Username:docker}
I1207 20:03:03.305340 8179 pod_ready.go:102] pod "coredns-5dd5756b68-87sl4" in "kube-system" namespace has status "Ready":"False"
I1207 20:03:03.455157 8179 ssh_runner.go:362] scp memory --> /var/lib/minikube/google_cloud_project (12 bytes)
I1207 20:03:03.587101 8179 addons.go:231] Setting addon gcp-auth=true in "addons-946218"
I1207 20:03:03.587145 8179 host.go:66] Checking if "addons-946218" exists ...
I1207 20:03:03.587627 8179 cli_runner.go:164] Run: docker container inspect addons-946218 --format={{.State.Status}}
I1207 20:03:03.633818 8179 ssh_runner.go:195] Run: cat /var/lib/minikube/google_application_credentials.json
I1207 20:03:03.633867 8179 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-946218
I1207 20:03:03.667117 8179 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32772 SSHKeyPath:/home/jenkins/minikube-integration/17719-2292/.minikube/machines/addons-946218/id_rsa Username:docker}
I1207 20:03:05.564465 8179 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.4/kubectl apply -f /etc/kubernetes/addons/ingress-deploy.yaml: (7.862394563s)
I1207 20:03:05.564447 8179 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.4/kubectl apply -f /etc/kubernetes/addons/registry-rc.yaml -f /etc/kubernetes/addons/registry-svc.yaml -f /etc/kubernetes/addons/registry-proxy.yaml: (7.689800195s)
I1207 20:03:05.564566 8179 addons.go:467] Verifying addon registry=true in "addons-946218"
I1207 20:03:05.567112 8179 out.go:177] * Verifying registry addon...
I1207 20:03:05.564757 8179 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.4/kubectl apply -f /etc/kubernetes/addons/metrics-apiservice.yaml -f /etc/kubernetes/addons/metrics-server-deployment.yaml -f /etc/kubernetes/addons/metrics-server-rbac.yaml -f /etc/kubernetes/addons/metrics-server-service.yaml: (7.16014763s)
I1207 20:03:05.564528 8179 addons.go:467] Verifying addon ingress=true in "addons-946218"
I1207 20:03:05.565046 8179 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.4/kubectl apply -f /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml -f /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml -f /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml: (6.82873131s)
I1207 20:03:05.565141 8179 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.4/kubectl apply -f /etc/kubernetes/addons/ig-namespace.yaml -f /etc/kubernetes/addons/ig-serviceaccount.yaml -f /etc/kubernetes/addons/ig-role.yaml -f /etc/kubernetes/addons/ig-rolebinding.yaml -f /etc/kubernetes/addons/ig-clusterrole.yaml -f /etc/kubernetes/addons/ig-clusterrolebinding.yaml -f /etc/kubernetes/addons/ig-crd.yaml -f /etc/kubernetes/addons/ig-daemonset.yaml: (6.14165536s)
I1207 20:03:05.567285 8179 addons.go:467] Verifying addon metrics-server=true in "addons-946218"
I1207 20:03:05.569564 8179 out.go:177] * Verifying ingress addon...
W1207 20:03:05.567467 8179 addons.go:449] apply failed, will retry: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.4/kubectl apply -f /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml -f /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml -f /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml: Process exited with status 1
stdout:
customresourcedefinition.apiextensions.k8s.io/volumesnapshotclasses.snapshot.storage.k8s.io created
customresourcedefinition.apiextensions.k8s.io/volumesnapshotcontents.snapshot.storage.k8s.io created
customresourcedefinition.apiextensions.k8s.io/volumesnapshots.snapshot.storage.k8s.io created
serviceaccount/snapshot-controller created
clusterrole.rbac.authorization.k8s.io/snapshot-controller-runner created
clusterrolebinding.rbac.authorization.k8s.io/snapshot-controller-role created
role.rbac.authorization.k8s.io/snapshot-controller-leaderelection created
rolebinding.rbac.authorization.k8s.io/snapshot-controller-leaderelection created
deployment.apps/snapshot-controller created
stderr:
error: resource mapping not found for name: "csi-hostpath-snapclass" namespace: "" from "/etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml": no matches for kind "VolumeSnapshotClass" in version "snapshot.storage.k8s.io/v1"
ensure CRDs are installed first
I1207 20:03:05.570523 8179 kapi.go:75] Waiting for pod with label "kubernetes.io/minikube-addons=registry" in ns "kube-system" ...
I1207 20:03:05.572843 8179 kapi.go:75] Waiting for pod with label "app.kubernetes.io/name=ingress-nginx" in ns "ingress-nginx" ...
I1207 20:03:05.572982 8179 retry.go:31] will retry after 139.689825ms: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.4/kubectl apply -f /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml -f /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml -f /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml: Process exited with status 1
stdout:
customresourcedefinition.apiextensions.k8s.io/volumesnapshotclasses.snapshot.storage.k8s.io created
customresourcedefinition.apiextensions.k8s.io/volumesnapshotcontents.snapshot.storage.k8s.io created
customresourcedefinition.apiextensions.k8s.io/volumesnapshots.snapshot.storage.k8s.io created
serviceaccount/snapshot-controller created
clusterrole.rbac.authorization.k8s.io/snapshot-controller-runner created
clusterrolebinding.rbac.authorization.k8s.io/snapshot-controller-role created
role.rbac.authorization.k8s.io/snapshot-controller-leaderelection created
rolebinding.rbac.authorization.k8s.io/snapshot-controller-leaderelection created
deployment.apps/snapshot-controller created
stderr:
error: resource mapping not found for name: "csi-hostpath-snapclass" namespace: "" from "/etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml": no matches for kind "VolumeSnapshotClass" in version "snapshot.storage.k8s.io/v1"
ensure CRDs are installed first
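[editor's note] The failure quoted above is an ordering problem: the VolumeSnapshot CRDs and the csi-hostpath-snapclass VolumeSnapshotClass are sent in the same kubectl apply, and the CRD is not yet established when the custom resource is validated, hence "no matches for kind VolumeSnapshotClass". retry.go schedules a second attempt (the apply --force run at 20:03:05.713266 below, which completes cleanly at 20:03:08.461661). Outside of this built-in retry, the usual fix is to apply the CRD, wait for it to be established, and only then apply the resources that depend on it, for example:

  kubectl apply -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml
  kubectl wait --for condition=established --timeout=60s \
      crd/volumesnapshotclasses.snapshot.storage.k8s.io
  kubectl apply -f /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml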
I1207 20:03:05.577616 8179 kapi.go:86] Found 3 Pods for label selector app.kubernetes.io/name=ingress-nginx
I1207 20:03:05.578249 8179 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1207 20:03:05.578657 8179 kapi.go:86] Found 2 Pods for label selector kubernetes.io/minikube-addons=registry
I1207 20:03:05.578691 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1207 20:03:05.584513 8179 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1207 20:03:05.584981 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1207 20:03:05.713266 8179 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.4/kubectl apply --force -f /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml -f /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml -f /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml
I1207 20:03:05.807022 8179 pod_ready.go:102] pod "coredns-5dd5756b68-87sl4" in "kube-system" namespace has status "Ready":"False"
I1207 20:03:06.119866 8179 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1207 20:03:06.120366 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1207 20:03:06.604214 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1207 20:03:06.605220 8179 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1207 20:03:06.971649 8179 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.4/kubectl apply -f /etc/kubernetes/addons/rbac-external-attacher.yaml -f /etc/kubernetes/addons/rbac-hostpath.yaml -f /etc/kubernetes/addons/rbac-external-health-monitor-controller.yaml -f /etc/kubernetes/addons/rbac-external-provisioner.yaml -f /etc/kubernetes/addons/rbac-external-resizer.yaml -f /etc/kubernetes/addons/rbac-external-snapshotter.yaml -f /etc/kubernetes/addons/csi-hostpath-attacher.yaml -f /etc/kubernetes/addons/csi-hostpath-driverinfo.yaml -f /etc/kubernetes/addons/csi-hostpath-plugin.yaml -f /etc/kubernetes/addons/csi-hostpath-resizer.yaml -f /etc/kubernetes/addons/csi-hostpath-storageclass.yaml: (7.048747799s)
I1207 20:03:06.971742 8179 addons.go:467] Verifying addon csi-hostpath-driver=true in "addons-946218"
I1207 20:03:06.973794 8179 out.go:177] * Verifying csi-hostpath-driver addon...
I1207 20:03:06.971953 8179 ssh_runner.go:235] Completed: cat /var/lib/minikube/google_application_credentials.json: (3.338115664s)
I1207 20:03:06.976692 8179 kapi.go:75] Waiting for pod with label "kubernetes.io/minikube-addons=csi-hostpath-driver" in ns "kube-system" ...
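[editor's note] From this point kapi.go:96 repeatedly polls the labelled pods until each selector reports Running/Ready, which is what produces the long runs of "current state: Pending" lines that follow. An equivalent one-shot wait against the same label selector, assuming direct kubectl access rather than minikube's internal poller, would be:

  kubectl --context addons-946218 -n kube-system wait pod \
      -l kubernetes.io/minikube-addons=csi-hostpath-driver \
      --for=condition=Ready --timeout=6m0s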
I1207 20:03:06.978532 8179 out.go:177] - Using image registry.k8s.io/ingress-nginx/kube-webhook-certgen:v20231011-8b53cabe0
I1207 20:03:06.980581 8179 out.go:177] - Using image gcr.io/k8s-minikube/gcp-auth-webhook:v0.1.0
I1207 20:03:06.982301 8179 addons.go:423] installing /etc/kubernetes/addons/gcp-auth-ns.yaml
I1207 20:03:06.982328 8179 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/gcp-auth-ns.yaml (700 bytes)
I1207 20:03:07.001563 8179 kapi.go:86] Found 3 Pods for label selector kubernetes.io/minikube-addons=csi-hostpath-driver
I1207 20:03:07.001594 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1207 20:03:07.011044 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1207 20:03:07.092691 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1207 20:03:07.094042 8179 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1207 20:03:07.131001 8179 addons.go:423] installing /etc/kubernetes/addons/gcp-auth-service.yaml
I1207 20:03:07.131074 8179 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/gcp-auth-service.yaml (788 bytes)
I1207 20:03:07.232835 8179 addons.go:423] installing /etc/kubernetes/addons/gcp-auth-webhook.yaml
I1207 20:03:07.232865 8179 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/gcp-auth-webhook.yaml (5432 bytes)
I1207 20:03:07.278603 8179 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.4/kubectl apply -f /etc/kubernetes/addons/gcp-auth-ns.yaml -f /etc/kubernetes/addons/gcp-auth-service.yaml -f /etc/kubernetes/addons/gcp-auth-webhook.yaml
I1207 20:03:07.517869 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1207 20:03:07.591423 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1207 20:03:07.592547 8179 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1207 20:03:08.018206 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1207 20:03:08.091406 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1207 20:03:08.092921 8179 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1207 20:03:08.326242 8179 pod_ready.go:102] pod "coredns-5dd5756b68-87sl4" in "kube-system" namespace has status "Ready":"False"
I1207 20:03:08.461661 8179 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.4/kubectl apply --force -f /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml -f /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml -f /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml: (2.748300983s)
I1207 20:03:08.518022 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1207 20:03:08.604109 8179 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1207 20:03:08.604895 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1207 20:03:08.752059 8179 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.4/kubectl apply -f /etc/kubernetes/addons/gcp-auth-ns.yaml -f /etc/kubernetes/addons/gcp-auth-service.yaml -f /etc/kubernetes/addons/gcp-auth-webhook.yaml: (1.473418177s)
I1207 20:03:08.755065 8179 addons.go:467] Verifying addon gcp-auth=true in "addons-946218"
I1207 20:03:08.758283 8179 out.go:177] * Verifying gcp-auth addon...
I1207 20:03:08.761372 8179 kapi.go:75] Waiting for pod with label "kubernetes.io/minikube-addons=gcp-auth" in ns "gcp-auth" ...
I1207 20:03:08.766760 8179 kapi.go:86] Found 1 Pods for label selector kubernetes.io/minikube-addons=gcp-auth
I1207 20:03:08.766783 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1207 20:03:08.770022 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1207 20:03:09.020370 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1207 20:03:09.093273 8179 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1207 20:03:09.094563 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1207 20:03:09.274708 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1207 20:03:09.517854 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1207 20:03:09.592650 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1207 20:03:09.593855 8179 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1207 20:03:09.777669 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1207 20:03:10.020186 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1207 20:03:10.093578 8179 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1207 20:03:10.094892 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1207 20:03:10.273951 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1207 20:03:10.517626 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1207 20:03:10.590495 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1207 20:03:10.591593 8179 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1207 20:03:10.778249 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1207 20:03:10.801413 8179 pod_ready.go:102] pod "coredns-5dd5756b68-87sl4" in "kube-system" namespace has status "Ready":"False"
I1207 20:03:11.017647 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1207 20:03:11.089543 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1207 20:03:11.090699 8179 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1207 20:03:11.274460 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1207 20:03:11.517293 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1207 20:03:11.595828 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1207 20:03:11.600227 8179 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1207 20:03:11.775773 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1207 20:03:12.017439 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1207 20:03:12.091990 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1207 20:03:12.092960 8179 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1207 20:03:12.273751 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1207 20:03:12.517131 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1207 20:03:12.590589 8179 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1207 20:03:12.591301 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1207 20:03:12.774209 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1207 20:03:13.017476 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1207 20:03:13.089073 8179 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1207 20:03:13.089829 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1207 20:03:13.274200 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1207 20:03:13.300621 8179 pod_ready.go:102] pod "coredns-5dd5756b68-87sl4" in "kube-system" namespace has status "Ready":"False"
I1207 20:03:13.517443 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1207 20:03:13.590043 8179 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1207 20:03:13.591456 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1207 20:03:13.774104 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1207 20:03:14.017681 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1207 20:03:14.092196 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1207 20:03:14.093324 8179 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1207 20:03:14.273834 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1207 20:03:14.517784 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1207 20:03:14.589634 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1207 20:03:14.590639 8179 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1207 20:03:14.774588 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1207 20:03:15.029321 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1207 20:03:15.095434 8179 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1207 20:03:15.114347 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1207 20:03:15.274172 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1207 20:03:15.516388 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1207 20:03:15.591601 8179 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1207 20:03:15.592971 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1207 20:03:15.775322 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1207 20:03:15.800629 8179 pod_ready.go:102] pod "coredns-5dd5756b68-87sl4" in "kube-system" namespace has status "Ready":"False"
I1207 20:03:16.017620 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1207 20:03:16.091298 8179 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1207 20:03:16.091832 8179 kapi.go:107] duration metric: took 10.521312733s to wait for kubernetes.io/minikube-addons=registry ...
I1207 20:03:16.274397 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1207 20:03:16.517591 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1207 20:03:16.589040 8179 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1207 20:03:16.773968 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1207 20:03:17.027362 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1207 20:03:17.089685 8179 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1207 20:03:17.275415 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1207 20:03:17.517243 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1207 20:03:17.589750 8179 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1207 20:03:17.774226 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1207 20:03:17.800831 8179 pod_ready.go:102] pod "coredns-5dd5756b68-87sl4" in "kube-system" namespace has status "Ready":"False"
I1207 20:03:18.018448 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1207 20:03:18.089613 8179 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1207 20:03:18.274836 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1207 20:03:18.516977 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1207 20:03:18.589028 8179 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1207 20:03:18.773733 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1207 20:03:19.018440 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1207 20:03:19.089355 8179 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1207 20:03:19.274188 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1207 20:03:19.517654 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1207 20:03:19.589549 8179 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1207 20:03:19.774475 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1207 20:03:20.022259 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1207 20:03:20.089824 8179 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1207 20:03:20.273438 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1207 20:03:20.301347 8179 pod_ready.go:102] pod "coredns-5dd5756b68-87sl4" in "kube-system" namespace has status "Ready":"False"
I1207 20:03:20.517444 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1207 20:03:20.589294 8179 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1207 20:03:20.773768 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1207 20:03:21.017007 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1207 20:03:21.089430 8179 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1207 20:03:21.274078 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1207 20:03:21.517280 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1207 20:03:21.589272 8179 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1207 20:03:21.773978 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1207 20:03:22.018159 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1207 20:03:22.089474 8179 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1207 20:03:22.273828 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1207 20:03:22.516060 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1207 20:03:22.589395 8179 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1207 20:03:22.774048 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1207 20:03:22.800307 8179 pod_ready.go:102] pod "coredns-5dd5756b68-87sl4" in "kube-system" namespace has status "Ready":"False"
I1207 20:03:23.018021 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1207 20:03:23.090058 8179 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1207 20:03:23.273437 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1207 20:03:23.517485 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1207 20:03:23.589762 8179 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1207 20:03:23.773977 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1207 20:03:24.018489 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1207 20:03:24.089638 8179 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1207 20:03:24.274537 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1207 20:03:24.518290 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1207 20:03:24.588626 8179 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1207 20:03:24.775152 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1207 20:03:24.802003 8179 pod_ready.go:102] pod "coredns-5dd5756b68-87sl4" in "kube-system" namespace has status "Ready":"False"
I1207 20:03:25.017629 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1207 20:03:25.090152 8179 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1207 20:03:25.273691 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1207 20:03:25.517485 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1207 20:03:25.589297 8179 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1207 20:03:25.773865 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1207 20:03:26.020031 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1207 20:03:26.089562 8179 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1207 20:03:26.274379 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1207 20:03:26.516984 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1207 20:03:26.590215 8179 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1207 20:03:26.773817 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1207 20:03:27.017865 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1207 20:03:27.089987 8179 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1207 20:03:27.274002 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1207 20:03:27.300659 8179 pod_ready.go:102] pod "coredns-5dd5756b68-87sl4" in "kube-system" namespace has status "Ready":"False"
I1207 20:03:27.523186 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1207 20:03:27.590410 8179 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1207 20:03:27.774564 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1207 20:03:28.022543 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1207 20:03:28.089331 8179 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1207 20:03:28.274375 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1207 20:03:28.516827 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1207 20:03:28.589054 8179 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1207 20:03:28.774220 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1207 20:03:29.017008 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1207 20:03:29.089681 8179 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1207 20:03:29.274782 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1207 20:03:29.301994 8179 pod_ready.go:102] pod "coredns-5dd5756b68-87sl4" in "kube-system" namespace has status "Ready":"False"
I1207 20:03:29.522763 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1207 20:03:29.589689 8179 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1207 20:03:29.775605 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1207 20:03:30.035442 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1207 20:03:30.092359 8179 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1207 20:03:30.276985 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1207 20:03:30.516790 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1207 20:03:30.589566 8179 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1207 20:03:30.774237 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1207 20:03:31.018160 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1207 20:03:31.089367 8179 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1207 20:03:31.274389 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1207 20:03:31.517821 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1207 20:03:31.589426 8179 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1207 20:03:31.773883 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1207 20:03:31.801254 8179 pod_ready.go:102] pod "coredns-5dd5756b68-87sl4" in "kube-system" namespace has status "Ready":"False"
I1207 20:03:32.017113 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1207 20:03:32.089708 8179 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1207 20:03:32.276653 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1207 20:03:32.518217 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1207 20:03:32.589380 8179 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1207 20:03:32.774044 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1207 20:03:33.017855 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1207 20:03:33.089773 8179 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1207 20:03:33.274783 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1207 20:03:33.517187 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1207 20:03:33.598000 8179 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1207 20:03:33.774449 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1207 20:03:33.806803 8179 pod_ready.go:102] pod "coredns-5dd5756b68-87sl4" in "kube-system" namespace has status "Ready":"False"
I1207 20:03:34.018435 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1207 20:03:34.090518 8179 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1207 20:03:34.274433 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1207 20:03:34.518464 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1207 20:03:34.594343 8179 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1207 20:03:34.773739 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1207 20:03:35.022048 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1207 20:03:35.089908 8179 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1207 20:03:35.274282 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1207 20:03:35.516681 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1207 20:03:35.588864 8179 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1207 20:03:35.773560 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1207 20:03:36.018935 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1207 20:03:36.094083 8179 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1207 20:03:36.286117 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1207 20:03:36.300608 8179 pod_ready.go:102] pod "coredns-5dd5756b68-87sl4" in "kube-system" namespace has status "Ready":"False"
I1207 20:03:36.519854 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1207 20:03:36.588898 8179 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1207 20:03:36.773619 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1207 20:03:36.804080 8179 pod_ready.go:92] pod "coredns-5dd5756b68-87sl4" in "kube-system" namespace has status "Ready":"True"
I1207 20:03:36.804102 8179 pod_ready.go:81] duration metric: took 37.522132668s waiting for pod "coredns-5dd5756b68-87sl4" in "kube-system" namespace to be "Ready" ...
I1207 20:03:36.804113 8179 pod_ready.go:78] waiting up to 6m0s for pod "etcd-addons-946218" in "kube-system" namespace to be "Ready" ...
I1207 20:03:36.821446 8179 pod_ready.go:92] pod "etcd-addons-946218" in "kube-system" namespace has status "Ready":"True"
I1207 20:03:36.821474 8179 pod_ready.go:81] duration metric: took 17.35357ms waiting for pod "etcd-addons-946218" in "kube-system" namespace to be "Ready" ...
I1207 20:03:36.821486 8179 pod_ready.go:78] waiting up to 6m0s for pod "kube-apiserver-addons-946218" in "kube-system" namespace to be "Ready" ...
I1207 20:03:36.829748 8179 pod_ready.go:92] pod "kube-apiserver-addons-946218" in "kube-system" namespace has status "Ready":"True"
I1207 20:03:36.829846 8179 pod_ready.go:81] duration metric: took 8.347442ms waiting for pod "kube-apiserver-addons-946218" in "kube-system" namespace to be "Ready" ...
I1207 20:03:36.829905 8179 pod_ready.go:78] waiting up to 6m0s for pod "kube-controller-manager-addons-946218" in "kube-system" namespace to be "Ready" ...
I1207 20:03:36.839519 8179 pod_ready.go:92] pod "kube-controller-manager-addons-946218" in "kube-system" namespace has status "Ready":"True"
I1207 20:03:36.839588 8179 pod_ready.go:81] duration metric: took 9.657946ms waiting for pod "kube-controller-manager-addons-946218" in "kube-system" namespace to be "Ready" ...
I1207 20:03:36.839614 8179 pod_ready.go:78] waiting up to 6m0s for pod "kube-proxy-t6tdx" in "kube-system" namespace to be "Ready" ...
I1207 20:03:36.849787 8179 pod_ready.go:92] pod "kube-proxy-t6tdx" in "kube-system" namespace has status "Ready":"True"
I1207 20:03:36.849858 8179 pod_ready.go:81] duration metric: took 10.225071ms waiting for pod "kube-proxy-t6tdx" in "kube-system" namespace to be "Ready" ...
I1207 20:03:36.849882 8179 pod_ready.go:78] waiting up to 6m0s for pod "kube-scheduler-addons-946218" in "kube-system" namespace to be "Ready" ...
I1207 20:03:37.018137 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1207 20:03:37.089735 8179 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1207 20:03:37.198428 8179 pod_ready.go:92] pod "kube-scheduler-addons-946218" in "kube-system" namespace has status "Ready":"True"
I1207 20:03:37.198497 8179 pod_ready.go:81] duration metric: took 348.594806ms waiting for pod "kube-scheduler-addons-946218" in "kube-system" namespace to be "Ready" ...
I1207 20:03:37.198522 8179 pod_ready.go:38] duration metric: took 37.923732596s for extra waiting for all system-critical and pods with labels [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] to be "Ready" ...
I1207 20:03:37.198572 8179 api_server.go:52] waiting for apiserver process to appear ...
I1207 20:03:37.198655 8179 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I1207 20:03:37.218270 8179 api_server.go:72] duration metric: took 41.023833459s to wait for apiserver process to appear ...
I1207 20:03:37.218337 8179 api_server.go:88] waiting for apiserver healthz status ...
I1207 20:03:37.218367 8179 api_server.go:253] Checking apiserver healthz at https://192.168.49.2:8443/healthz ...
I1207 20:03:37.228279 8179 api_server.go:279] https://192.168.49.2:8443/healthz returned 200:
ok
I1207 20:03:37.229899 8179 api_server.go:141] control plane version: v1.28.4
I1207 20:03:37.229919 8179 api_server.go:131] duration metric: took 11.563834ms to wait for apiserver health ...
I1207 20:03:37.229928 8179 system_pods.go:43] waiting for kube-system pods to appear ...
I1207 20:03:37.274743 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1207 20:03:37.405242 8179 system_pods.go:59] 17 kube-system pods found
I1207 20:03:37.405328 8179 system_pods.go:61] "coredns-5dd5756b68-87sl4" [940861a1-dd18-48c7-9757-da06c9ac735a] Running
I1207 20:03:37.405352 8179 system_pods.go:61] "csi-hostpath-attacher-0" [145f2b2b-baa8-4da4-9a09-43f01eb7169c] Pending / Ready:ContainersNotReady (containers with unready status: [csi-attacher]) / ContainersReady:ContainersNotReady (containers with unready status: [csi-attacher])
I1207 20:03:37.405378 8179 system_pods.go:61] "csi-hostpath-resizer-0" [32edb4a5-9f67-4e22-8e62-f565c69909e2] Pending / Ready:ContainersNotReady (containers with unready status: [csi-resizer]) / ContainersReady:ContainersNotReady (containers with unready status: [csi-resizer])
I1207 20:03:37.405419 8179 system_pods.go:61] "csi-hostpathplugin-vddln" [89ac7dd2-6d3c-4306-89fb-0f13e6848ada] Pending / Ready:ContainersNotReady (containers with unready status: [csi-external-health-monitor-controller node-driver-registrar hostpath liveness-probe csi-provisioner csi-snapshotter]) / ContainersReady:ContainersNotReady (containers with unready status: [csi-external-health-monitor-controller node-driver-registrar hostpath liveness-probe csi-provisioner csi-snapshotter])
I1207 20:03:37.405439 8179 system_pods.go:61] "etcd-addons-946218" [6883eb4a-4422-4737-98ef-b8c420c8a4bb] Running
I1207 20:03:37.405460 8179 system_pods.go:61] "kube-apiserver-addons-946218" [6fcf420f-a09e-43c1-aa06-92fa28bbb164] Running
I1207 20:03:37.405491 8179 system_pods.go:61] "kube-controller-manager-addons-946218" [1cba9fe0-bb3d-4fad-973b-6526a279dc6b] Running
I1207 20:03:37.405518 8179 system_pods.go:61] "kube-ingress-dns-minikube" [95ff49c4-8d57-456f-93e2-0e4f3819d2df] Running / Ready:ContainersNotReady (containers with unready status: [minikube-ingress-dns]) / ContainersReady:ContainersNotReady (containers with unready status: [minikube-ingress-dns])
I1207 20:03:37.405538 8179 system_pods.go:61] "kube-proxy-t6tdx" [acf79489-4f26-418b-9de6-e1a72c27596d] Running
I1207 20:03:37.405560 8179 system_pods.go:61] "kube-scheduler-addons-946218" [4bb38dec-f323-44b2-b0de-ed93e55f9969] Running
I1207 20:03:37.405593 8179 system_pods.go:61] "metrics-server-7c66d45ddc-hc4mm" [796d70f9-0a3c-4906-923f-5239ec4a547f] Running
I1207 20:03:37.405613 8179 system_pods.go:61] "nvidia-device-plugin-daemonset-pq9kj" [c8d63810-cef8-46ea-8b3f-4a331c68a9ce] Running
I1207 20:03:37.405633 8179 system_pods.go:61] "registry-proxy-88zd5" [05cfdd65-400d-46d7-a81d-b22181d9c3d1] Running
I1207 20:03:37.405655 8179 system_pods.go:61] "registry-vbggm" [f9501618-888e-41c1-87bc-c0c145626641] Running
I1207 20:03:37.405691 8179 system_pods.go:61] "snapshot-controller-58dbcc7b99-7h4t9" [9c7d819d-d910-4377-bb62-2f0a03a55e45] Running
I1207 20:03:37.405714 8179 system_pods.go:61] "snapshot-controller-58dbcc7b99-f54xl" [e4fc1f3b-ebaa-446b-bb56-f6db137f1aa5] Running
I1207 20:03:37.405733 8179 system_pods.go:61] "storage-provisioner" [50e8b55e-af1d-4d96-85a4-723e4f951ca3] Running
I1207 20:03:37.405754 8179 system_pods.go:74] duration metric: took 175.819819ms to wait for pod list to return data ...
I1207 20:03:37.405776 8179 default_sa.go:34] waiting for default service account to be created ...
I1207 20:03:37.517060 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1207 20:03:37.589914 8179 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1207 20:03:37.597789 8179 default_sa.go:45] found service account: "default"
I1207 20:03:37.597858 8179 default_sa.go:55] duration metric: took 192.050708ms for default service account to be created ...
I1207 20:03:37.597881 8179 system_pods.go:116] waiting for k8s-apps to be running ...
I1207 20:03:37.774790 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1207 20:03:37.806873 8179 system_pods.go:86] 17 kube-system pods found
I1207 20:03:37.806947 8179 system_pods.go:89] "coredns-5dd5756b68-87sl4" [940861a1-dd18-48c7-9757-da06c9ac735a] Running
I1207 20:03:37.806973 8179 system_pods.go:89] "csi-hostpath-attacher-0" [145f2b2b-baa8-4da4-9a09-43f01eb7169c] Pending / Ready:ContainersNotReady (containers with unready status: [csi-attacher]) / ContainersReady:ContainersNotReady (containers with unready status: [csi-attacher])
I1207 20:03:37.807000 8179 system_pods.go:89] "csi-hostpath-resizer-0" [32edb4a5-9f67-4e22-8e62-f565c69909e2] Pending / Ready:ContainersNotReady (containers with unready status: [csi-resizer]) / ContainersReady:ContainersNotReady (containers with unready status: [csi-resizer])
I1207 20:03:37.807043 8179 system_pods.go:89] "csi-hostpathplugin-vddln" [89ac7dd2-6d3c-4306-89fb-0f13e6848ada] Pending / Ready:ContainersNotReady (containers with unready status: [csi-external-health-monitor-controller node-driver-registrar hostpath liveness-probe csi-provisioner csi-snapshotter]) / ContainersReady:ContainersNotReady (containers with unready status: [csi-external-health-monitor-controller node-driver-registrar hostpath liveness-probe csi-provisioner csi-snapshotter])
I1207 20:03:37.807061 8179 system_pods.go:89] "etcd-addons-946218" [6883eb4a-4422-4737-98ef-b8c420c8a4bb] Running
I1207 20:03:37.807082 8179 system_pods.go:89] "kube-apiserver-addons-946218" [6fcf420f-a09e-43c1-aa06-92fa28bbb164] Running
I1207 20:03:37.807114 8179 system_pods.go:89] "kube-controller-manager-addons-946218" [1cba9fe0-bb3d-4fad-973b-6526a279dc6b] Running
I1207 20:03:37.807138 8179 system_pods.go:89] "kube-ingress-dns-minikube" [95ff49c4-8d57-456f-93e2-0e4f3819d2df] Running / Ready:ContainersNotReady (containers with unready status: [minikube-ingress-dns]) / ContainersReady:ContainersNotReady (containers with unready status: [minikube-ingress-dns])
I1207 20:03:37.807156 8179 system_pods.go:89] "kube-proxy-t6tdx" [acf79489-4f26-418b-9de6-e1a72c27596d] Running
I1207 20:03:37.807177 8179 system_pods.go:89] "kube-scheduler-addons-946218" [4bb38dec-f323-44b2-b0de-ed93e55f9969] Running
I1207 20:03:37.807199 8179 system_pods.go:89] "metrics-server-7c66d45ddc-hc4mm" [796d70f9-0a3c-4906-923f-5239ec4a547f] Running
I1207 20:03:37.807233 8179 system_pods.go:89] "nvidia-device-plugin-daemonset-pq9kj" [c8d63810-cef8-46ea-8b3f-4a331c68a9ce] Running
I1207 20:03:37.807252 8179 system_pods.go:89] "registry-proxy-88zd5" [05cfdd65-400d-46d7-a81d-b22181d9c3d1] Running
I1207 20:03:37.807274 8179 system_pods.go:89] "registry-vbggm" [f9501618-888e-41c1-87bc-c0c145626641] Running
I1207 20:03:37.807308 8179 system_pods.go:89] "snapshot-controller-58dbcc7b99-7h4t9" [9c7d819d-d910-4377-bb62-2f0a03a55e45] Running
I1207 20:03:37.807330 8179 system_pods.go:89] "snapshot-controller-58dbcc7b99-f54xl" [e4fc1f3b-ebaa-446b-bb56-f6db137f1aa5] Running
I1207 20:03:37.807347 8179 system_pods.go:89] "storage-provisioner" [50e8b55e-af1d-4d96-85a4-723e4f951ca3] Running
I1207 20:03:37.807368 8179 system_pods.go:126] duration metric: took 209.469491ms to wait for k8s-apps to be running ...
I1207 20:03:37.807389 8179 system_svc.go:44] waiting for kubelet service to be running ....
I1207 20:03:37.807467 8179 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet
I1207 20:03:37.823714 8179 system_svc.go:56] duration metric: took 16.318002ms WaitForService to wait for kubelet.
I1207 20:03:37.823736 8179 kubeadm.go:581] duration metric: took 41.629305936s to wait for : map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] ...
I1207 20:03:37.823756 8179 node_conditions.go:102] verifying NodePressure condition ...
I1207 20:03:37.998575 8179 node_conditions.go:122] node storage ephemeral capacity is 203034800Ki
I1207 20:03:37.998604 8179 node_conditions.go:123] node cpu capacity is 2
I1207 20:03:37.998616 8179 node_conditions.go:105] duration metric: took 174.855397ms to run NodePressure ...
I1207 20:03:37.998629 8179 start.go:228] waiting for startup goroutines ...
I1207 20:03:38.018820 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1207 20:03:38.090420 8179 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1207 20:03:38.274231 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1207 20:03:38.517933 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1207 20:03:38.594445 8179 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1207 20:03:38.774284 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1207 20:03:39.020600 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1207 20:03:39.090050 8179 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1207 20:03:39.273744 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1207 20:03:39.517876 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1207 20:03:39.589251 8179 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1207 20:03:39.774700 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1207 20:03:40.036762 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1207 20:03:40.089358 8179 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1207 20:03:40.274186 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1207 20:03:40.517620 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1207 20:03:40.589924 8179 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1207 20:03:40.774767 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1207 20:03:41.017779 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1207 20:03:41.089511 8179 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1207 20:03:41.274572 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1207 20:03:41.518288 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1207 20:03:41.593234 8179 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1207 20:03:41.778673 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1207 20:03:42.031977 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1207 20:03:42.156251 8179 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1207 20:03:42.290306 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1207 20:03:42.517476 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1207 20:03:42.590319 8179 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1207 20:03:42.774423 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1207 20:03:43.017255 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1207 20:03:43.089990 8179 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1207 20:03:43.273648 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1207 20:03:43.517339 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1207 20:03:43.595646 8179 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1207 20:03:43.774825 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1207 20:03:44.017176 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1207 20:03:44.089813 8179 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1207 20:03:44.273809 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1207 20:03:44.518288 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1207 20:03:44.590151 8179 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1207 20:03:44.786409 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1207 20:03:45.026779 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1207 20:03:45.097244 8179 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1207 20:03:45.282226 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1207 20:03:45.516611 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1207 20:03:45.589469 8179 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1207 20:03:45.774279 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1207 20:03:46.017215 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1207 20:03:46.089990 8179 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1207 20:03:46.274113 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1207 20:03:46.517457 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1207 20:03:46.589648 8179 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1207 20:03:46.774554 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1207 20:03:47.017477 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1207 20:03:47.089284 8179 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1207 20:03:47.273943 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1207 20:03:47.516883 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1207 20:03:47.589888 8179 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1207 20:03:47.774867 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1207 20:03:48.020340 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1207 20:03:48.090593 8179 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1207 20:03:48.290744 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1207 20:03:48.517850 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1207 20:03:48.589097 8179 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1207 20:03:48.773664 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1207 20:03:49.018302 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1207 20:03:49.090685 8179 kapi.go:107] duration metric: took 43.517839546s to wait for app.kubernetes.io/name=ingress-nginx ...
I1207 20:03:49.274745 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1207 20:03:49.517282 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1207 20:03:49.774137 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1207 20:03:50.022301 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1207 20:03:50.274439 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1207 20:03:50.516967 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1207 20:03:50.774568 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1207 20:03:51.017295 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1207 20:03:51.273852 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1207 20:03:51.517223 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1207 20:03:51.778949 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1207 20:03:52.018271 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1207 20:03:52.274140 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1207 20:03:52.516881 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1207 20:03:52.774629 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1207 20:03:53.017952 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1207 20:03:53.279320 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1207 20:03:53.517545 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1207 20:03:53.774566 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1207 20:03:54.019348 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1207 20:03:54.274338 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1207 20:03:54.516354 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1207 20:03:54.773953 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1207 20:03:55.017395 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1207 20:03:55.274041 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1207 20:03:55.517143 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1207 20:03:55.774471 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1207 20:03:56.018898 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1207 20:03:56.273461 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1207 20:03:56.516162 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1207 20:03:56.774065 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1207 20:03:57.021829 8179 kapi.go:107] duration metric: took 50.045132999s to wait for kubernetes.io/minikube-addons=csi-hostpath-driver ...
I1207 20:03:57.277283 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1207 20:03:57.774253 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1207 20:03:58.273381 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1207 20:03:58.773922 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1207 20:03:59.273680 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1207 20:03:59.774616 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1207 20:04:00.311594 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1207 20:04:00.773710 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1207 20:04:01.273608 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1207 20:04:01.774187 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1207 20:04:02.274138 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1207 20:04:02.774108 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1207 20:04:03.273971 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1207 20:04:03.773406 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1207 20:04:04.273884 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1207 20:04:04.773742 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1207 20:04:05.273580 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1207 20:04:05.774144 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1207 20:04:06.274185 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1207 20:04:06.774240 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1207 20:04:07.274286 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1207 20:04:07.773684 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1207 20:04:08.273918 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1207 20:04:08.774576 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1207 20:04:09.274072 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1207 20:04:09.774207 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1207 20:04:10.274256 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1207 20:04:10.773335 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1207 20:04:11.273635 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1207 20:04:11.774119 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1207 20:04:12.274099 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1207 20:04:12.774027 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1207 20:04:13.274073 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1207 20:04:13.774139 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1207 20:04:14.273729 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1207 20:04:14.773740 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1207 20:04:15.277801 8179 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1207 20:04:15.773249 8179 kapi.go:107] duration metric: took 1m7.011872852s to wait for kubernetes.io/minikube-addons=gcp-auth ...
I1207 20:04:15.775106 8179 out.go:177] * Your GCP credentials will now be mounted into every pod created in the addons-946218 cluster.
I1207 20:04:15.777195 8179 out.go:177] * If you don't want your credentials mounted into a specific pod, add a label with the `gcp-auth-skip-secret` key to your pod configuration.
I1207 20:04:15.778673 8179 out.go:177] * If you want existing pods to be mounted with credentials, either recreate them or rerun addons enable with --refresh.
I1207 20:04:15.780492 8179 out.go:177] * Enabled addons: default-storageclass, cloud-spanner, ingress-dns, storage-provisioner, nvidia-device-plugin, storage-provisioner-rancher, inspektor-gadget, metrics-server, volumesnapshots, registry, ingress, csi-hostpath-driver, gcp-auth
I1207 20:04:15.782283 8179 addons.go:502] enable addons completed in 1m19.961268887s: enabled=[default-storageclass cloud-spanner ingress-dns storage-provisioner nvidia-device-plugin storage-provisioner-rancher inspektor-gadget metrics-server volumesnapshots registry ingress csi-hostpath-driver gcp-auth]
I1207 20:04:15.782318 8179 start.go:233] waiting for cluster config update ...
I1207 20:04:15.782338 8179 start.go:242] writing updated cluster config ...
I1207 20:04:15.783097 8179 ssh_runner.go:195] Run: rm -f paused
I1207 20:04:16.101758 8179 start.go:600] kubectl: 1.28.4, cluster: 1.28.4 (minor skew: 0)
I1207 20:04:16.104016 8179 out.go:177] * Done! kubectl is now configured to use "addons-946218" cluster and "default" namespace by default
*
* ==> Docker <==
* Dec 07 20:05:03 addons-946218 dockerd[1094]: time="2023-12-07T20:05:03.921038067Z" level=info msg="ignoring event" container=cf065f5d302a82e5fd2f98e26daae9db4951fa3281f2b281026e7daeb2472be4 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Dec 07 20:05:04 addons-946218 cri-dockerd[1305]: time="2023-12-07T20:05:04Z" level=info msg="Stop pulling image gcr.io/google-samples/hello-app:1.0: Status: Downloaded newer image for gcr.io/google-samples/hello-app:1.0"
Dec 07 20:05:04 addons-946218 dockerd[1094]: time="2023-12-07T20:05:04.192923627Z" level=info msg="ignoring event" container=55efbfff40c194641abab7a4bd25fc8e40f35641ea09f28ca68b0ee3f66e75b6 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Dec 07 20:05:04 addons-946218 dockerd[1094]: time="2023-12-07T20:05:04.767794295Z" level=info msg="ignoring event" container=b919099d7cbbbdbb01d3d2c1f0094b5ebc6ce1dab5c9d7bfe95d33f88692f181 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Dec 07 20:05:09 addons-946218 dockerd[1094]: time="2023-12-07T20:05:09.500698082Z" level=info msg="ignoring event" container=cb2f8f76692e815b0317e2b0fac4976298e210ff28142dcbbb676dbd2059e8ae module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Dec 07 20:05:09 addons-946218 dockerd[1094]: time="2023-12-07T20:05:09.527509749Z" level=info msg="ignoring event" container=360401141c05b40b46060c5a05ec89adf35b65d1af488c5daf8e814ba374dd6f module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Dec 07 20:05:09 addons-946218 dockerd[1094]: time="2023-12-07T20:05:09.648010089Z" level=info msg="ignoring event" container=c5098e32a885ae94c9e18971b4cf8cdf8b43d0d57c59980f7b417ff2d18d0fe2 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Dec 07 20:05:09 addons-946218 dockerd[1094]: time="2023-12-07T20:05:09.696182907Z" level=info msg="ignoring event" container=c5c0b385d775980bf25c790110c6ded9f22d872d0cda63cf8f2f66cbfc80ec79 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Dec 07 20:05:11 addons-946218 cri-dockerd[1305]: time="2023-12-07T20:05:11Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/7776ed5b177a060c0e25a5fa64c5387b245f676ce8e3f77874367bab11267187/resolv.conf as [nameserver 10.96.0.10 search local-path-storage.svc.cluster.local svc.cluster.local cluster.local us-east-2.compute.internal options ndots:5]"
Dec 07 20:05:11 addons-946218 dockerd[1094]: time="2023-12-07T20:05:11.247350333Z" level=warning msg="reference for unknown type: " digest="sha256:3fbc632167424a6d997e74f52b878d7cc478225cffac6bc977eedfe51c7f4e79" remote="docker.io/library/busybox@sha256:3fbc632167424a6d997e74f52b878d7cc478225cffac6bc977eedfe51c7f4e79"
Dec 07 20:05:11 addons-946218 cri-dockerd[1305]: time="2023-12-07T20:05:11Z" level=info msg="Stop pulling image docker.io/busybox:stable@sha256:3fbc632167424a6d997e74f52b878d7cc478225cffac6bc977eedfe51c7f4e79: Status: Downloaded newer image for busybox@sha256:3fbc632167424a6d997e74f52b878d7cc478225cffac6bc977eedfe51c7f4e79"
Dec 07 20:05:11 addons-946218 dockerd[1094]: time="2023-12-07T20:05:11.970578251Z" level=info msg="ignoring event" container=b7f74386d1e543ca04649d0957c1296df1281ddd1b37a6033d7184315b6ad4fe module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Dec 07 20:05:14 addons-946218 dockerd[1094]: time="2023-12-07T20:05:14.075855526Z" level=info msg="ignoring event" container=7776ed5b177a060c0e25a5fa64c5387b245f676ce8e3f77874367bab11267187 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Dec 07 20:05:16 addons-946218 cri-dockerd[1305]: time="2023-12-07T20:05:16Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/16b519c3911ba045908d0afd71c79f2020f54d2449b369b2a6dda44f6a614d11/resolv.conf as [nameserver 10.96.0.10 search default.svc.cluster.local svc.cluster.local cluster.local us-east-2.compute.internal options ndots:5]"
Dec 07 20:05:16 addons-946218 dockerd[1094]: time="2023-12-07T20:05:16.512568932Z" level=info msg="ignoring event" container=d91c20ff03d0ea7dc0626a284746eff1c89c695ef9a94272061c3a5f67daca7b module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Dec 07 20:05:16 addons-946218 cri-dockerd[1305]: time="2023-12-07T20:05:16Z" level=info msg="Stop pulling image busybox:stable: Status: Downloaded newer image for busybox:stable"
Dec 07 20:05:17 addons-946218 dockerd[1094]: time="2023-12-07T20:05:17.084569521Z" level=info msg="ignoring event" container=c71980c222a3bd2d23b710100d28e0cfe972e4806cac5936e2f45b911aded3d6 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Dec 07 20:05:18 addons-946218 dockerd[1094]: time="2023-12-07T20:05:18.321597996Z" level=info msg="ignoring event" container=16b519c3911ba045908d0afd71c79f2020f54d2449b369b2a6dda44f6a614d11 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Dec 07 20:05:20 addons-946218 cri-dockerd[1305]: time="2023-12-07T20:05:20Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/5533ccc0263224c1793492c2e40ac9b0eb3a4dc4ff4fe86ec0b5ba5cba9a4799/resolv.conf as [nameserver 10.96.0.10 search local-path-storage.svc.cluster.local svc.cluster.local cluster.local us-east-2.compute.internal options ndots:5]"
Dec 07 20:05:20 addons-946218 dockerd[1094]: time="2023-12-07T20:05:20.278321283Z" level=info msg="ignoring event" container=10a05235c152f3fc45c87326a7a5bebe33c3481005dff14fcb1c958328758d63 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Dec 07 20:05:20 addons-946218 dockerd[1094]: time="2023-12-07T20:05:20.797217559Z" level=info msg="ignoring event" container=d052eb56a118a0a2584aea37272b9c1e564489bba4cfb085b707c5e14e3ff4d3 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Dec 07 20:05:20 addons-946218 dockerd[1094]: time="2023-12-07T20:05:20.899441374Z" level=info msg="Container failed to exit within 2s of signal 15 - using the force" container=4663908f1e2cbdcacd9a821db19e79e46403f7ef10b3b1fbedba515e86ff1163
Dec 07 20:05:20 addons-946218 dockerd[1094]: time="2023-12-07T20:05:20.997920088Z" level=info msg="ignoring event" container=4663908f1e2cbdcacd9a821db19e79e46403f7ef10b3b1fbedba515e86ff1163 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Dec 07 20:05:21 addons-946218 dockerd[1094]: time="2023-12-07T20:05:21.123751371Z" level=info msg="ignoring event" container=e5c89fa1327ab0fae0924d95340c066fc3ca6a6b91357b67e48be9a34333dee6 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Dec 07 20:05:21 addons-946218 dockerd[1094]: time="2023-12-07T20:05:21.458845933Z" level=info msg="ignoring event" container=5533ccc0263224c1793492c2e40ac9b0eb3a4dc4ff4fe86ec0b5ba5cba9a4799 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
*
* ==> container status <==
* CONTAINER IMAGE CREATED STATE NAME ATTEMPT POD ID POD
d052eb56a118a dd1b12fcb6097 6 seconds ago Exited hello-world-app 2 7f0363c64c8df hello-world-app-5d77478584-mpj5l
10a05235c152f fc9db2894f4e4 6 seconds ago Exited helper-pod 0 5533ccc026322 helper-pod-delete-pvc-6224022a-bf0c-43f9-b398-1fc2163a085b
c71980c222a3b busybox@sha256:1ceb872bcc68a8fcd34c97952658b58086affdcb604c90c1dee2735bde5edc2f 10 seconds ago Exited busybox 0 16b519c3911ba test-local-path
b7f74386d1e54 busybox@sha256:3fbc632167424a6d997e74f52b878d7cc478225cffac6bc977eedfe51c7f4e79 15 seconds ago Exited helper-pod 0 7776ed5b177a0 helper-pod-create-pvc-6224022a-bf0c-43f9-b398-1fc2163a085b
abdb137054e28 nginx@sha256:3923f8de8d2214b9490e68fd6ae63ea604deddd166df2755b788bef04848b9bc 35 seconds ago Running nginx 0 cc59cf879cc97 nginx
756627c4ffc90 gcr.io/k8s-minikube/gcp-auth-webhook@sha256:d730651bb6584f969d95d8279a754cf9d8d31b5055c43dbdb8d7363a8c6371cf About a minute ago Running gcp-auth 0 b18fe78bdfe3f gcp-auth-d4c87556c-npr8p
38dae9c94ed72 af594c6a879f2 About a minute ago Exited patch 1 20db6a82216fd ingress-nginx-admission-patch-q8q2r
1d7e79033dae7 registry.k8s.io/ingress-nginx/kube-webhook-certgen@sha256:a7943503b45d552785aa3b5e457f169a5661fb94d82b8a3373bcd9ebaf9aac80 About a minute ago Exited create 0 12ed10cb501ac ingress-nginx-admission-create-k7nq7
aaf473fd6d5f8 rancher/local-path-provisioner@sha256:e34c88ae0affb1cdefbb874140d6339d4a27ec4ee420ae8199cd839997b05246 About a minute ago Running local-path-provisioner 0 f851ecd18561a local-path-provisioner-78b46b4d5c-nvtlm
c8cc401b5adac gcr.io/cloud-spanner-emulator/emulator@sha256:9ded3fac22d4d1c85ae51473e3876e2377f5179192fea664409db0fe87e05ece 2 minutes ago Running cloud-spanner-emulator 0 a41f1d2ac4c91 cloud-spanner-emulator-5649c69bf6-mx6tg
869c65374f733 nvcr.io/nvidia/k8s-device-plugin@sha256:339be23400f58c04f09b6ba1d4d2e0e7120648f2b114880513685b22093311f1 2 minutes ago Running nvidia-device-plugin-ctr 0 94d4554a37f9a nvidia-device-plugin-daemonset-pq9kj
cc59fd8925b34 ba04bb24b9575 2 minutes ago Running storage-provisioner 0 12daa2e04df1a storage-provisioner
57bbb646b381c 97e04611ad434 2 minutes ago Running coredns 0 ebc440c71c4b7 coredns-5dd5756b68-87sl4
90b5454ab4cd0 3ca3ca488cf13 2 minutes ago Running kube-proxy 0 315e2ec7e69b0 kube-proxy-t6tdx
49427c98d9f8e 9961cbceaf234 2 minutes ago Running kube-controller-manager 0 24c264dcfef06 kube-controller-manager-addons-946218
cb07176ac708d 05c284c929889 2 minutes ago Running kube-scheduler 0 23d6d9754f8f0 kube-scheduler-addons-946218
427abc39042f1 9cdd6470f48c8 2 minutes ago Running etcd 0 68d7595002ab4 etcd-addons-946218
342a21508a16a 04b4c447bb9d4 2 minutes ago Running kube-apiserver 0 13178e7cd9916 kube-apiserver-addons-946218
*
* ==> coredns [57bbb646b381] <==
* [INFO] 10.244.0.18:58171 - 26841 "A IN hello-world-app.default.svc.cluster.local.cluster.local. udp 73 false 512" NXDOMAIN qr,aa,rd 166 0.000083076s
[INFO] 10.244.0.18:58171 - 44717 "AAAA IN hello-world-app.default.svc.cluster.local.cluster.local. udp 73 false 512" NXDOMAIN qr,aa,rd 166 0.000052816s
[INFO] 10.244.0.18:46396 - 57267 "AAAA IN hello-world-app.default.svc.cluster.local.us-east-2.compute.internal. udp 86 false 512" NXDOMAIN qr,rd,ra 86 0.002269897s
[INFO] 10.244.0.18:46396 - 4898 "A IN hello-world-app.default.svc.cluster.local. udp 59 false 512" NOERROR qr,aa,rd 116 0.000110925s
[INFO] 10.244.0.18:58171 - 30843 "A IN hello-world-app.default.svc.cluster.local.us-east-2.compute.internal. udp 86 false 512" NXDOMAIN qr,rd,ra 86 0.001709574s
[INFO] 10.244.0.18:58171 - 32781 "AAAA IN hello-world-app.default.svc.cluster.local.us-east-2.compute.internal. udp 86 false 512" NXDOMAIN qr,rd,ra 86 0.001026764s
[INFO] 10.244.0.18:58171 - 34739 "A IN hello-world-app.default.svc.cluster.local. udp 59 false 512" NOERROR qr,aa,rd 116 0.000062851s
[INFO] 10.244.0.18:56225 - 8002 "A IN hello-world-app.default.svc.cluster.local.ingress-nginx.svc.cluster.local. udp 91 false 512" NXDOMAIN qr,aa,rd 184 0.000127318s
[INFO] 10.244.0.18:46402 - 2086 "A IN hello-world-app.default.svc.cluster.local.ingress-nginx.svc.cluster.local. udp 91 false 512" NXDOMAIN qr,aa,rd 184 0.000057198s
[INFO] 10.244.0.18:56225 - 23846 "AAAA IN hello-world-app.default.svc.cluster.local.ingress-nginx.svc.cluster.local. udp 91 false 512" NXDOMAIN qr,aa,rd 184 0.00004384s
[INFO] 10.244.0.18:46402 - 39186 "AAAA IN hello-world-app.default.svc.cluster.local.ingress-nginx.svc.cluster.local. udp 91 false 512" NXDOMAIN qr,aa,rd 184 0.000114403s
[INFO] 10.244.0.18:56225 - 58236 "A IN hello-world-app.default.svc.cluster.local.svc.cluster.local. udp 77 false 512" NXDOMAIN qr,aa,rd 170 0.000131995s
[INFO] 10.244.0.18:46402 - 1037 "A IN hello-world-app.default.svc.cluster.local.svc.cluster.local. udp 77 false 512" NXDOMAIN qr,aa,rd 170 0.000090403s
[INFO] 10.244.0.18:56225 - 29428 "AAAA IN hello-world-app.default.svc.cluster.local.svc.cluster.local. udp 77 false 512" NXDOMAIN qr,aa,rd 170 0.000188708s
[INFO] 10.244.0.18:46402 - 33915 "AAAA IN hello-world-app.default.svc.cluster.local.svc.cluster.local. udp 77 false 512" NXDOMAIN qr,aa,rd 170 0.000076315s
[INFO] 10.244.0.18:56225 - 11041 "A IN hello-world-app.default.svc.cluster.local.cluster.local. udp 73 false 512" NXDOMAIN qr,aa,rd 166 0.000055007s
[INFO] 10.244.0.18:46402 - 27864 "A IN hello-world-app.default.svc.cluster.local.cluster.local. udp 73 false 512" NXDOMAIN qr,aa,rd 166 0.000076266s
[INFO] 10.244.0.18:56225 - 45899 "AAAA IN hello-world-app.default.svc.cluster.local.cluster.local. udp 73 false 512" NXDOMAIN qr,aa,rd 166 0.000061136s
[INFO] 10.244.0.18:46402 - 47361 "AAAA IN hello-world-app.default.svc.cluster.local.cluster.local. udp 73 false 512" NXDOMAIN qr,aa,rd 166 0.000131437s
[INFO] 10.244.0.18:46402 - 3997 "A IN hello-world-app.default.svc.cluster.local.us-east-2.compute.internal. udp 86 false 512" NXDOMAIN qr,rd,ra 86 0.001947726s
[INFO] 10.244.0.18:56225 - 10564 "A IN hello-world-app.default.svc.cluster.local.us-east-2.compute.internal. udp 86 false 512" NXDOMAIN qr,rd,ra 86 0.002189291s
[INFO] 10.244.0.18:56225 - 29015 "AAAA IN hello-world-app.default.svc.cluster.local.us-east-2.compute.internal. udp 86 false 512" NXDOMAIN qr,rd,ra 86 0.001772334s
[INFO] 10.244.0.18:46402 - 46177 "AAAA IN hello-world-app.default.svc.cluster.local.us-east-2.compute.internal. udp 86 false 512" NXDOMAIN qr,rd,ra 86 0.001320136s
[INFO] 10.244.0.18:56225 - 56364 "A IN hello-world-app.default.svc.cluster.local. udp 59 false 512" NOERROR qr,aa,rd 116 0.000064426s
[INFO] 10.244.0.18:46402 - 14832 "A IN hello-world-app.default.svc.cluster.local. udp 59 false 512" NOERROR qr,aa,rd 116 0.000049534s
*
* ==> describe nodes <==
* Name: addons-946218
Roles: control-plane
Labels: beta.kubernetes.io/arch=arm64
beta.kubernetes.io/os=linux
kubernetes.io/arch=arm64
kubernetes.io/hostname=addons-946218
kubernetes.io/os=linux
minikube.k8s.io/commit=e9ef2cce417fa3e029706bd52eaf40ea89608b2c
minikube.k8s.io/name=addons-946218
minikube.k8s.io/primary=true
minikube.k8s.io/updated_at=2023_12_07T20_02_42_0700
minikube.k8s.io/version=v1.32.0
node-role.kubernetes.io/control-plane=
node.kubernetes.io/exclude-from-external-load-balancers=
topology.hostpath.csi/node=addons-946218
Annotations: kubeadm.alpha.kubernetes.io/cri-socket: unix:///var/run/cri-dockerd.sock
node.alpha.kubernetes.io/ttl: 0
volumes.kubernetes.io/controller-managed-attach-detach: true
CreationTimestamp: Thu, 07 Dec 2023 20:02:39 +0000
Taints: <none>
Unschedulable: false
Lease:
HolderIdentity: addons-946218
AcquireTime: <unset>
RenewTime: Thu, 07 Dec 2023 20:05:26 +0000
Conditions:
Type Status LastHeartbeatTime LastTransitionTime Reason Message
---- ------ ----------------- ------------------ ------ -------
MemoryPressure False Thu, 07 Dec 2023 20:05:16 +0000 Thu, 07 Dec 2023 20:02:35 +0000 KubeletHasSufficientMemory kubelet has sufficient memory available
DiskPressure False Thu, 07 Dec 2023 20:05:16 +0000 Thu, 07 Dec 2023 20:02:35 +0000 KubeletHasNoDiskPressure kubelet has no disk pressure
PIDPressure False Thu, 07 Dec 2023 20:05:16 +0000 Thu, 07 Dec 2023 20:02:35 +0000 KubeletHasSufficientPID kubelet has sufficient PID available
Ready True Thu, 07 Dec 2023 20:05:16 +0000 Thu, 07 Dec 2023 20:02:52 +0000 KubeletReady kubelet is posting ready status
Addresses:
InternalIP: 192.168.49.2
Hostname: addons-946218
Capacity:
cpu: 2
ephemeral-storage: 203034800Ki
hugepages-1Gi: 0
hugepages-2Mi: 0
hugepages-32Mi: 0
hugepages-64Ki: 0
memory: 8022504Ki
pods: 110
Allocatable:
cpu: 2
ephemeral-storage: 203034800Ki
hugepages-1Gi: 0
hugepages-2Mi: 0
hugepages-32Mi: 0
hugepages-64Ki: 0
memory: 8022504Ki
pods: 110
System Info:
Machine ID: 32721876353a4a3683a7da666da8bcf3
System UUID: 4fa5688d-8776-4d1a-9879-2e8eb500ea66
Boot ID: 654d4215-4a80-4da6-8d0f-f014f59dffc2
Kernel Version: 5.15.0-1050-aws
OS Image: Ubuntu 22.04.3 LTS
Operating System: linux
Architecture: arm64
Container Runtime Version: docker://24.0.7
Kubelet Version: v1.28.4
Kube-Proxy Version: v1.28.4
PodCIDR: 10.244.0.0/24
PodCIDRs: 10.244.0.0/24
Non-terminated Pods: (13 in total)
Namespace Name CPU Requests CPU Limits Memory Requests Memory Limits Age
--------- ---- ------------ ---------- --------------- ------------- ---
default cloud-spanner-emulator-5649c69bf6-mx6tg 0 (0%) 0 (0%) 0 (0%) 0 (0%) 2m26s
default hello-world-app-5d77478584-mpj5l 0 (0%) 0 (0%) 0 (0%) 0 (0%) 27s
default nginx 0 (0%) 0 (0%) 0 (0%) 0 (0%) 37s
gcp-auth gcp-auth-d4c87556c-npr8p 0 (0%) 0 (0%) 0 (0%) 0 (0%) 2m18s
kube-system coredns-5dd5756b68-87sl4 100m (5%) 0 (0%) 70Mi (0%) 170Mi (2%) 2m30s
kube-system etcd-addons-946218 100m (5%) 0 (0%) 100Mi (1%) 0 (0%) 2m44s
kube-system kube-apiserver-addons-946218 250m (12%) 0 (0%) 0 (0%) 0 (0%) 2m45s
kube-system kube-controller-manager-addons-946218 200m (10%) 0 (0%) 0 (0%) 0 (0%) 2m44s
kube-system kube-proxy-t6tdx 0 (0%) 0 (0%) 0 (0%) 0 (0%) 2m31s
kube-system kube-scheduler-addons-946218 100m (5%) 0 (0%) 0 (0%) 0 (0%) 2m44s
kube-system nvidia-device-plugin-daemonset-pq9kj 0 (0%) 0 (0%) 0 (0%) 0 (0%) 2m27s
kube-system storage-provisioner 0 (0%) 0 (0%) 0 (0%) 0 (0%) 2m24s
local-path-storage local-path-provisioner-78b46b4d5c-nvtlm 0 (0%) 0 (0%) 0 (0%) 0 (0%) 2m24s
Allocated resources:
(Total limits may be over 100 percent, i.e., overcommitted.)
Resource Requests Limits
-------- -------- ------
cpu 750m (37%) 0 (0%)
memory 170Mi (2%) 170Mi (2%)
ephemeral-storage 0 (0%) 0 (0%)
hugepages-1Gi 0 (0%) 0 (0%)
hugepages-2Mi 0 (0%) 0 (0%)
hugepages-32Mi 0 (0%) 0 (0%)
hugepages-64Ki 0 (0%) 0 (0%)
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal Starting 2m28s kube-proxy
Normal NodeHasSufficientMemory 2m52s (x8 over 2m52s) kubelet Node addons-946218 status is now: NodeHasSufficientMemory
Normal NodeHasNoDiskPressure 2m52s (x8 over 2m52s) kubelet Node addons-946218 status is now: NodeHasNoDiskPressure
Normal NodeHasSufficientPID 2m52s (x7 over 2m52s) kubelet Node addons-946218 status is now: NodeHasSufficientPID
Normal NodeAllocatableEnforced 2m52s kubelet Updated Node Allocatable limit across pods
Normal NodeHasSufficientMemory 2m44s kubelet Node addons-946218 status is now: NodeHasSufficientMemory
Normal NodeHasNoDiskPressure 2m44s kubelet Node addons-946218 status is now: NodeHasNoDiskPressure
Normal NodeHasSufficientPID 2m44s kubelet Node addons-946218 status is now: NodeHasSufficientPID
Normal NodeNotReady 2m44s kubelet Node addons-946218 status is now: NodeNotReady
Normal NodeAllocatableEnforced 2m44s kubelet Updated Node Allocatable limit across pods
Normal Starting 2m44s kubelet Starting kubelet.
Normal NodeReady 2m34s kubelet Node addons-946218 status is now: NodeReady
Normal RegisteredNode 2m32s node-controller Node addons-946218 event: Registered Node addons-946218 in Controller
*
* ==> dmesg <==
* [Dec 7 19:17] ACPI: SRAT not present
[ +0.000000] ACPI: SRAT not present
[ +0.000000] SPI driver altr_a10sr has no spi_device_id for altr,a10sr
[ +0.015157] device-mapper: core: CONFIG_IMA_DISABLE_HTABLE is disabled. Duplicate IMA measurements will not be recorded in the IMA log.
[ +1.345860] ena 0000:00:05.0: LLQ is not supported Fallback to host mode policy.
[ +6.584547] kauditd_printk_skb: 26 callbacks suppressed
*
* ==> etcd [427abc39042f] <==
* {"level":"info","ts":"2023-12-07T20:02:35.189525Z","caller":"etcdserver/server.go:738","msg":"started as single-node; fast-forwarding election ticks","local-member-id":"aec36adc501070cc","forward-ticks":9,"forward-duration":"900ms","election-ticks":10,"election-timeout":"1s"}
{"level":"info","ts":"2023-12-07T20:02:35.1899Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"aec36adc501070cc switched to configuration voters=(12593026477526642892)"}
{"level":"info","ts":"2023-12-07T20:02:35.190249Z","caller":"membership/cluster.go:421","msg":"added member","cluster-id":"fa54960ea34d58be","local-member-id":"aec36adc501070cc","added-peer-id":"aec36adc501070cc","added-peer-peer-urls":["https://192.168.49.2:2380"]}
{"level":"info","ts":"2023-12-07T20:02:35.190062Z","caller":"embed/etcd.go:597","msg":"serving peer traffic","address":"192.168.49.2:2380"}
{"level":"info","ts":"2023-12-07T20:02:35.190276Z","caller":"embed/etcd.go:569","msg":"cmux::serve","address":"192.168.49.2:2380"}
{"level":"info","ts":"2023-12-07T20:02:35.191817Z","caller":"embed/etcd.go:278","msg":"now serving peer/client/metrics","local-member-id":"aec36adc501070cc","initial-advertise-peer-urls":["https://192.168.49.2:2380"],"listen-peer-urls":["https://192.168.49.2:2380"],"advertise-client-urls":["https://192.168.49.2:2379"],"listen-client-urls":["https://127.0.0.1:2379","https://192.168.49.2:2379"],"listen-metrics-urls":["http://127.0.0.1:2381"]}
{"level":"info","ts":"2023-12-07T20:02:35.191847Z","caller":"embed/etcd.go:855","msg":"serving metrics","address":"http://127.0.0.1:2381"}
{"level":"info","ts":"2023-12-07T20:02:35.620802Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"aec36adc501070cc is starting a new election at term 1"}
{"level":"info","ts":"2023-12-07T20:02:35.621015Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"aec36adc501070cc became pre-candidate at term 1"}
{"level":"info","ts":"2023-12-07T20:02:35.621132Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"aec36adc501070cc received MsgPreVoteResp from aec36adc501070cc at term 1"}
{"level":"info","ts":"2023-12-07T20:02:35.621227Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"aec36adc501070cc became candidate at term 2"}
{"level":"info","ts":"2023-12-07T20:02:35.621318Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"aec36adc501070cc received MsgVoteResp from aec36adc501070cc at term 2"}
{"level":"info","ts":"2023-12-07T20:02:35.621409Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"aec36adc501070cc became leader at term 2"}
{"level":"info","ts":"2023-12-07T20:02:35.621518Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"raft.node: aec36adc501070cc elected leader aec36adc501070cc at term 2"}
{"level":"info","ts":"2023-12-07T20:02:35.624786Z","caller":"etcdserver/server.go:2062","msg":"published local member to cluster through raft","local-member-id":"aec36adc501070cc","local-member-attributes":"{Name:addons-946218 ClientURLs:[https://192.168.49.2:2379]}","request-path":"/0/members/aec36adc501070cc/attributes","cluster-id":"fa54960ea34d58be","publish-timeout":"7s"}
{"level":"info","ts":"2023-12-07T20:02:35.625697Z","caller":"embed/serve.go:103","msg":"ready to serve client requests"}
{"level":"info","ts":"2023-12-07T20:02:35.626933Z","caller":"embed/serve.go:250","msg":"serving client traffic securely","traffic":"grpc+http","address":"127.0.0.1:2379"}
{"level":"info","ts":"2023-12-07T20:02:35.627144Z","caller":"etcdserver/server.go:2571","msg":"setting up initial cluster version using v2 API","cluster-version":"3.5"}
{"level":"info","ts":"2023-12-07T20:02:35.627448Z","caller":"embed/serve.go:103","msg":"ready to serve client requests"}
{"level":"info","ts":"2023-12-07T20:02:35.63572Z","caller":"embed/serve.go:250","msg":"serving client traffic securely","traffic":"grpc+http","address":"192.168.49.2:2379"}
{"level":"info","ts":"2023-12-07T20:02:35.636338Z","caller":"membership/cluster.go:584","msg":"set initial cluster version","cluster-id":"fa54960ea34d58be","local-member-id":"aec36adc501070cc","cluster-version":"3.5"}
{"level":"info","ts":"2023-12-07T20:02:35.636537Z","caller":"api/capability.go:75","msg":"enabled capabilities for version","cluster-version":"3.5"}
{"level":"info","ts":"2023-12-07T20:02:35.636653Z","caller":"etcdserver/server.go:2595","msg":"cluster version is updated","cluster-version":"3.5"}
{"level":"info","ts":"2023-12-07T20:02:35.640729Z","caller":"etcdmain/main.go:44","msg":"notifying init daemon"}
{"level":"info","ts":"2023-12-07T20:02:35.640882Z","caller":"etcdmain/main.go:50","msg":"successfully notified init daemon"}
*
* ==> gcp-auth [756627c4ffc9] <==
* 2023/12/07 20:04:15 GCP Auth Webhook started!
2023/12/07 20:04:26 Ready to marshal response ...
2023/12/07 20:04:26 Ready to write response ...
2023/12/07 20:04:36 Ready to marshal response ...
2023/12/07 20:04:36 Ready to write response ...
2023/12/07 20:04:49 Ready to marshal response ...
2023/12/07 20:04:49 Ready to write response ...
2023/12/07 20:04:53 Ready to marshal response ...
2023/12/07 20:04:53 Ready to write response ...
2023/12/07 20:04:59 Ready to marshal response ...
2023/12/07 20:04:59 Ready to write response ...
2023/12/07 20:05:10 Ready to marshal response ...
2023/12/07 20:05:10 Ready to write response ...
2023/12/07 20:05:10 Ready to marshal response ...
2023/12/07 20:05:10 Ready to write response ...
2023/12/07 20:05:19 Ready to marshal response ...
2023/12/07 20:05:19 Ready to write response ...
*
* ==> kernel <==
* 20:05:26 up 47 min, 0 users, load average: 1.61, 1.49, 0.66
Linux addons-946218 5.15.0-1050-aws #55~20.04.1-Ubuntu SMP Mon Nov 6 12:18:16 UTC 2023 aarch64 aarch64 aarch64 GNU/Linux
PRETTY_NAME="Ubuntu 22.04.3 LTS"
*
* ==> kube-apiserver [342a21508a16] <==
* I1207 20:04:42.482457 1 handler.go:232] Adding GroupVersion gadget.kinvolk.io v1alpha1 to ResourceManager
W1207 20:04:43.500698 1 cacher.go:171] Terminating all watchers from cacher traces.gadget.kinvolk.io
I1207 20:04:48.661434 1 controller.go:624] quota admission added evaluator for: ingresses.networking.k8s.io
I1207 20:04:49.036027 1 controller.go:624] quota admission added evaluator for: volumesnapshots.snapshot.storage.k8s.io
I1207 20:04:49.388958 1 alloc.go:330] "allocated clusterIPs" service="default/nginx" clusterIPs={"IPv4":"10.104.5.87"}
I1207 20:05:00.560536 1 alloc.go:330] "allocated clusterIPs" service="default/hello-world-app" clusterIPs={"IPv4":"10.99.255.99"}
I1207 20:05:09.276615 1 handler.go:232] Adding GroupVersion snapshot.storage.k8s.io v1 to ResourceManager
I1207 20:05:09.277192 1 handler.go:232] Adding GroupVersion snapshot.storage.k8s.io v1beta1 to ResourceManager
I1207 20:05:09.287500 1 handler.go:232] Adding GroupVersion snapshot.storage.k8s.io v1 to ResourceManager
I1207 20:05:09.288352 1 handler.go:232] Adding GroupVersion snapshot.storage.k8s.io v1beta1 to ResourceManager
I1207 20:05:09.296852 1 handler.go:232] Adding GroupVersion snapshot.storage.k8s.io v1 to ResourceManager
I1207 20:05:09.296947 1 handler.go:232] Adding GroupVersion snapshot.storage.k8s.io v1beta1 to ResourceManager
I1207 20:05:09.307667 1 handler.go:232] Adding GroupVersion snapshot.storage.k8s.io v1 to ResourceManager
I1207 20:05:09.307716 1 handler.go:232] Adding GroupVersion snapshot.storage.k8s.io v1beta1 to ResourceManager
I1207 20:05:09.326955 1 handler.go:232] Adding GroupVersion snapshot.storage.k8s.io v1 to ResourceManager
I1207 20:05:09.327226 1 handler.go:232] Adding GroupVersion snapshot.storage.k8s.io v1beta1 to ResourceManager
I1207 20:05:09.340066 1 handler.go:232] Adding GroupVersion snapshot.storage.k8s.io v1 to ResourceManager
I1207 20:05:09.340129 1 handler.go:232] Adding GroupVersion snapshot.storage.k8s.io v1beta1 to ResourceManager
I1207 20:05:09.365437 1 handler.go:232] Adding GroupVersion snapshot.storage.k8s.io v1 to ResourceManager
I1207 20:05:09.365493 1 handler.go:232] Adding GroupVersion snapshot.storage.k8s.io v1beta1 to ResourceManager
I1207 20:05:09.370153 1 handler.go:232] Adding GroupVersion snapshot.storage.k8s.io v1 to ResourceManager
I1207 20:05:09.370252 1 handler.go:232] Adding GroupVersion snapshot.storage.k8s.io v1beta1 to ResourceManager
W1207 20:05:10.297652 1 cacher.go:171] Terminating all watchers from cacher volumesnapshotclasses.snapshot.storage.k8s.io
W1207 20:05:10.370710 1 cacher.go:171] Terminating all watchers from cacher volumesnapshotcontents.snapshot.storage.k8s.io
W1207 20:05:10.389245 1 cacher.go:171] Terminating all watchers from cacher volumesnapshots.snapshot.storage.k8s.io
*
* ==> kube-controller-manager [49427c98d9f8] <==
* W1207 20:05:13.988291 1 reflector.go:535] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E1207 20:05:13.988330 1 reflector.go:147] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W1207 20:05:14.418449 1 reflector.go:535] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E1207 20:05:14.418484 1 reflector.go:147] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W1207 20:05:15.159966 1 reflector.go:535] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E1207 20:05:15.160009 1 reflector.go:147] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W1207 20:05:17.215663 1 reflector.go:535] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E1207 20:05:17.215694 1 reflector.go:147] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W1207 20:05:17.580902 1 reflector.go:535] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E1207 20:05:17.580937 1 reflector.go:147] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
I1207 20:05:17.856676 1 job_controller.go:562] "enqueueing job" key="ingress-nginx/ingress-nginx-admission-create"
I1207 20:05:17.857526 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="ingress-nginx/ingress-nginx-controller-7c6974c4d8" duration="96.196µs"
I1207 20:05:17.866158 1 job_controller.go:562] "enqueueing job" key="ingress-nginx/ingress-nginx-admission-patch"
W1207 20:05:19.156556 1 reflector.go:535] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E1207 20:05:19.156589 1 reflector.go:147] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
I1207 20:05:20.182766 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="local-path-storage/local-path-provisioner-78b46b4d5c" duration="17.51µs"
I1207 20:05:21.394806 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="default/hello-world-app-5d77478584" duration="70.121µs"
W1207 20:05:23.765835 1 reflector.go:535] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E1207 20:05:23.765868 1 reflector.go:147] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W1207 20:05:25.199436 1 reflector.go:535] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E1207 20:05:25.199672 1 reflector.go:147] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
I1207 20:05:25.273059 1 shared_informer.go:311] Waiting for caches to sync for resource quota
I1207 20:05:25.273330 1 shared_informer.go:318] Caches are synced for resource quota
I1207 20:05:25.725834 1 shared_informer.go:311] Waiting for caches to sync for garbage collector
I1207 20:05:25.725890 1 shared_informer.go:318] Caches are synced for garbage collector
*
* ==> kube-proxy [90b5454ab4cd] <==
* I1207 20:02:57.111488 1 server_others.go:69] "Using iptables proxy"
I1207 20:02:57.143889 1 node.go:141] Successfully retrieved node IP: 192.168.49.2
I1207 20:02:57.321183 1 server.go:632] "kube-proxy running in dual-stack mode" primary ipFamily="IPv4"
I1207 20:02:57.325748 1 server_others.go:152] "Using iptables Proxier"
I1207 20:02:57.325781 1 server_others.go:421] "Detect-local-mode set to ClusterCIDR, but no cluster CIDR for family" ipFamily="IPv6"
I1207 20:02:57.325788 1 server_others.go:438] "Defaulting to no-op detect-local"
I1207 20:02:57.325834 1 proxier.go:251] "Setting route_localnet=1 to allow node-ports on localhost; to change this either disable iptables.localhostNodePorts (--iptables-localhost-nodeports) or set nodePortAddresses (--nodeport-addresses) to filter loopback addresses"
I1207 20:02:57.326034 1 server.go:846] "Version info" version="v1.28.4"
I1207 20:02:57.326045 1 server.go:848] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
I1207 20:02:57.327070 1 config.go:188] "Starting service config controller"
I1207 20:02:57.327121 1 shared_informer.go:311] Waiting for caches to sync for service config
I1207 20:02:57.327144 1 config.go:97] "Starting endpoint slice config controller"
I1207 20:02:57.327148 1 shared_informer.go:311] Waiting for caches to sync for endpoint slice config
I1207 20:02:57.329602 1 config.go:315] "Starting node config controller"
I1207 20:02:57.329614 1 shared_informer.go:311] Waiting for caches to sync for node config
I1207 20:02:57.427207 1 shared_informer.go:318] Caches are synced for service config
I1207 20:02:57.427275 1 shared_informer.go:318] Caches are synced for endpoint slice config
I1207 20:02:57.429945 1 shared_informer.go:318] Caches are synced for node config
*
* ==> kube-scheduler [cb07176ac708] <==
* W1207 20:02:39.401545 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.Namespace: namespaces is forbidden: User "system:kube-scheduler" cannot list resource "namespaces" in API group "" at the cluster scope
E1207 20:02:39.402621 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.Namespace: failed to list *v1.Namespace: namespaces is forbidden: User "system:kube-scheduler" cannot list resource "namespaces" in API group "" at the cluster scope
W1207 20:02:39.401578 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.CSINode: csinodes.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csinodes" in API group "storage.k8s.io" at the cluster scope
W1207 20:02:39.401614 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.ReplicaSet: replicasets.apps is forbidden: User "system:kube-scheduler" cannot list resource "replicasets" in API group "apps" at the cluster scope
E1207 20:02:39.402874 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.ReplicaSet: failed to list *v1.ReplicaSet: replicasets.apps is forbidden: User "system:kube-scheduler" cannot list resource "replicasets" in API group "apps" at the cluster scope
E1207 20:02:39.402848 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.CSINode: failed to list *v1.CSINode: csinodes.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csinodes" in API group "storage.k8s.io" at the cluster scope
W1207 20:02:39.403410 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.StatefulSet: statefulsets.apps is forbidden: User "system:kube-scheduler" cannot list resource "statefulsets" in API group "apps" at the cluster scope
E1207 20:02:39.403528 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.StatefulSet: failed to list *v1.StatefulSet: statefulsets.apps is forbidden: User "system:kube-scheduler" cannot list resource "statefulsets" in API group "apps" at the cluster scope
W1207 20:02:39.403668 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope
E1207 20:02:39.403748 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.StorageClass: failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope
W1207 20:02:40.212981 1 reflector.go:535] pkg/server/dynamiccertificates/configmap_cafile_content.go:206: failed to list *v1.ConfigMap: configmaps "extension-apiserver-authentication" is forbidden: User "system:kube-scheduler" cannot list resource "configmaps" in API group "" in the namespace "kube-system"
E1207 20:02:40.213322 1 reflector.go:147] pkg/server/dynamiccertificates/configmap_cafile_content.go:206: Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps "extension-apiserver-authentication" is forbidden: User "system:kube-scheduler" cannot list resource "configmaps" in API group "" in the namespace "kube-system"
W1207 20:02:40.237879 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.Service: services is forbidden: User "system:kube-scheduler" cannot list resource "services" in API group "" at the cluster scope
E1207 20:02:40.237923 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.Service: failed to list *v1.Service: services is forbidden: User "system:kube-scheduler" cannot list resource "services" in API group "" at the cluster scope
W1207 20:02:40.240301 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.StatefulSet: statefulsets.apps is forbidden: User "system:kube-scheduler" cannot list resource "statefulsets" in API group "apps" at the cluster scope
E1207 20:02:40.240519 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.StatefulSet: failed to list *v1.StatefulSet: statefulsets.apps is forbidden: User "system:kube-scheduler" cannot list resource "statefulsets" in API group "apps" at the cluster scope
W1207 20:02:40.372208 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.Node: nodes is forbidden: User "system:kube-scheduler" cannot list resource "nodes" in API group "" at the cluster scope
E1207 20:02:40.372469 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.Node: failed to list *v1.Node: nodes is forbidden: User "system:kube-scheduler" cannot list resource "nodes" in API group "" at the cluster scope
W1207 20:02:40.384505 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.Namespace: namespaces is forbidden: User "system:kube-scheduler" cannot list resource "namespaces" in API group "" at the cluster scope
E1207 20:02:40.384762 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.Namespace: failed to list *v1.Namespace: namespaces is forbidden: User "system:kube-scheduler" cannot list resource "namespaces" in API group "" at the cluster scope
W1207 20:02:40.424265 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope
E1207 20:02:40.424304 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.StorageClass: failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope
W1207 20:02:40.484260 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User "system:kube-scheduler" cannot list resource "poddisruptionbudgets" in API group "policy" at the cluster scope
E1207 20:02:40.484299 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.PodDisruptionBudget: failed to list *v1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User "system:kube-scheduler" cannot list resource "poddisruptionbudgets" in API group "policy" at the cluster scope
I1207 20:02:41.987884 1 shared_informer.go:318] Caches are synced for client-ca::kube-system::extension-apiserver-authentication::client-ca-file
*
* ==> kubelet <==
* Dec 07 20:05:21 addons-946218 kubelet[2322]: I1207 20:05:21.309745 2322 operation_generator.go:882] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/294b182c-8c00-4bd0-a19d-370f998cc8b3-webhook-cert" (OuterVolumeSpecName: "webhook-cert") pod "294b182c-8c00-4bd0-a19d-370f998cc8b3" (UID: "294b182c-8c00-4bd0-a19d-370f998cc8b3"). InnerVolumeSpecName "webhook-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 07 20:05:21 addons-946218 kubelet[2322]: I1207 20:05:21.310131 2322 operation_generator.go:882] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/294b182c-8c00-4bd0-a19d-370f998cc8b3-kube-api-access-cdldn" (OuterVolumeSpecName: "kube-api-access-cdldn") pod "294b182c-8c00-4bd0-a19d-370f998cc8b3" (UID: "294b182c-8c00-4bd0-a19d-370f998cc8b3"). InnerVolumeSpecName "kube-api-access-cdldn". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 07 20:05:21 addons-946218 kubelet[2322]: I1207 20:05:21.356620 2322 scope.go:117] "RemoveContainer" containerID="4663908f1e2cbdcacd9a821db19e79e46403f7ef10b3b1fbedba515e86ff1163"
Dec 07 20:05:21 addons-946218 kubelet[2322]: I1207 20:05:21.381937 2322 scope.go:117] "RemoveContainer" containerID="4663908f1e2cbdcacd9a821db19e79e46403f7ef10b3b1fbedba515e86ff1163"
Dec 07 20:05:21 addons-946218 kubelet[2322]: E1207 20:05:21.383138 2322 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = Unknown desc = Error response from daemon: No such container: 4663908f1e2cbdcacd9a821db19e79e46403f7ef10b3b1fbedba515e86ff1163" containerID="4663908f1e2cbdcacd9a821db19e79e46403f7ef10b3b1fbedba515e86ff1163"
Dec 07 20:05:21 addons-946218 kubelet[2322]: I1207 20:05:21.383194 2322 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"docker","ID":"4663908f1e2cbdcacd9a821db19e79e46403f7ef10b3b1fbedba515e86ff1163"} err="failed to get container status \"4663908f1e2cbdcacd9a821db19e79e46403f7ef10b3b1fbedba515e86ff1163\": rpc error: code = Unknown desc = Error response from daemon: No such container: 4663908f1e2cbdcacd9a821db19e79e46403f7ef10b3b1fbedba515e86ff1163"
Dec 07 20:05:21 addons-946218 kubelet[2322]: I1207 20:05:21.384675 2322 scope.go:117] "RemoveContainer" containerID="b919099d7cbbbdbb01d3d2c1f0094b5ebc6ce1dab5c9d7bfe95d33f88692f181"
Dec 07 20:05:21 addons-946218 kubelet[2322]: I1207 20:05:21.385149 2322 scope.go:117] "RemoveContainer" containerID="d052eb56a118a0a2584aea37272b9c1e564489bba4cfb085b707c5e14e3ff4d3"
Dec 07 20:05:21 addons-946218 kubelet[2322]: E1207 20:05:21.397914 2322 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"hello-world-app\" with CrashLoopBackOff: \"back-off 20s restarting failed container=hello-world-app pod=hello-world-app-5d77478584-mpj5l_default(fdc5fefe-1fa6-4bdf-b0b9-e619968c2142)\"" pod="default/hello-world-app-5d77478584-mpj5l" podUID="fdc5fefe-1fa6-4bdf-b0b9-e619968c2142"
Dec 07 20:05:21 addons-946218 kubelet[2322]: I1207 20:05:21.409236 2322 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-cdldn\" (UniqueName: \"kubernetes.io/projected/294b182c-8c00-4bd0-a19d-370f998cc8b3-kube-api-access-cdldn\") on node \"addons-946218\" DevicePath \"\""
Dec 07 20:05:21 addons-946218 kubelet[2322]: I1207 20:05:21.409284 2322 reconciler_common.go:300] "Volume detached for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/294b182c-8c00-4bd0-a19d-370f998cc8b3-webhook-cert\") on node \"addons-946218\" DevicePath \"\""
Dec 07 20:05:21 addons-946218 kubelet[2322]: I1207 20:05:21.510201 2322 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"gcp-creds\" (UniqueName: \"kubernetes.io/host-path/5c7c8634-3e0a-46c4-9ff4-6644b1fce6c7-gcp-creds\") pod \"5c7c8634-3e0a-46c4-9ff4-6644b1fce6c7\" (UID: \"5c7c8634-3e0a-46c4-9ff4-6644b1fce6c7\") "
Dec 07 20:05:21 addons-946218 kubelet[2322]: I1207 20:05:21.510448 2322 operation_generator.go:882] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/5c7c8634-3e0a-46c4-9ff4-6644b1fce6c7-gcp-creds" (OuterVolumeSpecName: "gcp-creds") pod "5c7c8634-3e0a-46c4-9ff4-6644b1fce6c7" (UID: "5c7c8634-3e0a-46c4-9ff4-6644b1fce6c7"). InnerVolumeSpecName "gcp-creds". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Dec 07 20:05:21 addons-946218 kubelet[2322]: I1207 20:05:21.510516 2322 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-t7cbk\" (UniqueName: \"kubernetes.io/projected/5c7c8634-3e0a-46c4-9ff4-6644b1fce6c7-kube-api-access-t7cbk\") pod \"5c7c8634-3e0a-46c4-9ff4-6644b1fce6c7\" (UID: \"5c7c8634-3e0a-46c4-9ff4-6644b1fce6c7\") "
Dec 07 20:05:21 addons-946218 kubelet[2322]: I1207 20:05:21.510628 2322 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"script\" (UniqueName: \"kubernetes.io/configmap/5c7c8634-3e0a-46c4-9ff4-6644b1fce6c7-script\") pod \"5c7c8634-3e0a-46c4-9ff4-6644b1fce6c7\" (UID: \"5c7c8634-3e0a-46c4-9ff4-6644b1fce6c7\") "
Dec 07 20:05:21 addons-946218 kubelet[2322]: I1207 20:05:21.510673 2322 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"data\" (UniqueName: \"kubernetes.io/host-path/5c7c8634-3e0a-46c4-9ff4-6644b1fce6c7-data\") pod \"5c7c8634-3e0a-46c4-9ff4-6644b1fce6c7\" (UID: \"5c7c8634-3e0a-46c4-9ff4-6644b1fce6c7\") "
Dec 07 20:05:21 addons-946218 kubelet[2322]: I1207 20:05:21.510780 2322 reconciler_common.go:300] "Volume detached for volume \"gcp-creds\" (UniqueName: \"kubernetes.io/host-path/5c7c8634-3e0a-46c4-9ff4-6644b1fce6c7-gcp-creds\") on node \"addons-946218\" DevicePath \"\""
Dec 07 20:05:21 addons-946218 kubelet[2322]: I1207 20:05:21.510853 2322 operation_generator.go:882] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/5c7c8634-3e0a-46c4-9ff4-6644b1fce6c7-data" (OuterVolumeSpecName: "data") pod "5c7c8634-3e0a-46c4-9ff4-6644b1fce6c7" (UID: "5c7c8634-3e0a-46c4-9ff4-6644b1fce6c7"). InnerVolumeSpecName "data". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Dec 07 20:05:21 addons-946218 kubelet[2322]: I1207 20:05:21.512463 2322 operation_generator.go:882] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5c7c8634-3e0a-46c4-9ff4-6644b1fce6c7-script" (OuterVolumeSpecName: "script") pod "5c7c8634-3e0a-46c4-9ff4-6644b1fce6c7" (UID: "5c7c8634-3e0a-46c4-9ff4-6644b1fce6c7"). InnerVolumeSpecName "script". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 07 20:05:21 addons-946218 kubelet[2322]: I1207 20:05:21.516891 2322 operation_generator.go:882] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5c7c8634-3e0a-46c4-9ff4-6644b1fce6c7-kube-api-access-t7cbk" (OuterVolumeSpecName: "kube-api-access-t7cbk") pod "5c7c8634-3e0a-46c4-9ff4-6644b1fce6c7" (UID: "5c7c8634-3e0a-46c4-9ff4-6644b1fce6c7"). InnerVolumeSpecName "kube-api-access-t7cbk". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 07 20:05:21 addons-946218 kubelet[2322]: I1207 20:05:21.611055 2322 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-t7cbk\" (UniqueName: \"kubernetes.io/projected/5c7c8634-3e0a-46c4-9ff4-6644b1fce6c7-kube-api-access-t7cbk\") on node \"addons-946218\" DevicePath \"\""
Dec 07 20:05:21 addons-946218 kubelet[2322]: I1207 20:05:21.611089 2322 reconciler_common.go:300] "Volume detached for volume \"script\" (UniqueName: \"kubernetes.io/configmap/5c7c8634-3e0a-46c4-9ff4-6644b1fce6c7-script\") on node \"addons-946218\" DevicePath \"\""
Dec 07 20:05:21 addons-946218 kubelet[2322]: I1207 20:05:21.611101 2322 reconciler_common.go:300] "Volume detached for volume \"data\" (UniqueName: \"kubernetes.io/host-path/5c7c8634-3e0a-46c4-9ff4-6644b1fce6c7-data\") on node \"addons-946218\" DevicePath \"\""
Dec 07 20:05:22 addons-946218 kubelet[2322]: I1207 20:05:22.411081 2322 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5533ccc0263224c1793492c2e40ac9b0eb3a4dc4ff4fe86ec0b5ba5cba9a4799"
Dec 07 20:05:22 addons-946218 kubelet[2322]: I1207 20:05:22.661677 2322 kubelet_volumes.go:161] "Cleaned up orphaned pod volumes dir" podUID="294b182c-8c00-4bd0-a19d-370f998cc8b3" path="/var/lib/kubelet/pods/294b182c-8c00-4bd0-a19d-370f998cc8b3/volumes"
*
* ==> storage-provisioner [cc59fd8925b3] <==
* I1207 20:03:04.897809 1 storage_provisioner.go:116] Initializing the minikube storage provisioner...
I1207 20:03:04.928508 1 storage_provisioner.go:141] Storage provisioner initialized, now starting service!
I1207 20:03:04.929851 1 leaderelection.go:243] attempting to acquire leader lease kube-system/k8s.io-minikube-hostpath...
I1207 20:03:04.945078 1 leaderelection.go:253] successfully acquired lease kube-system/k8s.io-minikube-hostpath
I1207 20:03:04.945262 1 controller.go:835] Starting provisioner controller k8s.io/minikube-hostpath_addons-946218_e3713f22-599b-4515-b325-7f5e4335cb05!
I1207 20:03:04.945368 1 event.go:282] Event(v1.ObjectReference{Kind:"Endpoints", Namespace:"kube-system", Name:"k8s.io-minikube-hostpath", UID:"13c59dd0-3cc9-4f7c-9875-695381f72f55", APIVersion:"v1", ResourceVersion:"642", FieldPath:""}): type: 'Normal' reason: 'LeaderElection' addons-946218_e3713f22-599b-4515-b325-7f5e4335cb05 became leader
I1207 20:03:05.046638 1 controller.go:884] Started provisioner controller k8s.io/minikube-hostpath_addons-946218_e3713f22-599b-4515-b325-7f5e4335cb05!
-- /stdout --
helpers_test.go:254: (dbg) Run: out/minikube-linux-arm64 status --format={{.APIServer}} -p addons-946218 -n addons-946218
helpers_test.go:261: (dbg) Run: kubectl --context addons-946218 get po -o=jsonpath={.items[*].metadata.name} -A --field-selector=status.phase!=Running
helpers_test.go:285: <<< TestAddons/parallel/Ingress FAILED: end of post-mortem logs <<<
helpers_test.go:286: ---------------------/post-mortem---------------------------------
--- FAIL: TestAddons/parallel/Ingress (39.46s)