Test Report: Docker_Linux_containerd_arm64 17703

e76ebe347b3a1e1a0d734b84313c6ab0b6541a2c:2023-12-01:32109

Test fail (14/314)

TestAddons/parallel/Ingress (38.33s)

=== RUN   TestAddons/parallel/Ingress
=== PAUSE TestAddons/parallel/Ingress

=== CONT  TestAddons/parallel/Ingress
addons_test.go:206: (dbg) Run:  kubectl --context addons-488129 wait --for=condition=ready --namespace=ingress-nginx pod --selector=app.kubernetes.io/component=controller --timeout=90s
addons_test.go:231: (dbg) Run:  kubectl --context addons-488129 replace --force -f testdata/nginx-ingress-v1.yaml
addons_test.go:244: (dbg) Run:  kubectl --context addons-488129 replace --force -f testdata/nginx-pod-svc.yaml
addons_test.go:249: (dbg) TestAddons/parallel/Ingress: waiting 8m0s for pods matching "run=nginx" in namespace "default" ...
helpers_test.go:344: "nginx" [9deca4c7-d419-4e74-87c0-e0dc4481a7f1] Pending / Ready:ContainersNotReady (containers with unready status: [nginx]) / ContainersReady:ContainersNotReady (containers with unready status: [nginx])
helpers_test.go:344: "nginx" [9deca4c7-d419-4e74-87c0-e0dc4481a7f1] Running
addons_test.go:249: (dbg) TestAddons/parallel/Ingress: run=nginx healthy within 9.013674941s
addons_test.go:261: (dbg) Run:  out/minikube-linux-arm64 -p addons-488129 ssh "curl -s http://127.0.0.1/ -H 'Host: nginx.example.com'"
addons_test.go:285: (dbg) Run:  kubectl --context addons-488129 replace --force -f testdata/ingress-dns-example-v1.yaml
addons_test.go:290: (dbg) Run:  out/minikube-linux-arm64 -p addons-488129 ip
addons_test.go:296: (dbg) Run:  nslookup hello-john.test 192.168.49.2
addons_test.go:296: (dbg) Non-zero exit: nslookup hello-john.test 192.168.49.2: exit status 1 (15.136781078s)

-- stdout --
	;; connection timed out; no servers could be reached
	
	

-- /stdout --
addons_test.go:298: failed to nslookup hello-john.test host. args "nslookup hello-john.test 192.168.49.2" : exit status 1
addons_test.go:302: unexpected output from nslookup. stdout: ;; connection timed out; no servers could be reached

stderr: 
addons_test.go:305: (dbg) Run:  out/minikube-linux-arm64 -p addons-488129 addons disable ingress-dns --alsologtostderr -v=1
addons_test.go:305: (dbg) Done: out/minikube-linux-arm64 -p addons-488129 addons disable ingress-dns --alsologtostderr -v=1: (1.288889554s)
addons_test.go:310: (dbg) Run:  out/minikube-linux-arm64 -p addons-488129 addons disable ingress --alsologtostderr -v=1
addons_test.go:310: (dbg) Done: out/minikube-linux-arm64 -p addons-488129 addons disable ingress --alsologtostderr -v=1: (7.793589661s)
helpers_test.go:222: -----------------------post-mortem--------------------------------
helpers_test.go:230: ======>  post-mortem[TestAddons/parallel/Ingress]: docker inspect <======
helpers_test.go:231: (dbg) Run:  docker inspect addons-488129
helpers_test.go:235: (dbg) docker inspect addons-488129:

-- stdout --
	[
	    {
	        "Id": "dcde7581e9ae4c65546df88c28f59648697b482586a2688a22458ea7efe217a9",
	        "Created": "2023-12-01T18:52:25.369213294Z",
	        "Path": "/usr/local/bin/entrypoint",
	        "Args": [
	            "/sbin/init"
	        ],
	        "State": {
	            "Status": "running",
	            "Running": true,
	            "Paused": false,
	            "Restarting": false,
	            "OOMKilled": false,
	            "Dead": false,
	            "Pid": 259349,
	            "ExitCode": 0,
	            "Error": "",
	            "StartedAt": "2023-12-01T18:52:25.700049394Z",
	            "FinishedAt": "0001-01-01T00:00:00Z"
	        },
	        "Image": "sha256:e4e0f3cc6f04c458835e9edb05d52f031520d40521bc3568d81cbb7c06a79ef2",
	        "ResolvConfPath": "/var/lib/docker/containers/dcde7581e9ae4c65546df88c28f59648697b482586a2688a22458ea7efe217a9/resolv.conf",
	        "HostnamePath": "/var/lib/docker/containers/dcde7581e9ae4c65546df88c28f59648697b482586a2688a22458ea7efe217a9/hostname",
	        "HostsPath": "/var/lib/docker/containers/dcde7581e9ae4c65546df88c28f59648697b482586a2688a22458ea7efe217a9/hosts",
	        "LogPath": "/var/lib/docker/containers/dcde7581e9ae4c65546df88c28f59648697b482586a2688a22458ea7efe217a9/dcde7581e9ae4c65546df88c28f59648697b482586a2688a22458ea7efe217a9-json.log",
	        "Name": "/addons-488129",
	        "RestartCount": 0,
	        "Driver": "overlay2",
	        "Platform": "linux",
	        "MountLabel": "",
	        "ProcessLabel": "",
	        "AppArmorProfile": "unconfined",
	        "ExecIDs": null,
	        "HostConfig": {
	            "Binds": [
	                "/lib/modules:/lib/modules:ro",
	                "addons-488129:/var"
	            ],
	            "ContainerIDFile": "",
	            "LogConfig": {
	                "Type": "json-file",
	                "Config": {}
	            },
	            "NetworkMode": "addons-488129",
	            "PortBindings": {
	                "22/tcp": [
	                    {
	                        "HostIp": "127.0.0.1",
	                        "HostPort": ""
	                    }
	                ],
	                "2376/tcp": [
	                    {
	                        "HostIp": "127.0.0.1",
	                        "HostPort": ""
	                    }
	                ],
	                "32443/tcp": [
	                    {
	                        "HostIp": "127.0.0.1",
	                        "HostPort": ""
	                    }
	                ],
	                "5000/tcp": [
	                    {
	                        "HostIp": "127.0.0.1",
	                        "HostPort": ""
	                    }
	                ],
	                "8443/tcp": [
	                    {
	                        "HostIp": "127.0.0.1",
	                        "HostPort": ""
	                    }
	                ]
	            },
	            "RestartPolicy": {
	                "Name": "no",
	                "MaximumRetryCount": 0
	            },
	            "AutoRemove": false,
	            "VolumeDriver": "",
	            "VolumesFrom": null,
	            "ConsoleSize": [
	                0,
	                0
	            ],
	            "CapAdd": null,
	            "CapDrop": null,
	            "CgroupnsMode": "host",
	            "Dns": [],
	            "DnsOptions": [],
	            "DnsSearch": [],
	            "ExtraHosts": null,
	            "GroupAdd": null,
	            "IpcMode": "private",
	            "Cgroup": "",
	            "Links": null,
	            "OomScoreAdj": 0,
	            "PidMode": "",
	            "Privileged": true,
	            "PublishAllPorts": false,
	            "ReadonlyRootfs": false,
	            "SecurityOpt": [
	                "seccomp=unconfined",
	                "apparmor=unconfined",
	                "label=disable"
	            ],
	            "Tmpfs": {
	                "/run": "",
	                "/tmp": ""
	            },
	            "UTSMode": "",
	            "UsernsMode": "",
	            "ShmSize": 67108864,
	            "Runtime": "runc",
	            "Isolation": "",
	            "CpuShares": 0,
	            "Memory": 4194304000,
	            "NanoCpus": 2000000000,
	            "CgroupParent": "",
	            "BlkioWeight": 0,
	            "BlkioWeightDevice": [],
	            "BlkioDeviceReadBps": [],
	            "BlkioDeviceWriteBps": [],
	            "BlkioDeviceReadIOps": [],
	            "BlkioDeviceWriteIOps": [],
	            "CpuPeriod": 0,
	            "CpuQuota": 0,
	            "CpuRealtimePeriod": 0,
	            "CpuRealtimeRuntime": 0,
	            "CpusetCpus": "",
	            "CpusetMems": "",
	            "Devices": [],
	            "DeviceCgroupRules": null,
	            "DeviceRequests": null,
	            "MemoryReservation": 0,
	            "MemorySwap": 8388608000,
	            "MemorySwappiness": null,
	            "OomKillDisable": false,
	            "PidsLimit": null,
	            "Ulimits": null,
	            "CpuCount": 0,
	            "CpuPercent": 0,
	            "IOMaximumIOps": 0,
	            "IOMaximumBandwidth": 0,
	            "MaskedPaths": null,
	            "ReadonlyPaths": null
	        },
	        "GraphDriver": {
	            "Data": {
	                "LowerDir": "/var/lib/docker/overlay2/fd00a5a5773a02b06b7117d4f3d2dae3e9b1818a8852feb1daae32c91259bce4-init/diff:/var/lib/docker/overlay2/049ae54891020b74263d4d0f668244f51ae19df0871773fd59686314976f2fd9/diff",
	                "MergedDir": "/var/lib/docker/overlay2/fd00a5a5773a02b06b7117d4f3d2dae3e9b1818a8852feb1daae32c91259bce4/merged",
	                "UpperDir": "/var/lib/docker/overlay2/fd00a5a5773a02b06b7117d4f3d2dae3e9b1818a8852feb1daae32c91259bce4/diff",
	                "WorkDir": "/var/lib/docker/overlay2/fd00a5a5773a02b06b7117d4f3d2dae3e9b1818a8852feb1daae32c91259bce4/work"
	            },
	            "Name": "overlay2"
	        },
	        "Mounts": [
	            {
	                "Type": "bind",
	                "Source": "/lib/modules",
	                "Destination": "/lib/modules",
	                "Mode": "ro",
	                "RW": false,
	                "Propagation": "rprivate"
	            },
	            {
	                "Type": "volume",
	                "Name": "addons-488129",
	                "Source": "/var/lib/docker/volumes/addons-488129/_data",
	                "Destination": "/var",
	                "Driver": "local",
	                "Mode": "z",
	                "RW": true,
	                "Propagation": ""
	            }
	        ],
	        "Config": {
	            "Hostname": "addons-488129",
	            "Domainname": "",
	            "User": "",
	            "AttachStdin": false,
	            "AttachStdout": false,
	            "AttachStderr": false,
	            "ExposedPorts": {
	                "22/tcp": {},
	                "2376/tcp": {},
	                "32443/tcp": {},
	                "5000/tcp": {},
	                "8443/tcp": {}
	            },
	            "Tty": true,
	            "OpenStdin": false,
	            "StdinOnce": false,
	            "Env": [
	                "container=docker",
	                "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
	            ],
	            "Cmd": null,
	            "Image": "gcr.io/k8s-minikube/kicbase-builds:v0.0.42-1701387262-17703@sha256:a5458414df1be5e58eff93b3e67e6ecaad7e51ab23139de15714f7345af15e2f",
	            "Volumes": null,
	            "WorkingDir": "/",
	            "Entrypoint": [
	                "/usr/local/bin/entrypoint",
	                "/sbin/init"
	            ],
	            "OnBuild": null,
	            "Labels": {
	                "created_by.minikube.sigs.k8s.io": "true",
	                "mode.minikube.sigs.k8s.io": "addons-488129",
	                "name.minikube.sigs.k8s.io": "addons-488129",
	                "role.minikube.sigs.k8s.io": ""
	            },
	            "StopSignal": "SIGRTMIN+3"
	        },
	        "NetworkSettings": {
	            "Bridge": "",
	            "SandboxID": "5ba7520feb426d380de97983f648385d965f4f5c2f3170afd7c2f9c337f1b84c",
	            "HairpinMode": false,
	            "LinkLocalIPv6Address": "",
	            "LinkLocalIPv6PrefixLen": 0,
	            "Ports": {
	                "22/tcp": [
	                    {
	                        "HostIp": "127.0.0.1",
	                        "HostPort": "33083"
	                    }
	                ],
	                "2376/tcp": [
	                    {
	                        "HostIp": "127.0.0.1",
	                        "HostPort": "33082"
	                    }
	                ],
	                "32443/tcp": [
	                    {
	                        "HostIp": "127.0.0.1",
	                        "HostPort": "33079"
	                    }
	                ],
	                "5000/tcp": [
	                    {
	                        "HostIp": "127.0.0.1",
	                        "HostPort": "33081"
	                    }
	                ],
	                "8443/tcp": [
	                    {
	                        "HostIp": "127.0.0.1",
	                        "HostPort": "33080"
	                    }
	                ]
	            },
	            "SandboxKey": "/var/run/docker/netns/5ba7520feb42",
	            "SecondaryIPAddresses": null,
	            "SecondaryIPv6Addresses": null,
	            "EndpointID": "",
	            "Gateway": "",
	            "GlobalIPv6Address": "",
	            "GlobalIPv6PrefixLen": 0,
	            "IPAddress": "",
	            "IPPrefixLen": 0,
	            "IPv6Gateway": "",
	            "MacAddress": "",
	            "Networks": {
	                "addons-488129": {
	                    "IPAMConfig": {
	                        "IPv4Address": "192.168.49.2"
	                    },
	                    "Links": null,
	                    "Aliases": [
	                        "dcde7581e9ae",
	                        "addons-488129"
	                    ],
	                    "NetworkID": "6e870a202417bbfbf5c8dbd975bb991a3fdf734085ed6b1a403d120ddd916e7c",
	                    "EndpointID": "b280d003446d9dc9df70defeb12adb8d212e9a3eba594a86b373edf2751d03aa",
	                    "Gateway": "192.168.49.1",
	                    "IPAddress": "192.168.49.2",
	                    "IPPrefixLen": 24,
	                    "IPv6Gateway": "",
	                    "GlobalIPv6Address": "",
	                    "GlobalIPv6PrefixLen": 0,
	                    "MacAddress": "02:42:c0:a8:31:02",
	                    "DriverOpts": null
	                }
	            }
	        }
	    }
	]

-- /stdout --
helpers_test.go:239: (dbg) Run:  out/minikube-linux-arm64 status --format={{.Host}} -p addons-488129 -n addons-488129
helpers_test.go:244: <<< TestAddons/parallel/Ingress FAILED: start of post-mortem logs <<<
helpers_test.go:245: ======>  post-mortem[TestAddons/parallel/Ingress]: minikube logs <======
helpers_test.go:247: (dbg) Run:  out/minikube-linux-arm64 -p addons-488129 logs -n 25
helpers_test.go:247: (dbg) Done: out/minikube-linux-arm64 -p addons-488129 logs -n 25: (1.614892822s)
helpers_test.go:252: TestAddons/parallel/Ingress logs: 
-- stdout --
	* 
	* ==> Audit <==
	* |---------|---------------------------------------------------------------------------------------------|------------------------|---------|---------|---------------------|---------------------|
	| Command |                                            Args                                             |        Profile         |  User   | Version |     Start Time      |      End Time       |
	|---------|---------------------------------------------------------------------------------------------|------------------------|---------|---------|---------------------|---------------------|
	| delete  | --all                                                                                       | minikube               | jenkins | v1.32.0 | 01 Dec 23 18:51 UTC | 01 Dec 23 18:51 UTC |
	| delete  | -p download-only-609102                                                                     | download-only-609102   | jenkins | v1.32.0 | 01 Dec 23 18:52 UTC | 01 Dec 23 18:52 UTC |
	| delete  | -p download-only-609102                                                                     | download-only-609102   | jenkins | v1.32.0 | 01 Dec 23 18:52 UTC | 01 Dec 23 18:52 UTC |
	| start   | --download-only -p                                                                          | download-docker-911952 | jenkins | v1.32.0 | 01 Dec 23 18:52 UTC |                     |
	|         | download-docker-911952                                                                      |                        |         |         |                     |                     |
	|         | --alsologtostderr                                                                           |                        |         |         |                     |                     |
	|         | --driver=docker                                                                             |                        |         |         |                     |                     |
	|         | --container-runtime=containerd                                                              |                        |         |         |                     |                     |
	| delete  | -p download-docker-911952                                                                   | download-docker-911952 | jenkins | v1.32.0 | 01 Dec 23 18:52 UTC | 01 Dec 23 18:52 UTC |
	| start   | --download-only -p                                                                          | binary-mirror-036619   | jenkins | v1.32.0 | 01 Dec 23 18:52 UTC |                     |
	|         | binary-mirror-036619                                                                        |                        |         |         |                     |                     |
	|         | --alsologtostderr                                                                           |                        |         |         |                     |                     |
	|         | --binary-mirror                                                                             |                        |         |         |                     |                     |
	|         | http://127.0.0.1:46513                                                                      |                        |         |         |                     |                     |
	|         | --driver=docker                                                                             |                        |         |         |                     |                     |
	|         | --container-runtime=containerd                                                              |                        |         |         |                     |                     |
	| delete  | -p binary-mirror-036619                                                                     | binary-mirror-036619   | jenkins | v1.32.0 | 01 Dec 23 18:52 UTC | 01 Dec 23 18:52 UTC |
	| addons  | disable dashboard -p                                                                        | addons-488129          | jenkins | v1.32.0 | 01 Dec 23 18:52 UTC |                     |
	|         | addons-488129                                                                               |                        |         |         |                     |                     |
	| addons  | enable dashboard -p                                                                         | addons-488129          | jenkins | v1.32.0 | 01 Dec 23 18:52 UTC |                     |
	|         | addons-488129                                                                               |                        |         |         |                     |                     |
	| start   | -p addons-488129 --wait=true                                                                | addons-488129          | jenkins | v1.32.0 | 01 Dec 23 18:52 UTC | 01 Dec 23 18:54 UTC |
	|         | --memory=4000 --alsologtostderr                                                             |                        |         |         |                     |                     |
	|         | --addons=registry                                                                           |                        |         |         |                     |                     |
	|         | --addons=metrics-server                                                                     |                        |         |         |                     |                     |
	|         | --addons=volumesnapshots                                                                    |                        |         |         |                     |                     |
	|         | --addons=csi-hostpath-driver                                                                |                        |         |         |                     |                     |
	|         | --addons=gcp-auth                                                                           |                        |         |         |                     |                     |
	|         | --addons=cloud-spanner                                                                      |                        |         |         |                     |                     |
	|         | --addons=inspektor-gadget                                                                   |                        |         |         |                     |                     |
	|         | --addons=storage-provisioner-rancher                                                        |                        |         |         |                     |                     |
	|         | --addons=nvidia-device-plugin                                                               |                        |         |         |                     |                     |
	|         | --driver=docker                                                                             |                        |         |         |                     |                     |
	|         | --container-runtime=containerd                                                              |                        |         |         |                     |                     |
	|         | --addons=ingress                                                                            |                        |         |         |                     |                     |
	|         | --addons=ingress-dns                                                                        |                        |         |         |                     |                     |
	| ip      | addons-488129 ip                                                                            | addons-488129          | jenkins | v1.32.0 | 01 Dec 23 18:54 UTC | 01 Dec 23 18:54 UTC |
	| addons  | addons-488129 addons disable                                                                | addons-488129          | jenkins | v1.32.0 | 01 Dec 23 18:54 UTC | 01 Dec 23 18:54 UTC |
	|         | registry --alsologtostderr                                                                  |                        |         |         |                     |                     |
	|         | -v=1                                                                                        |                        |         |         |                     |                     |
	| addons  | disable nvidia-device-plugin                                                                | addons-488129          | jenkins | v1.32.0 | 01 Dec 23 18:54 UTC | 01 Dec 23 18:54 UTC |
	|         | -p addons-488129                                                                            |                        |         |         |                     |                     |
	| ssh     | addons-488129 ssh cat                                                                       | addons-488129          | jenkins | v1.32.0 | 01 Dec 23 18:54 UTC | 01 Dec 23 18:54 UTC |
	|         | /opt/local-path-provisioner/pvc-a606cbf4-5360-4438-af2b-a05f87cae59e_default_test-pvc/file1 |                        |         |         |                     |                     |
	| addons  | addons-488129 addons disable                                                                | addons-488129          | jenkins | v1.32.0 | 01 Dec 23 18:54 UTC | 01 Dec 23 18:55 UTC |
	|         | storage-provisioner-rancher                                                                 |                        |         |         |                     |                     |
	|         | --alsologtostderr -v=1                                                                      |                        |         |         |                     |                     |
	| addons  | addons-488129 addons                                                                        | addons-488129          | jenkins | v1.32.0 | 01 Dec 23 18:55 UTC | 01 Dec 23 18:55 UTC |
	|         | disable csi-hostpath-driver                                                                 |                        |         |         |                     |                     |
	|         | --alsologtostderr -v=1                                                                      |                        |         |         |                     |                     |
	| addons  | addons-488129 addons                                                                        | addons-488129          | jenkins | v1.32.0 | 01 Dec 23 18:55 UTC | 01 Dec 23 18:55 UTC |
	|         | disable volumesnapshots                                                                     |                        |         |         |                     |                     |
	|         | --alsologtostderr -v=1                                                                      |                        |         |         |                     |                     |
	| addons  | disable cloud-spanner -p                                                                    | addons-488129          | jenkins | v1.32.0 | 01 Dec 23 18:55 UTC | 01 Dec 23 18:55 UTC |
	|         | addons-488129                                                                               |                        |         |         |                     |                     |
	| addons  | enable headlamp                                                                             | addons-488129          | jenkins | v1.32.0 | 01 Dec 23 18:55 UTC | 01 Dec 23 18:55 UTC |
	|         | -p addons-488129                                                                            |                        |         |         |                     |                     |
	|         | --alsologtostderr -v=1                                                                      |                        |         |         |                     |                     |
	| addons  | addons-488129 addons                                                                        | addons-488129          | jenkins | v1.32.0 | 01 Dec 23 18:55 UTC | 01 Dec 23 18:55 UTC |
	|         | disable metrics-server                                                                      |                        |         |         |                     |                     |
	|         | --alsologtostderr -v=1                                                                      |                        |         |         |                     |                     |
	| addons  | disable inspektor-gadget -p                                                                 | addons-488129          | jenkins | v1.32.0 | 01 Dec 23 18:55 UTC | 01 Dec 23 18:55 UTC |
	|         | addons-488129                                                                               |                        |         |         |                     |                     |
	| ssh     | addons-488129 ssh curl -s                                                                   | addons-488129          | jenkins | v1.32.0 | 01 Dec 23 18:55 UTC | 01 Dec 23 18:55 UTC |
	|         | http://127.0.0.1/ -H 'Host:                                                                 |                        |         |         |                     |                     |
	|         | nginx.example.com'                                                                          |                        |         |         |                     |                     |
	| ip      | addons-488129 ip                                                                            | addons-488129          | jenkins | v1.32.0 | 01 Dec 23 18:55 UTC | 01 Dec 23 18:56 UTC |
	| addons  | addons-488129 addons disable                                                                | addons-488129          | jenkins | v1.32.0 | 01 Dec 23 18:56 UTC | 01 Dec 23 18:56 UTC |
	|         | ingress-dns --alsologtostderr                                                               |                        |         |         |                     |                     |
	|         | -v=1                                                                                        |                        |         |         |                     |                     |
	| addons  | addons-488129 addons disable                                                                | addons-488129          | jenkins | v1.32.0 | 01 Dec 23 18:56 UTC | 01 Dec 23 18:56 UTC |
	|         | ingress --alsologtostderr -v=1                                                              |                        |         |         |                     |                     |
	|---------|---------------------------------------------------------------------------------------------|------------------------|---------|---------|---------------------|---------------------|
	
	* 
	* ==> Last Start <==
	* Log file created at: 2023/12/01 18:52:02
	Running on machine: ip-172-31-31-251
	Binary: Built with gc go1.21.4 for linux/arm64
	Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg
	I1201 18:52:02.089715  258880 out.go:296] Setting OutFile to fd 1 ...
	I1201 18:52:02.089954  258880 out.go:343] TERM=,COLORTERM=, which probably does not support color
	I1201 18:52:02.089980  258880 out.go:309] Setting ErrFile to fd 2...
	I1201 18:52:02.089998  258880 out.go:343] TERM=,COLORTERM=, which probably does not support color
	I1201 18:52:02.090294  258880 root.go:338] Updating PATH: /home/jenkins/minikube-integration/17703-252966/.minikube/bin
	I1201 18:52:02.090870  258880 out.go:303] Setting JSON to false
	I1201 18:52:02.091820  258880 start.go:128] hostinfo: {"hostname":"ip-172-31-31-251","uptime":5668,"bootTime":1701451054,"procs":168,"os":"linux","platform":"ubuntu","platformFamily":"debian","platformVersion":"20.04","kernelVersion":"5.15.0-1050-aws","kernelArch":"aarch64","virtualizationSystem":"","virtualizationRole":"","hostId":"982e3628-3742-4b3e-bb63-ac1b07660ec7"}
	I1201 18:52:02.091922  258880 start.go:138] virtualization:  
	I1201 18:52:02.094478  258880 out.go:177] * [addons-488129] minikube v1.32.0 on Ubuntu 20.04 (arm64)
	I1201 18:52:02.097073  258880 out.go:177]   - MINIKUBE_LOCATION=17703
	I1201 18:52:02.099208  258880 out.go:177]   - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
	I1201 18:52:02.097136  258880 notify.go:220] Checking for updates...
	I1201 18:52:02.102925  258880 out.go:177]   - KUBECONFIG=/home/jenkins/minikube-integration/17703-252966/kubeconfig
	I1201 18:52:02.104940  258880 out.go:177]   - MINIKUBE_HOME=/home/jenkins/minikube-integration/17703-252966/.minikube
	I1201 18:52:02.107066  258880 out.go:177]   - MINIKUBE_BIN=out/minikube-linux-arm64
	I1201 18:52:02.109784  258880 out.go:177]   - MINIKUBE_FORCE_SYSTEMD=
	I1201 18:52:02.111904  258880 driver.go:392] Setting default libvirt URI to qemu:///system
	I1201 18:52:02.135673  258880 docker.go:122] docker version: linux-24.0.7:Docker Engine - Community
	I1201 18:52:02.135805  258880 cli_runner.go:164] Run: docker system info --format "{{json .}}"
	I1201 18:52:02.219477  258880 info.go:266] docker info: {ID:EOU5:DNGX:XN6V:L2FZ:UXRM:5TWK:EVUR:KC2F:GT7Z:Y4O4:GB77:5PD3 Containers:0 ContainersRunning:0 ContainersPaused:0 ContainersStopped:0 Images:1 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local logentries splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:true BridgeNfIP6Tables:true Debug:false NFd:28 OomKillDisable:true NGoroutines:38 SystemTime:2023-12-01 18:52:02.209845996 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1050-aws OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Architecture:aarch64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:2 MemTotal:8215040000 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ip-172-31-31-251 Labels:[] ExperimentalBuild:false ServerVersion:24.0.7 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:d8f198a4ed8892c764191ef7b3b06d8a2eeb5c7f Expected:d8f198a4ed8892c764191ef7b3b06d8a2eeb5c7f} RuncCommit:{ID:v1.1.10-0-g18a0cb0 Expected:v1.1.10-0-g18a0cb0} InitCommit:{ID:de40ad0 Expected:de40ad0} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.11.2] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.21.0]] Warnings:<nil>}}
	I1201 18:52:02.219588  258880 docker.go:295] overlay module found
	I1201 18:52:02.222523  258880 out.go:177] * Using the docker driver based on user configuration
	I1201 18:52:02.224252  258880 start.go:298] selected driver: docker
	I1201 18:52:02.224268  258880 start.go:902] validating driver "docker" against <nil>
	I1201 18:52:02.224281  258880 start.go:913] status for docker: {Installed:true Healthy:true Running:false NeedsImprovement:false Error:<nil> Reason: Fix: Doc: Version:}
	I1201 18:52:02.224969  258880 cli_runner.go:164] Run: docker system info --format "{{json .}}"
	I1201 18:52:02.286733  258880 info.go:266] docker info: {ID:EOU5:DNGX:XN6V:L2FZ:UXRM:5TWK:EVUR:KC2F:GT7Z:Y4O4:GB77:5PD3 Containers:0 ContainersRunning:0 ContainersPaused:0 ContainersStopped:0 Images:1 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local logentries splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:true BridgeNfIP6Tables:true Debug:false NFd:28 OomKillDisable:true NGoroutines:38 SystemTime:2023-12-01 18:52:02.276794446 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1050-aws OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Architecture:aarch64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:2 MemTotal:8215040000 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ip-172-31-31-251 Labels:[] ExperimentalBuild:false ServerVersion:24.0.7 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:d8f198a4ed8892c764191ef7b3b06d8a2eeb5c7f Expected:d8f198a4ed8892c764191ef7b3b06d8a2eeb5c7f} RuncCommit:{ID:v1.1.10-0-g18a0cb0 Expected:v1.1.10-0-g18a0cb0} InitCommit:{ID:de40ad0 Expected:de40ad0} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.11.2] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.21.0]] Warnings:<nil>}}
	I1201 18:52:02.286898  258880 start_flags.go:309] no existing cluster config was found, will generate one from the flags 
	I1201 18:52:02.287123  258880 start_flags.go:931] Waiting for all components: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
	I1201 18:52:02.288949  258880 out.go:177] * Using Docker driver with root privileges
	I1201 18:52:02.290722  258880 cni.go:84] Creating CNI manager for ""
	I1201 18:52:02.290741  258880 cni.go:143] "docker" driver + "containerd" runtime found, recommending kindnet
	I1201 18:52:02.290753  258880 start_flags.go:318] Found "CNI" CNI - setting NetworkPlugin=cni
	I1201 18:52:02.290767  258880 start_flags.go:323] config:
	{Name:addons-488129 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.42-1701387262-17703@sha256:a5458414df1be5e58eff93b3e67e6ecaad7e51ab23139de15714f7345af15e2f Memory:4000 CPUs:2 DiskSize:20000 VMDriver: Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:0 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.28.4 ClusterName:addons-488129 Namespace:default APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI: NodeIP: NodePort:8443 NodeName:} Nodes:[] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 AutoPauseInterval:1m0s GPUs:}
	I1201 18:52:02.292918  258880 out.go:177] * Starting control plane node addons-488129 in cluster addons-488129
	I1201 18:52:02.294475  258880 cache.go:121] Beginning downloading kic base image for docker with containerd
	I1201 18:52:02.296392  258880 out.go:177] * Pulling base image ...
	I1201 18:52:02.298121  258880 preload.go:132] Checking if preload exists for k8s version v1.28.4 and runtime containerd
	I1201 18:52:02.298186  258880 preload.go:148] Found local preload: /home/jenkins/minikube-integration/17703-252966/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.28.4-containerd-overlay2-arm64.tar.lz4
	I1201 18:52:02.298200  258880 cache.go:56] Caching tarball of preloaded images
	I1201 18:52:02.298278  258880 preload.go:174] Found /home/jenkins/minikube-integration/17703-252966/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.28.4-containerd-overlay2-arm64.tar.lz4 in cache, skipping download
	I1201 18:52:02.298296  258880 cache.go:59] Finished verifying existence of preloaded tar for  v1.28.4 on containerd
	I1201 18:52:02.298663  258880 profile.go:148] Saving config to /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/addons-488129/config.json ...
	I1201 18:52:02.298687  258880 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/addons-488129/config.json: {Name:mk2e2364663712a30ca38a15af8175e460e6d1d5 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
	I1201 18:52:02.298846  258880 image.go:79] Checking for gcr.io/k8s-minikube/kicbase-builds:v0.0.42-1701387262-17703@sha256:a5458414df1be5e58eff93b3e67e6ecaad7e51ab23139de15714f7345af15e2f in local docker daemon
	I1201 18:52:02.316220  258880 cache.go:149] Downloading gcr.io/k8s-minikube/kicbase-builds:v0.0.42-1701387262-17703@sha256:a5458414df1be5e58eff93b3e67e6ecaad7e51ab23139de15714f7345af15e2f to local cache
	I1201 18:52:02.316359  258880 image.go:63] Checking for gcr.io/k8s-minikube/kicbase-builds:v0.0.42-1701387262-17703@sha256:a5458414df1be5e58eff93b3e67e6ecaad7e51ab23139de15714f7345af15e2f in local cache directory
	I1201 18:52:02.316384  258880 image.go:66] Found gcr.io/k8s-minikube/kicbase-builds:v0.0.42-1701387262-17703@sha256:a5458414df1be5e58eff93b3e67e6ecaad7e51ab23139de15714f7345af15e2f in local cache directory, skipping pull
	I1201 18:52:02.316392  258880 image.go:105] gcr.io/k8s-minikube/kicbase-builds:v0.0.42-1701387262-17703@sha256:a5458414df1be5e58eff93b3e67e6ecaad7e51ab23139de15714f7345af15e2f exists in cache, skipping pull
	I1201 18:52:02.316400  258880 cache.go:152] successfully saved gcr.io/k8s-minikube/kicbase-builds:v0.0.42-1701387262-17703@sha256:a5458414df1be5e58eff93b3e67e6ecaad7e51ab23139de15714f7345af15e2f as a tarball
	I1201 18:52:02.316410  258880 cache.go:162] Loading gcr.io/k8s-minikube/kicbase-builds:v0.0.42-1701387262-17703@sha256:a5458414df1be5e58eff93b3e67e6ecaad7e51ab23139de15714f7345af15e2f from local cache
	I1201 18:52:18.163580  258880 cache.go:164] successfully loaded and using gcr.io/k8s-minikube/kicbase-builds:v0.0.42-1701387262-17703@sha256:a5458414df1be5e58eff93b3e67e6ecaad7e51ab23139de15714f7345af15e2f from cached tarball
	I1201 18:52:18.163622  258880 cache.go:194] Successfully downloaded all kic artifacts
	I1201 18:52:18.163676  258880 start.go:365] acquiring machines lock for addons-488129: {Name:mkdbb881ebbce2a1166f82c78d78a82a136a69d2 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
	I1201 18:52:18.163815  258880 start.go:369] acquired machines lock for "addons-488129" in 116.447µs
	I1201 18:52:18.163845  258880 start.go:93] Provisioning new machine with config: &{Name:addons-488129 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.42-1701387262-17703@sha256:a5458414df1be5e58eff93b3e67e6ecaad7e51ab23139de15714f7345af15e2f Memory:4000 CPUs:2 DiskSize:20000 VMDriver: Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:0 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.28.4 ClusterName:addons-488129 Namespace:default APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI: NodeIP: NodePort:8443 NodeName:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.28.4 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 AutoPauseInterval:1m0s GPUs:} &{Name: IP: Port:8443 KubernetesVersion:v1.28.4 ContainerRuntime:containerd ControlPlane:true Worker:true}
	I1201 18:52:18.163929  258880 start.go:125] createHost starting for "" (driver="docker")
	I1201 18:52:18.166553  258880 out.go:204] * Creating docker container (CPUs=2, Memory=4000MB) ...
	I1201 18:52:18.166815  258880 start.go:159] libmachine.API.Create for "addons-488129" (driver="docker")
	I1201 18:52:18.166874  258880 client.go:168] LocalClient.Create starting
	I1201 18:52:18.166998  258880 main.go:141] libmachine: Creating CA: /home/jenkins/minikube-integration/17703-252966/.minikube/certs/ca.pem
	I1201 18:52:18.554991  258880 main.go:141] libmachine: Creating client certificate: /home/jenkins/minikube-integration/17703-252966/.minikube/certs/cert.pem
	I1201 18:52:18.992660  258880 cli_runner.go:164] Run: docker network inspect addons-488129 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
	W1201 18:52:19.014550  258880 cli_runner.go:211] docker network inspect addons-488129 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}" returned with exit code 1
	I1201 18:52:19.014639  258880 network_create.go:281] running [docker network inspect addons-488129] to gather additional debugging logs...
	I1201 18:52:19.014661  258880 cli_runner.go:164] Run: docker network inspect addons-488129
	W1201 18:52:19.031363  258880 cli_runner.go:211] docker network inspect addons-488129 returned with exit code 1
	I1201 18:52:19.031398  258880 network_create.go:284] error running [docker network inspect addons-488129]: docker network inspect addons-488129: exit status 1
	stdout:
	[]
	
	stderr:
	Error response from daemon: network addons-488129 not found
	I1201 18:52:19.031423  258880 network_create.go:286] output of [docker network inspect addons-488129]: -- stdout --
	[]
	
	-- /stdout --
	** stderr ** 
	Error response from daemon: network addons-488129 not found
	
	** /stderr **
	I1201 18:52:19.031524  258880 cli_runner.go:164] Run: docker network inspect bridge --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
	I1201 18:52:19.049814  258880 network.go:209] using free private subnet 192.168.49.0/24: &{IP:192.168.49.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.49.0/24 Gateway:192.168.49.1 ClientMin:192.168.49.2 ClientMax:192.168.49.254 Broadcast:192.168.49.255 IsPrivate:true Interface:{IfaceName: IfaceIPv4: IfaceMTU:0 IfaceMAC:} reservation:0x40027ade30}
	I1201 18:52:19.049859  258880 network_create.go:124] attempt to create docker network addons-488129 192.168.49.0/24 with gateway 192.168.49.1 and MTU of 1500 ...
	I1201 18:52:19.049921  258880 cli_runner.go:164] Run: docker network create --driver=bridge --subnet=192.168.49.0/24 --gateway=192.168.49.1 -o --ip-masq -o --icc -o com.docker.network.driver.mtu=1500 --label=created_by.minikube.sigs.k8s.io=true --label=name.minikube.sigs.k8s.io=addons-488129 addons-488129
	I1201 18:52:19.122621  258880 network_create.go:108] docker network addons-488129 192.168.49.0/24 created
	I1201 18:52:19.122653  258880 kic.go:121] calculated static IP "192.168.49.2" for the "addons-488129" container
	I1201 18:52:19.122742  258880 cli_runner.go:164] Run: docker ps -a --format {{.Names}}
	I1201 18:52:19.139898  258880 cli_runner.go:164] Run: docker volume create addons-488129 --label name.minikube.sigs.k8s.io=addons-488129 --label created_by.minikube.sigs.k8s.io=true
	I1201 18:52:19.158454  258880 oci.go:103] Successfully created a docker volume addons-488129
	I1201 18:52:19.158545  258880 cli_runner.go:164] Run: docker run --rm --name addons-488129-preload-sidecar --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=addons-488129 --entrypoint /usr/bin/test -v addons-488129:/var gcr.io/k8s-minikube/kicbase-builds:v0.0.42-1701387262-17703@sha256:a5458414df1be5e58eff93b3e67e6ecaad7e51ab23139de15714f7345af15e2f -d /var/lib
	I1201 18:52:21.061786  258880 cli_runner.go:217] Completed: docker run --rm --name addons-488129-preload-sidecar --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=addons-488129 --entrypoint /usr/bin/test -v addons-488129:/var gcr.io/k8s-minikube/kicbase-builds:v0.0.42-1701387262-17703@sha256:a5458414df1be5e58eff93b3e67e6ecaad7e51ab23139de15714f7345af15e2f -d /var/lib: (1.903198669s)
	I1201 18:52:21.061820  258880 oci.go:107] Successfully prepared a docker volume addons-488129
	I1201 18:52:21.061863  258880 preload.go:132] Checking if preload exists for k8s version v1.28.4 and runtime containerd
	I1201 18:52:21.061892  258880 kic.go:194] Starting extracting preloaded images to volume ...
	I1201 18:52:21.061983  258880 cli_runner.go:164] Run: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/17703-252966/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.28.4-containerd-overlay2-arm64.tar.lz4:/preloaded.tar:ro -v addons-488129:/extractDir gcr.io/k8s-minikube/kicbase-builds:v0.0.42-1701387262-17703@sha256:a5458414df1be5e58eff93b3e67e6ecaad7e51ab23139de15714f7345af15e2f -I lz4 -xf /preloaded.tar -C /extractDir
	I1201 18:52:25.288243  258880 cli_runner.go:217] Completed: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/17703-252966/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.28.4-containerd-overlay2-arm64.tar.lz4:/preloaded.tar:ro -v addons-488129:/extractDir gcr.io/k8s-minikube/kicbase-builds:v0.0.42-1701387262-17703@sha256:a5458414df1be5e58eff93b3e67e6ecaad7e51ab23139de15714f7345af15e2f -I lz4 -xf /preloaded.tar -C /extractDir: (4.226216244s)
	I1201 18:52:25.288274  258880 kic.go:203] duration metric: took 4.226383 seconds to extract preloaded images to volume
	W1201 18:52:25.288425  258880 cgroups_linux.go:77] Your kernel does not support swap limit capabilities or the cgroup is not mounted.
	I1201 18:52:25.288565  258880 cli_runner.go:164] Run: docker info --format "'{{json .SecurityOptions}}'"
	I1201 18:52:25.353392  258880 cli_runner.go:164] Run: docker run -d -t --privileged --security-opt seccomp=unconfined --tmpfs /tmp --tmpfs /run -v /lib/modules:/lib/modules:ro --hostname addons-488129 --name addons-488129 --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=addons-488129 --label role.minikube.sigs.k8s.io= --label mode.minikube.sigs.k8s.io=addons-488129 --network addons-488129 --ip 192.168.49.2 --volume addons-488129:/var --security-opt apparmor=unconfined --memory=4000mb --cpus=2 -e container=docker --expose 8443 --publish=127.0.0.1::8443 --publish=127.0.0.1::22 --publish=127.0.0.1::2376 --publish=127.0.0.1::5000 --publish=127.0.0.1::32443 gcr.io/k8s-minikube/kicbase-builds:v0.0.42-1701387262-17703@sha256:a5458414df1be5e58eff93b3e67e6ecaad7e51ab23139de15714f7345af15e2f
	I1201 18:52:25.708998  258880 cli_runner.go:164] Run: docker container inspect addons-488129 --format={{.State.Running}}
	I1201 18:52:25.734733  258880 cli_runner.go:164] Run: docker container inspect addons-488129 --format={{.State.Status}}
	I1201 18:52:25.760182  258880 cli_runner.go:164] Run: docker exec addons-488129 stat /var/lib/dpkg/alternatives/iptables
	I1201 18:52:25.847582  258880 oci.go:144] the created container "addons-488129" has a running status.
	I1201 18:52:25.847607  258880 kic.go:225] Creating ssh key for kic: /home/jenkins/minikube-integration/17703-252966/.minikube/machines/addons-488129/id_rsa...
	I1201 18:52:26.195134  258880 kic_runner.go:191] docker (temp): /home/jenkins/minikube-integration/17703-252966/.minikube/machines/addons-488129/id_rsa.pub --> /home/docker/.ssh/authorized_keys (381 bytes)
	I1201 18:52:26.222200  258880 cli_runner.go:164] Run: docker container inspect addons-488129 --format={{.State.Status}}
	I1201 18:52:26.260864  258880 kic_runner.go:93] Run: chown docker:docker /home/docker/.ssh/authorized_keys
	I1201 18:52:26.260890  258880 kic_runner.go:114] Args: [docker exec --privileged addons-488129 chown docker:docker /home/docker/.ssh/authorized_keys]
	I1201 18:52:26.350798  258880 cli_runner.go:164] Run: docker container inspect addons-488129 --format={{.State.Status}}
	I1201 18:52:26.377906  258880 machine.go:88] provisioning docker machine ...
	I1201 18:52:26.377941  258880 ubuntu.go:169] provisioning hostname "addons-488129"
	I1201 18:52:26.378021  258880 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-488129
	I1201 18:52:26.409606  258880 main.go:141] libmachine: Using SSH client type: native
	I1201 18:52:26.410048  258880 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x3be600] 0x3c0d70 <nil>  [] 0s} 127.0.0.1 33083 <nil> <nil>}
	I1201 18:52:26.410069  258880 main.go:141] libmachine: About to run SSH command:
	sudo hostname addons-488129 && echo "addons-488129" | sudo tee /etc/hostname
	I1201 18:52:26.410729  258880 main.go:141] libmachine: Error dialing TCP: ssh: handshake failed: read tcp 127.0.0.1:56314->127.0.0.1:33083: read: connection reset by peer
	I1201 18:52:29.578762  258880 main.go:141] libmachine: SSH cmd err, output: <nil>: addons-488129
	
	I1201 18:52:29.578847  258880 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-488129
	I1201 18:52:29.598219  258880 main.go:141] libmachine: Using SSH client type: native
	I1201 18:52:29.598624  258880 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x3be600] 0x3c0d70 <nil>  [] 0s} 127.0.0.1 33083 <nil> <nil>}
	I1201 18:52:29.598642  258880 main.go:141] libmachine: About to run SSH command:
	
			if ! grep -xq '.*\saddons-488129' /etc/hosts; then
				if grep -xq '127.0.1.1\s.*' /etc/hosts; then
					sudo sed -i 's/^127.0.1.1\s.*/127.0.1.1 addons-488129/g' /etc/hosts;
				else 
					echo '127.0.1.1 addons-488129' | sudo tee -a /etc/hosts; 
				fi
			fi
	I1201 18:52:29.745491  258880 main.go:141] libmachine: SSH cmd err, output: <nil>: 
	I1201 18:52:29.745525  258880 ubuntu.go:175] set auth options {CertDir:/home/jenkins/minikube-integration/17703-252966/.minikube CaCertPath:/home/jenkins/minikube-integration/17703-252966/.minikube/certs/ca.pem CaPrivateKeyPath:/home/jenkins/minikube-integration/17703-252966/.minikube/certs/ca-key.pem CaCertRemotePath:/etc/docker/ca.pem ServerCertPath:/home/jenkins/minikube-integration/17703-252966/.minikube/machines/server.pem ServerKeyPath:/home/jenkins/minikube-integration/17703-252966/.minikube/machines/server-key.pem ClientKeyPath:/home/jenkins/minikube-integration/17703-252966/.minikube/certs/key.pem ServerCertRemotePath:/etc/docker/server.pem ServerKeyRemotePath:/etc/docker/server-key.pem ClientCertPath:/home/jenkins/minikube-integration/17703-252966/.minikube/certs/cert.pem ServerCertSANs:[] StorePath:/home/jenkins/minikube-integration/17703-252966/.minikube}
	I1201 18:52:29.745545  258880 ubuntu.go:177] setting up certificates
	I1201 18:52:29.745555  258880 provision.go:83] configureAuth start
	I1201 18:52:29.745611  258880 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" addons-488129
	I1201 18:52:29.764017  258880 provision.go:138] copyHostCerts
	I1201 18:52:29.764092  258880 exec_runner.go:151] cp: /home/jenkins/minikube-integration/17703-252966/.minikube/certs/ca.pem --> /home/jenkins/minikube-integration/17703-252966/.minikube/ca.pem (1078 bytes)
	I1201 18:52:29.764214  258880 exec_runner.go:151] cp: /home/jenkins/minikube-integration/17703-252966/.minikube/certs/cert.pem --> /home/jenkins/minikube-integration/17703-252966/.minikube/cert.pem (1123 bytes)
	I1201 18:52:29.764265  258880 exec_runner.go:151] cp: /home/jenkins/minikube-integration/17703-252966/.minikube/certs/key.pem --> /home/jenkins/minikube-integration/17703-252966/.minikube/key.pem (1679 bytes)
	I1201 18:52:29.764307  258880 provision.go:112] generating server cert: /home/jenkins/minikube-integration/17703-252966/.minikube/machines/server.pem ca-key=/home/jenkins/minikube-integration/17703-252966/.minikube/certs/ca.pem private-key=/home/jenkins/minikube-integration/17703-252966/.minikube/certs/ca-key.pem org=jenkins.addons-488129 san=[192.168.49.2 127.0.0.1 localhost 127.0.0.1 minikube addons-488129]
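The server certificate above is generated in Go by minikube's provisioner; a rough openssl equivalent is sketched below, with the org and SANs taken from the log line and everything else (key size, validity period, relative paths) assumed for illustration:

	openssl req -new -newkey rsa:2048 -nodes -subj "/O=jenkins.addons-488129" \
	  -keyout machines/server-key.pem -out /tmp/server.csr
	openssl x509 -req -in /tmp/server.csr -CA certs/ca.pem -CAkey certs/ca-key.pem \
	  -CAcreateserial -days 365 -out machines/server.pem \
	  -extfile <(printf 'subjectAltName=IP:192.168.49.2,IP:127.0.0.1,DNS:localhost,DNS:minikube,DNS:addons-488129')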
	I1201 18:52:30.055838  258880 provision.go:172] copyRemoteCerts
	I1201 18:52:30.055938  258880 ssh_runner.go:195] Run: sudo mkdir -p /etc/docker /etc/docker /etc/docker
	I1201 18:52:30.055986  258880 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-488129
	I1201 18:52:30.087771  258880 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33083 SSHKeyPath:/home/jenkins/minikube-integration/17703-252966/.minikube/machines/addons-488129/id_rsa Username:docker}
	I1201 18:52:30.200277  258880 ssh_runner.go:362] scp /home/jenkins/minikube-integration/17703-252966/.minikube/certs/ca.pem --> /etc/docker/ca.pem (1078 bytes)
	I1201 18:52:30.233782  258880 ssh_runner.go:362] scp /home/jenkins/minikube-integration/17703-252966/.minikube/machines/server.pem --> /etc/docker/server.pem (1216 bytes)
	I1201 18:52:30.262886  258880 ssh_runner.go:362] scp /home/jenkins/minikube-integration/17703-252966/.minikube/machines/server-key.pem --> /etc/docker/server-key.pem (1679 bytes)
	I1201 18:52:30.291667  258880 provision.go:86] duration metric: configureAuth took 546.099235ms
	I1201 18:52:30.291695  258880 ubuntu.go:193] setting minikube options for container-runtime
	I1201 18:52:30.291926  258880 config.go:182] Loaded profile config "addons-488129": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.28.4
	I1201 18:52:30.291940  258880 machine.go:91] provisioned docker machine in 3.914014204s
	I1201 18:52:30.291947  258880 client.go:171] LocalClient.Create took 12.125063965s
	I1201 18:52:30.291966  258880 start.go:167] duration metric: libmachine.API.Create for "addons-488129" took 12.12515245s
	I1201 18:52:30.291980  258880 start.go:300] post-start starting for "addons-488129" (driver="docker")
	I1201 18:52:30.291989  258880 start.go:329] creating required directories: [/etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs]
	I1201 18:52:30.292042  258880 ssh_runner.go:195] Run: sudo mkdir -p /etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs
	I1201 18:52:30.292084  258880 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-488129
	I1201 18:52:30.310006  258880 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33083 SSHKeyPath:/home/jenkins/minikube-integration/17703-252966/.minikube/machines/addons-488129/id_rsa Username:docker}
	I1201 18:52:30.419057  258880 ssh_runner.go:195] Run: cat /etc/os-release
	I1201 18:52:30.423101  258880 main.go:141] libmachine: Couldn't set key VERSION_CODENAME, no corresponding struct field found
	I1201 18:52:30.423136  258880 main.go:141] libmachine: Couldn't set key PRIVACY_POLICY_URL, no corresponding struct field found
	I1201 18:52:30.423151  258880 main.go:141] libmachine: Couldn't set key UBUNTU_CODENAME, no corresponding struct field found
	I1201 18:52:30.423163  258880 info.go:137] Remote host: Ubuntu 22.04.3 LTS
	I1201 18:52:30.423177  258880 filesync.go:126] Scanning /home/jenkins/minikube-integration/17703-252966/.minikube/addons for local assets ...
	I1201 18:52:30.423239  258880 filesync.go:126] Scanning /home/jenkins/minikube-integration/17703-252966/.minikube/files for local assets ...
	I1201 18:52:30.423267  258880 start.go:303] post-start completed in 131.280281ms
	I1201 18:52:30.423595  258880 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" addons-488129
	I1201 18:52:30.441162  258880 profile.go:148] Saving config to /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/addons-488129/config.json ...
	I1201 18:52:30.441435  258880 ssh_runner.go:195] Run: sh -c "df -h /var | awk 'NR==2{print $5}'"
	I1201 18:52:30.441489  258880 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-488129
	I1201 18:52:30.460088  258880 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33083 SSHKeyPath:/home/jenkins/minikube-integration/17703-252966/.minikube/machines/addons-488129/id_rsa Username:docker}
	I1201 18:52:30.562449  258880 ssh_runner.go:195] Run: sh -c "df -BG /var | awk 'NR==2{print $4}'"
	I1201 18:52:30.568018  258880 start.go:128] duration metric: createHost completed in 12.404074138s
	I1201 18:52:30.568044  258880 start.go:83] releasing machines lock for "addons-488129", held for 12.404216144s
	I1201 18:52:30.568111  258880 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" addons-488129
	I1201 18:52:30.585511  258880 ssh_runner.go:195] Run: cat /version.json
	I1201 18:52:30.585565  258880 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-488129
	I1201 18:52:30.585640  258880 ssh_runner.go:195] Run: curl -sS -m 2 https://registry.k8s.io/
	I1201 18:52:30.585700  258880 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-488129
	I1201 18:52:30.608122  258880 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33083 SSHKeyPath:/home/jenkins/minikube-integration/17703-252966/.minikube/machines/addons-488129/id_rsa Username:docker}
	I1201 18:52:30.618063  258880 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33083 SSHKeyPath:/home/jenkins/minikube-integration/17703-252966/.minikube/machines/addons-488129/id_rsa Username:docker}
	I1201 18:52:30.842947  258880 ssh_runner.go:195] Run: systemctl --version
	I1201 18:52:30.848835  258880 ssh_runner.go:195] Run: sh -c "stat /etc/cni/net.d/*loopback.conf*"
	I1201 18:52:30.854531  258880 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f -name *loopback.conf* -not -name *.mk_disabled -exec sh -c "grep -q loopback {} && ( grep -q name {} || sudo sed -i '/"type": "loopback"/i \ \ \ \ "name": "loopback",' {} ) && sudo sed -i 's|"cniVersion": ".*"|"cniVersion": "1.0.0"|g' {}" ;
	I1201 18:52:30.885486  258880 cni.go:230] loopback cni configuration patched: "/etc/cni/net.d/*loopback.conf*" found
	I1201 18:52:30.885605  258880 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f ( ( -name *bridge* -or -name *podman* ) -and -not -name *.mk_disabled ) -printf "%!p(MISSING), " -exec sh -c "sudo mv {} {}.mk_disabled" ;
	I1201 18:52:30.921069  258880 cni.go:262] disabled [/etc/cni/net.d/87-podman-bridge.conflist, /etc/cni/net.d/100-crio-bridge.conf] bridge cni config(s)
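The loopback patch above injects a "name" field and bumps cniVersion, since the CNI 1.0.0 spec requires a network name; after it runs, the file matched by *loopback.conf* should look roughly like this (exact filename and any extra keys depend on the base image):

	cat /etc/cni/net.d/*loopback.conf*
	# {
	#   "cniVersion": "1.0.0",
	#   "name": "loopback",
	#   "type": "loopback"
	# }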
	I1201 18:52:30.921144  258880 start.go:475] detecting cgroup driver to use...
	I1201 18:52:30.921190  258880 detect.go:196] detected "cgroupfs" cgroup driver on host os
	I1201 18:52:30.921273  258880 ssh_runner.go:195] Run: sudo systemctl stop -f crio
	I1201 18:52:30.936175  258880 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service crio
	I1201 18:52:30.949679  258880 docker.go:203] disabling cri-docker service (if available) ...
	I1201 18:52:30.949816  258880 ssh_runner.go:195] Run: sudo systemctl stop -f cri-docker.socket
	I1201 18:52:30.965984  258880 ssh_runner.go:195] Run: sudo systemctl stop -f cri-docker.service
	I1201 18:52:30.982821  258880 ssh_runner.go:195] Run: sudo systemctl disable cri-docker.socket
	I1201 18:52:31.076992  258880 ssh_runner.go:195] Run: sudo systemctl mask cri-docker.service
	I1201 18:52:31.182637  258880 docker.go:219] disabling docker service ...
	I1201 18:52:31.182724  258880 ssh_runner.go:195] Run: sudo systemctl stop -f docker.socket
	I1201 18:52:31.205873  258880 ssh_runner.go:195] Run: sudo systemctl stop -f docker.service
	I1201 18:52:31.219983  258880 ssh_runner.go:195] Run: sudo systemctl disable docker.socket
	I1201 18:52:31.313081  258880 ssh_runner.go:195] Run: sudo systemctl mask docker.service
	I1201 18:52:31.405213  258880 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service docker
	I1201 18:52:31.419338  258880 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %!s(MISSING) "runtime-endpoint: unix:///run/containerd/containerd.sock
	" | sudo tee /etc/crictl.yaml"
	I1201 18:52:31.440896  258880 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)sandbox_image = .*$|\1sandbox_image = "registry.k8s.io/pause:3.9"|' /etc/containerd/config.toml"
	I1201 18:52:31.453194  258880 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)restrict_oom_score_adj = .*$|\1restrict_oom_score_adj = false|' /etc/containerd/config.toml"
	I1201 18:52:31.465761  258880 containerd.go:145] configuring containerd to use "cgroupfs" as cgroup driver...
	I1201 18:52:31.465831  258880 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)SystemdCgroup = .*$|\1SystemdCgroup = false|g' /etc/containerd/config.toml"
	I1201 18:52:31.478317  258880 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runtime.v1.linux"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
	I1201 18:52:31.490503  258880 ssh_runner.go:195] Run: sh -c "sudo sed -i '/systemd_cgroup/d' /etc/containerd/config.toml"
	I1201 18:52:31.502740  258880 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runc.v1"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
	I1201 18:52:31.515531  258880 ssh_runner.go:195] Run: sh -c "sudo rm -rf /etc/cni/net.mk"
	I1201 18:52:31.526973  258880 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)conf_dir = .*$|\1conf_dir = "/etc/cni/net.d"|g' /etc/containerd/config.toml"
	I1201 18:52:31.539137  258880 ssh_runner.go:195] Run: sudo sysctl net.bridge.bridge-nf-call-iptables
	I1201 18:52:31.549780  258880 ssh_runner.go:195] Run: sudo sh -c "echo 1 > /proc/sys/net/ipv4/ip_forward"
	I1201 18:52:31.560434  258880 ssh_runner.go:195] Run: sudo systemctl daemon-reload
	I1201 18:52:31.652255  258880 ssh_runner.go:195] Run: sudo systemctl restart containerd
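The sed commands above rewrite /etc/containerd/config.toml in place before the restart; a quick way to confirm the intended values (expected lines are taken from those commands, their position in the file will vary):

	sudo grep -E 'sandbox_image|restrict_oom_score_adj|SystemdCgroup|conf_dir' /etc/containerd/config.toml
	# sandbox_image = "registry.k8s.io/pause:3.9"
	# restrict_oom_score_adj = false
	# SystemdCgroup = false
	# conf_dir = "/etc/cni/net.d"
	sudo systemctl is-active containerd   # should report "active" after the restart above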
	I1201 18:52:31.793049  258880 start.go:522] Will wait 60s for socket path /run/containerd/containerd.sock
	I1201 18:52:31.793152  258880 ssh_runner.go:195] Run: stat /run/containerd/containerd.sock
	I1201 18:52:31.798046  258880 start.go:543] Will wait 60s for crictl version
	I1201 18:52:31.798109  258880 ssh_runner.go:195] Run: which crictl
	I1201 18:52:31.802601  258880 ssh_runner.go:195] Run: sudo /usr/bin/crictl version
	I1201 18:52:31.848129  258880 start.go:559] Version:  0.1.0
	RuntimeName:  containerd
	RuntimeVersion:  1.6.25
	RuntimeApiVersion:  v1
	I1201 18:52:31.848217  258880 ssh_runner.go:195] Run: containerd --version
	I1201 18:52:31.877808  258880 ssh_runner.go:195] Run: containerd --version
	I1201 18:52:31.911106  258880 out.go:177] * Preparing Kubernetes v1.28.4 on containerd 1.6.25 ...
	I1201 18:52:31.913016  258880 cli_runner.go:164] Run: docker network inspect addons-488129 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
	I1201 18:52:31.930244  258880 ssh_runner.go:195] Run: grep 192.168.49.1	host.minikube.internal$ /etc/hosts
	I1201 18:52:31.934817  258880 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\thost.minikube.internal$' "/etc/hosts"; echo "192.168.49.1	host.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
	I1201 18:52:31.948208  258880 preload.go:132] Checking if preload exists for k8s version v1.28.4 and runtime containerd
	I1201 18:52:31.948275  258880 ssh_runner.go:195] Run: sudo crictl images --output json
	I1201 18:52:31.989073  258880 containerd.go:604] all images are preloaded for containerd runtime.
	I1201 18:52:31.989098  258880 containerd.go:518] Images already preloaded, skipping extraction
	I1201 18:52:31.989162  258880 ssh_runner.go:195] Run: sudo crictl images --output json
	I1201 18:52:32.034981  258880 containerd.go:604] all images are preloaded for containerd runtime.
	I1201 18:52:32.035008  258880 cache_images.go:84] Images are preloaded, skipping loading
	I1201 18:52:32.035068  258880 ssh_runner.go:195] Run: sudo crictl info
	I1201 18:52:32.076754  258880 cni.go:84] Creating CNI manager for ""
	I1201 18:52:32.076782  258880 cni.go:143] "docker" driver + "containerd" runtime found, recommending kindnet
	I1201 18:52:32.076813  258880 kubeadm.go:87] Using pod CIDR: 10.244.0.0/16
	I1201 18:52:32.076835  258880 kubeadm.go:176] kubeadm options: {CertDir:/var/lib/minikube/certs ServiceCIDR:10.96.0.0/12 PodSubnet:10.244.0.0/16 AdvertiseAddress:192.168.49.2 APIServerPort:8443 KubernetesVersion:v1.28.4 EtcdDataDir:/var/lib/minikube/etcd EtcdExtraArgs:map[] ClusterName:addons-488129 NodeName:addons-488129 DNSDomain:cluster.local CRISocket:/run/containerd/containerd.sock ImageRepository: ComponentOptions:[{Component:apiServer ExtraArgs:map[enable-admission-plugins:NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota] Pairs:map[certSANs:["127.0.0.1", "localhost", "192.168.49.2"]]} {Component:controllerManager ExtraArgs:map[allocate-node-cidrs:true leader-elect:false] Pairs:map[]} {Component:scheduler ExtraArgs:map[leader-elect:false] Pairs:map[]}] FeatureArgs:map[] NodeIP:192.168.49.2 CgroupDriver:cgroupfs ClientCAFile:/var/lib/minikube/certs/ca.crt StaticPodPath:/etc/kubernetes/manifests ControlPlaneAddress:control-plane.minikube.internal KubeProxyOptions:map[] ResolvConfSearchRegression:false KubeletConfigOpts:map[hairpinMode:hairpin-veth runtimeRequestTimeout:15m] PrependCriSocketUnix:true}
	I1201 18:52:32.076970  258880 kubeadm.go:181] kubeadm config:
	apiVersion: kubeadm.k8s.io/v1beta3
	kind: InitConfiguration
	localAPIEndpoint:
	  advertiseAddress: 192.168.49.2
	  bindPort: 8443
	bootstrapTokens:
	  - groups:
	      - system:bootstrappers:kubeadm:default-node-token
	    ttl: 24h0m0s
	    usages:
	      - signing
	      - authentication
	nodeRegistration:
	  criSocket: unix:///run/containerd/containerd.sock
	  name: "addons-488129"
	  kubeletExtraArgs:
	    node-ip: 192.168.49.2
	  taints: []
	---
	apiVersion: kubeadm.k8s.io/v1beta3
	kind: ClusterConfiguration
	apiServer:
	  certSANs: ["127.0.0.1", "localhost", "192.168.49.2"]
	  extraArgs:
	    enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
	controllerManager:
	  extraArgs:
	    allocate-node-cidrs: "true"
	    leader-elect: "false"
	scheduler:
	  extraArgs:
	    leader-elect: "false"
	certificatesDir: /var/lib/minikube/certs
	clusterName: mk
	controlPlaneEndpoint: control-plane.minikube.internal:8443
	etcd:
	  local:
	    dataDir: /var/lib/minikube/etcd
	    extraArgs:
	      proxy-refresh-interval: "70000"
	kubernetesVersion: v1.28.4
	networking:
	  dnsDomain: cluster.local
	  podSubnet: "10.244.0.0/16"
	  serviceSubnet: 10.96.0.0/12
	---
	apiVersion: kubelet.config.k8s.io/v1beta1
	kind: KubeletConfiguration
	authentication:
	  x509:
	    clientCAFile: /var/lib/minikube/certs/ca.crt
	cgroupDriver: cgroupfs
	hairpinMode: hairpin-veth
	runtimeRequestTimeout: 15m
	clusterDomain: "cluster.local"
	# disable disk resource management by default
	imageGCHighThresholdPercent: 100
	evictionHard:
	  nodefs.available: "0%!"(MISSING)
	  nodefs.inodesFree: "0%!"(MISSING)
	  imagefs.available: "0%!"(MISSING)
	failSwapOn: false
	staticPodPath: /etc/kubernetes/manifests
	---
	apiVersion: kubeproxy.config.k8s.io/v1alpha1
	kind: KubeProxyConfiguration
	clusterCIDR: "10.244.0.0/16"
	metricsBindAddress: 0.0.0.0:10249
	conntrack:
	  maxPerCore: 0
	# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_established"
	  tcpEstablishedTimeout: 0s
	# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_close"
	  tcpCloseWaitTimeout: 0s
	
	I1201 18:52:32.077045  258880 kubeadm.go:976] kubelet [Unit]
	Wants=containerd.service
	
	[Service]
	ExecStart=
	ExecStart=/var/lib/minikube/binaries/v1.28.4/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --config=/var/lib/kubelet/config.yaml --container-runtime-endpoint=unix:///run/containerd/containerd.sock --hostname-override=addons-488129 --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.49.2
	
	[Install]
	 config:
	{KubernetesVersion:v1.28.4 ClusterName:addons-488129 Namespace:default APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI: NodeIP: NodePort:8443 NodeName:}
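The ExecStart flags logged above end up in the 10-kubeadm.conf drop-in copied just below (the 385-byte scp); a sketch of writing an equivalent drop-in by hand, with the content reconstructed from the logged unit rather than copied verbatim from the node:

	sudo tee /etc/systemd/system/kubelet.service.d/10-kubeadm.conf >/dev/null <<-'EOF'
	[Unit]
	Wants=containerd.service
	
	[Service]
	ExecStart=
	ExecStart=/var/lib/minikube/binaries/v1.28.4/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --config=/var/lib/kubelet/config.yaml --container-runtime-endpoint=unix:///run/containerd/containerd.sock --hostname-override=addons-488129 --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.49.2
	
	[Install]
	EOF
	sudo systemctl daemon-reload   # pick up the new drop-in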
	I1201 18:52:32.077120  258880 ssh_runner.go:195] Run: sudo ls /var/lib/minikube/binaries/v1.28.4
	I1201 18:52:32.088670  258880 binaries.go:44] Found k8s binaries, skipping transfer
	I1201 18:52:32.088767  258880 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/kubelet.service.d /lib/systemd/system /var/tmp/minikube
	I1201 18:52:32.099912  258880 ssh_runner.go:362] scp memory --> /etc/systemd/system/kubelet.service.d/10-kubeadm.conf (385 bytes)
	I1201 18:52:32.122244  258880 ssh_runner.go:362] scp memory --> /lib/systemd/system/kubelet.service (352 bytes)
	I1201 18:52:32.144682  258880 ssh_runner.go:362] scp memory --> /var/tmp/minikube/kubeadm.yaml.new (2102 bytes)
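With kubeadm.yaml.new in place, the generated config can be sanity-checked without mutating the node before the real init runs below, for example (binary path mirrors the init command later in this log):

	sudo env PATH="/var/lib/minikube/binaries/v1.28.4:$PATH" \
	  kubeadm init --config /var/tmp/minikube/kubeadm.yaml.new --dry-run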
	I1201 18:52:32.166001  258880 ssh_runner.go:195] Run: grep 192.168.49.2	control-plane.minikube.internal$ /etc/hosts
	I1201 18:52:32.170510  258880 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\tcontrol-plane.minikube.internal$' "/etc/hosts"; echo "192.168.49.2	control-plane.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
	I1201 18:52:32.183991  258880 certs.go:56] Setting up /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/addons-488129 for IP: 192.168.49.2
	I1201 18:52:32.184024  258880 certs.go:190] acquiring lock for shared ca certs: {Name:mk799b1e63d23a413d1b6e34a0169dabbea1b951 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
	I1201 18:52:32.184202  258880 certs.go:204] generating minikubeCA CA: /home/jenkins/minikube-integration/17703-252966/.minikube/ca.key
	I1201 18:52:33.162846  258880 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/17703-252966/.minikube/ca.crt ...
	I1201 18:52:33.162886  258880 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/17703-252966/.minikube/ca.crt: {Name:mk66b03f16995192a6dc09f380b9d45f2dca6f14 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
	I1201 18:52:33.163155  258880 crypto.go:164] Writing key to /home/jenkins/minikube-integration/17703-252966/.minikube/ca.key ...
	I1201 18:52:33.163169  258880 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/17703-252966/.minikube/ca.key: {Name:mkd2b4bd1be55410804d383e14fc4b9ad1ad0042 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
	I1201 18:52:33.164182  258880 certs.go:204] generating proxyClientCA CA: /home/jenkins/minikube-integration/17703-252966/.minikube/proxy-client-ca.key
	I1201 18:52:33.363352  258880 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/17703-252966/.minikube/proxy-client-ca.crt ...
	I1201 18:52:33.363385  258880 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/17703-252966/.minikube/proxy-client-ca.crt: {Name:mk7932373000a6c01b334043e185c4b6d95ca33e Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
	I1201 18:52:33.363954  258880 crypto.go:164] Writing key to /home/jenkins/minikube-integration/17703-252966/.minikube/proxy-client-ca.key ...
	I1201 18:52:33.363969  258880 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/17703-252966/.minikube/proxy-client-ca.key: {Name:mk8c3d69dbd55b6b2e372eaa6c54a0a0b4bfe614 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
	I1201 18:52:33.364115  258880 certs.go:319] generating minikube-user signed cert: /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/addons-488129/client.key
	I1201 18:52:33.364133  258880 crypto.go:68] Generating cert /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/addons-488129/client.crt with IP's: []
	I1201 18:52:34.366549  258880 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/addons-488129/client.crt ...
	I1201 18:52:34.366583  258880 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/addons-488129/client.crt: {Name:mk1e7533bc1e186934ea487c6a95d2af861c7599 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
	I1201 18:52:34.367171  258880 crypto.go:164] Writing key to /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/addons-488129/client.key ...
	I1201 18:52:34.367190  258880 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/addons-488129/client.key: {Name:mk9511eb5cf7af3f8bbf45506281507c79aeae23 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
	I1201 18:52:34.367293  258880 certs.go:319] generating minikube signed cert: /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/addons-488129/apiserver.key.dd3b5fb2
	I1201 18:52:34.367315  258880 crypto.go:68] Generating cert /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/addons-488129/apiserver.crt.dd3b5fb2 with IP's: [192.168.49.2 10.96.0.1 127.0.0.1 10.0.0.1]
	I1201 18:52:35.437977  258880 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/addons-488129/apiserver.crt.dd3b5fb2 ...
	I1201 18:52:35.438011  258880 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/addons-488129/apiserver.crt.dd3b5fb2: {Name:mkc3327b1acf78c239b963af6a4c5d2450f78857 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
	I1201 18:52:35.438197  258880 crypto.go:164] Writing key to /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/addons-488129/apiserver.key.dd3b5fb2 ...
	I1201 18:52:35.438212  258880 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/addons-488129/apiserver.key.dd3b5fb2: {Name:mk008b4a8a1557d546126a4cd2b0d132aed68d25 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
	I1201 18:52:35.438304  258880 certs.go:337] copying /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/addons-488129/apiserver.crt.dd3b5fb2 -> /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/addons-488129/apiserver.crt
	I1201 18:52:35.438377  258880 certs.go:341] copying /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/addons-488129/apiserver.key.dd3b5fb2 -> /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/addons-488129/apiserver.key
	I1201 18:52:35.438422  258880 certs.go:319] generating aggregator signed cert: /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/addons-488129/proxy-client.key
	I1201 18:52:35.438436  258880 crypto.go:68] Generating cert /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/addons-488129/proxy-client.crt with IP's: []
	I1201 18:52:35.737582  258880 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/addons-488129/proxy-client.crt ...
	I1201 18:52:35.737614  258880 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/addons-488129/proxy-client.crt: {Name:mk6e4c51f04e0958c8524c8c38b9d70de71044c1 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
	I1201 18:52:35.737813  258880 crypto.go:164] Writing key to /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/addons-488129/proxy-client.key ...
	I1201 18:52:35.737823  258880 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/addons-488129/proxy-client.key: {Name:mkc513eecf6f7a37733039a21edd4b0e9f9050cb Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
	I1201 18:52:35.738497  258880 certs.go:437] found cert: /home/jenkins/minikube-integration/17703-252966/.minikube/certs/home/jenkins/minikube-integration/17703-252966/.minikube/certs/ca-key.pem (1675 bytes)
	I1201 18:52:35.738549  258880 certs.go:437] found cert: /home/jenkins/minikube-integration/17703-252966/.minikube/certs/home/jenkins/minikube-integration/17703-252966/.minikube/certs/ca.pem (1078 bytes)
	I1201 18:52:35.738587  258880 certs.go:437] found cert: /home/jenkins/minikube-integration/17703-252966/.minikube/certs/home/jenkins/minikube-integration/17703-252966/.minikube/certs/cert.pem (1123 bytes)
	I1201 18:52:35.738615  258880 certs.go:437] found cert: /home/jenkins/minikube-integration/17703-252966/.minikube/certs/home/jenkins/minikube-integration/17703-252966/.minikube/certs/key.pem (1679 bytes)
	I1201 18:52:35.739227  258880 ssh_runner.go:362] scp /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/addons-488129/apiserver.crt --> /var/lib/minikube/certs/apiserver.crt (1399 bytes)
	I1201 18:52:35.770299  258880 ssh_runner.go:362] scp /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/addons-488129/apiserver.key --> /var/lib/minikube/certs/apiserver.key (1675 bytes)
	I1201 18:52:35.798551  258880 ssh_runner.go:362] scp /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/addons-488129/proxy-client.crt --> /var/lib/minikube/certs/proxy-client.crt (1147 bytes)
	I1201 18:52:35.826995  258880 ssh_runner.go:362] scp /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/addons-488129/proxy-client.key --> /var/lib/minikube/certs/proxy-client.key (1679 bytes)
	I1201 18:52:35.858715  258880 ssh_runner.go:362] scp /home/jenkins/minikube-integration/17703-252966/.minikube/ca.crt --> /var/lib/minikube/certs/ca.crt (1111 bytes)
	I1201 18:52:35.890124  258880 ssh_runner.go:362] scp /home/jenkins/minikube-integration/17703-252966/.minikube/ca.key --> /var/lib/minikube/certs/ca.key (1679 bytes)
	I1201 18:52:35.919384  258880 ssh_runner.go:362] scp /home/jenkins/minikube-integration/17703-252966/.minikube/proxy-client-ca.crt --> /var/lib/minikube/certs/proxy-client-ca.crt (1119 bytes)
	I1201 18:52:35.947296  258880 ssh_runner.go:362] scp /home/jenkins/minikube-integration/17703-252966/.minikube/proxy-client-ca.key --> /var/lib/minikube/certs/proxy-client-ca.key (1675 bytes)
	I1201 18:52:35.975281  258880 ssh_runner.go:362] scp /home/jenkins/minikube-integration/17703-252966/.minikube/ca.crt --> /usr/share/ca-certificates/minikubeCA.pem (1111 bytes)
	I1201 18:52:36.004107  258880 ssh_runner.go:362] scp memory --> /var/lib/minikube/kubeconfig (738 bytes)
	I1201 18:52:36.028438  258880 ssh_runner.go:195] Run: openssl version
	I1201 18:52:36.036149  258880 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/minikubeCA.pem && ln -fs /usr/share/ca-certificates/minikubeCA.pem /etc/ssl/certs/minikubeCA.pem"
	I1201 18:52:36.049141  258880 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/minikubeCA.pem
	I1201 18:52:36.054264  258880 certs.go:480] hashing: -rw-r--r-- 1 root root 1111 Dec  1 18:52 /usr/share/ca-certificates/minikubeCA.pem
	I1201 18:52:36.054338  258880 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem
	I1201 18:52:36.064934  258880 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/b5213941.0 || ln -fs /etc/ssl/certs/minikubeCA.pem /etc/ssl/certs/b5213941.0"
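The b5213941.0 link name is the OpenSSL subject hash of the minikube CA (computed by the openssl x509 -hash call above); the hash-named symlink is how OpenSSL-based clients locate the CA under /etc/ssl/certs. The same name can be reproduced by hand:

	openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem
	# b5213941   -> symlinked as /etc/ssl/certs/b5213941.0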
	I1201 18:52:36.077749  258880 ssh_runner.go:195] Run: ls /var/lib/minikube/certs/etcd
	I1201 18:52:36.082798  258880 certs.go:353] certs directory doesn't exist, likely first start: ls /var/lib/minikube/certs/etcd: Process exited with status 2
	stdout:
	
	stderr:
	ls: cannot access '/var/lib/minikube/certs/etcd': No such file or directory
	I1201 18:52:36.082848  258880 kubeadm.go:404] StartCluster: {Name:addons-488129 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.42-1701387262-17703@sha256:a5458414df1be5e58eff93b3e67e6ecaad7e51ab23139de15714f7345af15e2f Memory:4000 CPUs:2 DiskSize:20000 VMDriver: Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:0 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.28.4 ClusterName:addons-488129 Namespace:default APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI: NodeIP: NodePort:8443 NodeName:} Nodes:[{Name: IP:192.168.49.2 Port:8443 KubernetesVersion:v1.28.4 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 AutoPauseInterval:1m0s GPUs:}
	I1201 18:52:36.082924  258880 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:paused Name: Namespaces:[kube-system]}
	I1201 18:52:36.082980  258880 ssh_runner.go:195] Run: sudo -s eval "crictl ps -a --quiet --label io.kubernetes.pod.namespace=kube-system"
	I1201 18:52:36.130053  258880 cri.go:89] found id: ""
	I1201 18:52:36.130131  258880 ssh_runner.go:195] Run: sudo ls /var/lib/kubelet/kubeadm-flags.env /var/lib/kubelet/config.yaml /var/lib/minikube/etcd
	I1201 18:52:36.141500  258880 ssh_runner.go:195] Run: sudo cp /var/tmp/minikube/kubeadm.yaml.new /var/tmp/minikube/kubeadm.yaml
	I1201 18:52:36.152525  258880 kubeadm.go:226] ignoring SystemVerification for kubeadm because of docker driver
	I1201 18:52:36.152610  258880 ssh_runner.go:195] Run: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf
	I1201 18:52:36.163575  258880 kubeadm.go:152] config check failed, skipping stale config cleanup: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf: Process exited with status 2
	stdout:
	
	stderr:
	ls: cannot access '/etc/kubernetes/admin.conf': No such file or directory
	ls: cannot access '/etc/kubernetes/kubelet.conf': No such file or directory
	ls: cannot access '/etc/kubernetes/controller-manager.conf': No such file or directory
	ls: cannot access '/etc/kubernetes/scheduler.conf': No such file or directory
	I1201 18:52:36.163638  258880 ssh_runner.go:286] Start: /bin/bash -c "sudo env PATH="/var/lib/minikube/binaries/v1.28.4:$PATH" kubeadm init --config /var/tmp/minikube/kubeadm.yaml  --ignore-preflight-errors=DirAvailable--etc-kubernetes-manifests,DirAvailable--var-lib-minikube,DirAvailable--var-lib-minikube-etcd,FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml,FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml,FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml,FileAvailable--etc-kubernetes-manifests-etcd.yaml,Port-10250,Swap,NumCPU,Mem,SystemVerification,FileContent--proc-sys-net-bridge-bridge-nf-call-iptables"
	I1201 18:52:36.218168  258880 kubeadm.go:322] [init] Using Kubernetes version: v1.28.4
	I1201 18:52:36.218254  258880 kubeadm.go:322] [preflight] Running pre-flight checks
	I1201 18:52:36.263578  258880 kubeadm.go:322] [preflight] The system verification failed. Printing the output from the verification:
	I1201 18:52:36.263672  258880 kubeadm.go:322] KERNEL_VERSION: 5.15.0-1050-aws
	I1201 18:52:36.263712  258880 kubeadm.go:322] OS: Linux
	I1201 18:52:36.263786  258880 kubeadm.go:322] CGROUPS_CPU: enabled
	I1201 18:52:36.263846  258880 kubeadm.go:322] CGROUPS_CPUACCT: enabled
	I1201 18:52:36.263894  258880 kubeadm.go:322] CGROUPS_CPUSET: enabled
	I1201 18:52:36.263944  258880 kubeadm.go:322] CGROUPS_DEVICES: enabled
	I1201 18:52:36.263994  258880 kubeadm.go:322] CGROUPS_FREEZER: enabled
	I1201 18:52:36.264046  258880 kubeadm.go:322] CGROUPS_MEMORY: enabled
	I1201 18:52:36.264093  258880 kubeadm.go:322] CGROUPS_PIDS: enabled
	I1201 18:52:36.264142  258880 kubeadm.go:322] CGROUPS_HUGETLB: enabled
	I1201 18:52:36.264191  258880 kubeadm.go:322] CGROUPS_BLKIO: enabled
	I1201 18:52:36.343896  258880 kubeadm.go:322] [preflight] Pulling images required for setting up a Kubernetes cluster
	I1201 18:52:36.344071  258880 kubeadm.go:322] [preflight] This might take a minute or two, depending on the speed of your internet connection
	I1201 18:52:36.344203  258880 kubeadm.go:322] [preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
	I1201 18:52:36.596839  258880 kubeadm.go:322] [certs] Using certificateDir folder "/var/lib/minikube/certs"
	I1201 18:52:36.600565  258880 out.go:204]   - Generating certificates and keys ...
	I1201 18:52:36.600770  258880 kubeadm.go:322] [certs] Using existing ca certificate authority
	I1201 18:52:36.600852  258880 kubeadm.go:322] [certs] Using existing apiserver certificate and key on disk
	I1201 18:52:36.822639  258880 kubeadm.go:322] [certs] Generating "apiserver-kubelet-client" certificate and key
	I1201 18:52:37.134139  258880 kubeadm.go:322] [certs] Generating "front-proxy-ca" certificate and key
	I1201 18:52:37.709861  258880 kubeadm.go:322] [certs] Generating "front-proxy-client" certificate and key
	I1201 18:52:38.261471  258880 kubeadm.go:322] [certs] Generating "etcd/ca" certificate and key
	I1201 18:52:38.469376  258880 kubeadm.go:322] [certs] Generating "etcd/server" certificate and key
	I1201 18:52:38.469757  258880 kubeadm.go:322] [certs] etcd/server serving cert is signed for DNS names [addons-488129 localhost] and IPs [192.168.49.2 127.0.0.1 ::1]
	I1201 18:52:38.951099  258880 kubeadm.go:322] [certs] Generating "etcd/peer" certificate and key
	I1201 18:52:38.951455  258880 kubeadm.go:322] [certs] etcd/peer serving cert is signed for DNS names [addons-488129 localhost] and IPs [192.168.49.2 127.0.0.1 ::1]
	I1201 18:52:39.507248  258880 kubeadm.go:322] [certs] Generating "etcd/healthcheck-client" certificate and key
	I1201 18:52:40.097407  258880 kubeadm.go:322] [certs] Generating "apiserver-etcd-client" certificate and key
	I1201 18:52:40.509881  258880 kubeadm.go:322] [certs] Generating "sa" key and public key
	I1201 18:52:40.510199  258880 kubeadm.go:322] [kubeconfig] Using kubeconfig folder "/etc/kubernetes"
	I1201 18:52:40.796313  258880 kubeadm.go:322] [kubeconfig] Writing "admin.conf" kubeconfig file
	I1201 18:52:41.132978  258880 kubeadm.go:322] [kubeconfig] Writing "kubelet.conf" kubeconfig file
	I1201 18:52:41.682615  258880 kubeadm.go:322] [kubeconfig] Writing "controller-manager.conf" kubeconfig file
	I1201 18:52:43.689486  258880 kubeadm.go:322] [kubeconfig] Writing "scheduler.conf" kubeconfig file
	I1201 18:52:43.690406  258880 kubeadm.go:322] [etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
	I1201 18:52:43.694784  258880 kubeadm.go:322] [control-plane] Using manifest folder "/etc/kubernetes/manifests"
	I1201 18:52:43.699580  258880 out.go:204]   - Booting up control plane ...
	I1201 18:52:43.699747  258880 kubeadm.go:322] [control-plane] Creating static Pod manifest for "kube-apiserver"
	I1201 18:52:43.699861  258880 kubeadm.go:322] [control-plane] Creating static Pod manifest for "kube-controller-manager"
	I1201 18:52:43.699945  258880 kubeadm.go:322] [control-plane] Creating static Pod manifest for "kube-scheduler"
	I1201 18:52:43.714853  258880 kubeadm.go:322] [kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
	I1201 18:52:43.714947  258880 kubeadm.go:322] [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
	I1201 18:52:43.714984  258880 kubeadm.go:322] [kubelet-start] Starting the kubelet
	I1201 18:52:43.825502  258880 kubeadm.go:322] [wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
	I1201 18:52:51.827820  258880 kubeadm.go:322] [apiclient] All control plane components are healthy after 8.002916 seconds
	I1201 18:52:51.827973  258880 kubeadm.go:322] [upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
	I1201 18:52:51.844396  258880 kubeadm.go:322] [kubelet] Creating a ConfigMap "kubelet-config" in namespace kube-system with the configuration for the kubelets in the cluster
	I1201 18:52:52.371457  258880 kubeadm.go:322] [upload-certs] Skipping phase. Please see --upload-certs
	I1201 18:52:52.371703  258880 kubeadm.go:322] [mark-control-plane] Marking the node addons-488129 as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]
	I1201 18:52:52.884244  258880 kubeadm.go:322] [bootstrap-token] Using token: de4fqh.pcux2alauhsjd2o5
	I1201 18:52:52.886253  258880 out.go:204]   - Configuring RBAC rules ...
	I1201 18:52:52.886396  258880 kubeadm.go:322] [bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
	I1201 18:52:52.893934  258880 kubeadm.go:322] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes
	I1201 18:52:52.902572  258880 kubeadm.go:322] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
	I1201 18:52:52.907670  258880 kubeadm.go:322] [bootstrap-token] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
	I1201 18:52:52.911647  258880 kubeadm.go:322] [bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
	I1201 18:52:52.915677  258880 kubeadm.go:322] [bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
	I1201 18:52:52.930629  258880 kubeadm.go:322] [kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
	I1201 18:52:53.168787  258880 kubeadm.go:322] [addons] Applied essential addon: CoreDNS
	I1201 18:52:53.298702  258880 kubeadm.go:322] [addons] Applied essential addon: kube-proxy
	I1201 18:52:53.300535  258880 kubeadm.go:322] 
	I1201 18:52:53.300605  258880 kubeadm.go:322] Your Kubernetes control-plane has initialized successfully!
	I1201 18:52:53.300611  258880 kubeadm.go:322] 
	I1201 18:52:53.300683  258880 kubeadm.go:322] To start using your cluster, you need to run the following as a regular user:
	I1201 18:52:53.300689  258880 kubeadm.go:322] 
	I1201 18:52:53.300713  258880 kubeadm.go:322]   mkdir -p $HOME/.kube
	I1201 18:52:53.300768  258880 kubeadm.go:322]   sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
	I1201 18:52:53.300816  258880 kubeadm.go:322]   sudo chown $(id -u):$(id -g) $HOME/.kube/config
	I1201 18:52:53.300821  258880 kubeadm.go:322] 
	I1201 18:52:53.300871  258880 kubeadm.go:322] Alternatively, if you are the root user, you can run:
	I1201 18:52:53.300879  258880 kubeadm.go:322] 
	I1201 18:52:53.300925  258880 kubeadm.go:322]   export KUBECONFIG=/etc/kubernetes/admin.conf
	I1201 18:52:53.300929  258880 kubeadm.go:322] 
	I1201 18:52:53.300978  258880 kubeadm.go:322] You should now deploy a pod network to the cluster.
	I1201 18:52:53.301048  258880 kubeadm.go:322] Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
	I1201 18:52:53.301112  258880 kubeadm.go:322]   https://kubernetes.io/docs/concepts/cluster-administration/addons/
	I1201 18:52:53.301116  258880 kubeadm.go:322] 
	I1201 18:52:53.301195  258880 kubeadm.go:322] You can now join any number of control-plane nodes by copying certificate authorities
	I1201 18:52:53.301267  258880 kubeadm.go:322] and service account keys on each node and then running the following as root:
	I1201 18:52:53.301271  258880 kubeadm.go:322] 
	I1201 18:52:53.301350  258880 kubeadm.go:322]   kubeadm join control-plane.minikube.internal:8443 --token de4fqh.pcux2alauhsjd2o5 \
	I1201 18:52:53.301446  258880 kubeadm.go:322] 	--discovery-token-ca-cert-hash sha256:6cba72ab59f3057936b959d729bb32b422b624e6e2da6be2a011dda16967004c \
	I1201 18:52:53.301465  258880 kubeadm.go:322] 	--control-plane 
	I1201 18:52:53.301470  258880 kubeadm.go:322] 
	I1201 18:52:53.301548  258880 kubeadm.go:322] Then you can join any number of worker nodes by running the following on each as root:
	I1201 18:52:53.301555  258880 kubeadm.go:322] 
	I1201 18:52:53.301632  258880 kubeadm.go:322] kubeadm join control-plane.minikube.internal:8443 --token de4fqh.pcux2alauhsjd2o5 \
	I1201 18:52:53.301727  258880 kubeadm.go:322] 	--discovery-token-ca-cert-hash sha256:6cba72ab59f3057936b959d729bb32b422b624e6e2da6be2a011dda16967004c 
	I1201 18:52:53.306355  258880 kubeadm.go:322] 	[WARNING SystemVerification]: failed to parse kernel config: unable to load kernel module: "configs", output: "modprobe: FATAL: Module configs not found in directory /lib/modules/5.15.0-1050-aws\n", err: exit status 1
	I1201 18:52:53.306467  258880 kubeadm.go:322] 	[WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'
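The Service-Kubelet warning above is kubeadm's standard preflight hint; the command it asks for is simply:

	sudo systemctl enable kubelet.service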
	I1201 18:52:53.306482  258880 cni.go:84] Creating CNI manager for ""
	I1201 18:52:53.306490  258880 cni.go:143] "docker" driver + "containerd" runtime found, recommending kindnet
	I1201 18:52:53.308451  258880 out.go:177] * Configuring CNI (Container Networking Interface) ...
	I1201 18:52:53.310407  258880 ssh_runner.go:195] Run: stat /opt/cni/bin/portmap
	I1201 18:52:53.316367  258880 cni.go:182] applying CNI manifest using /var/lib/minikube/binaries/v1.28.4/kubectl ...
	I1201 18:52:53.316391  258880 ssh_runner.go:362] scp memory --> /var/tmp/minikube/cni.yaml (2438 bytes)
	I1201 18:52:53.339796  258880 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.4/kubectl apply --kubeconfig=/var/lib/minikube/kubeconfig -f /var/tmp/minikube/cni.yaml
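Because the docker driver plus containerd runtime selects kindnet (see the cni.go lines above), the applied /var/tmp/minikube/cni.yaml can be verified once the API server answers; the DaemonSet name is assumed here:

	sudo /var/lib/minikube/binaries/v1.28.4/kubectl --kubeconfig=/var/lib/minikube/kubeconfig \
	  -n kube-system rollout status ds/kindnet --timeout=60s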
	I1201 18:52:54.272275  258880 ssh_runner.go:195] Run: /bin/bash -c "cat /proc/$(pgrep kube-apiserver)/oom_adj"
	I1201 18:52:54.272349  258880 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.4/kubectl create clusterrolebinding minikube-rbac --clusterrole=cluster-admin --serviceaccount=kube-system:default --kubeconfig=/var/lib/minikube/kubeconfig
	I1201 18:52:54.272412  258880 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.4/kubectl label nodes minikube.k8s.io/version=v1.32.0 minikube.k8s.io/commit=a7798054841a94294fc1e610bab097fa7942f774 minikube.k8s.io/name=addons-488129 minikube.k8s.io/updated_at=2023_12_01T18_52_54_0700 minikube.k8s.io/primary=true --all --overwrite --kubeconfig=/var/lib/minikube/kubeconfig
	I1201 18:52:54.505812  258880 ops.go:34] apiserver oom_adj: -16
	I1201 18:52:54.505897  258880 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.4/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I1201 18:52:54.607066  258880 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.4/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I1201 18:52:55.203597  258880 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.4/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I1201 18:52:55.703528  258880 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.4/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I1201 18:52:56.204155  258880 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.4/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I1201 18:52:56.704453  258880 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.4/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I1201 18:52:57.203843  258880 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.4/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I1201 18:52:57.704111  258880 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.4/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I1201 18:52:58.204506  258880 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.4/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I1201 18:52:58.704265  258880 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.4/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I1201 18:52:59.204355  258880 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.4/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I1201 18:52:59.704529  258880 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.4/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I1201 18:53:00.204606  258880 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.4/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I1201 18:53:00.704427  258880 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.4/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I1201 18:53:01.204212  258880 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.4/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I1201 18:53:01.703808  258880 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.4/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I1201 18:53:02.204274  258880 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.4/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I1201 18:53:02.704159  258880 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.4/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I1201 18:53:03.204081  258880 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.4/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I1201 18:53:03.703846  258880 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.4/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I1201 18:53:04.203436  258880 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.4/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I1201 18:53:04.704209  258880 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.4/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I1201 18:53:05.203730  258880 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.4/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I1201 18:53:05.704018  258880 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.4/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I1201 18:53:05.840263  258880 kubeadm.go:1088] duration metric: took 11.567974294s to wait for elevateKubeSystemPrivileges.
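The block of repeated "kubectl get sa default" calls above is a poll (roughly every 500ms, matching the timestamps) until the default ServiceAccount exists; a minimal equivalent of that wait, assuming the same binary and kubeconfig paths:

	until sudo /var/lib/minikube/binaries/v1.28.4/kubectl get sa default \
	    --kubeconfig=/var/lib/minikube/kubeconfig >/dev/null 2>&1; do
	  sleep 0.5
	done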
	I1201 18:53:05.840295  258880 kubeadm.go:406] StartCluster complete in 29.757448771s
	I1201 18:53:05.840322  258880 settings.go:142] acquiring lock: {Name:mk509c4de5b63e24c154062001ac3a5a349afe54 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
	I1201 18:53:05.840446  258880 settings.go:150] Updating kubeconfig:  /home/jenkins/minikube-integration/17703-252966/kubeconfig
	I1201 18:53:05.840935  258880 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/17703-252966/kubeconfig: {Name:mk1b3fc1b8f9b6d7245434b6dbdc3c3d1a4130cc Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
	I1201 18:53:05.843131  258880 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.28.4/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml"
	I1201 18:53:05.843430  258880 config.go:182] Loaded profile config "addons-488129": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.28.4
	I1201 18:53:05.843458  258880 addons.go:499] enable addons start: toEnable=map[ambassador:false auto-pause:false cloud-spanner:true csi-hostpath-driver:true dashboard:false default-storageclass:true efk:false freshpod:false gcp-auth:true gvisor:false headlamp:false helm-tiller:false inaccel:false ingress:true ingress-dns:true inspektor-gadget:true istio:false istio-provisioner:false kong:false kubeflow:false kubevirt:false logviewer:false metallb:false metrics-server:true nvidia-device-plugin:true nvidia-driver-installer:false nvidia-gpu-device-plugin:false olm:false pod-security-policy:false portainer:false registry:true registry-aliases:false registry-creds:false storage-provisioner:true storage-provisioner-gluster:false storage-provisioner-rancher:true volumesnapshots:true]
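The toEnable map above lists the addons this test profile switches on; outside the harness, roughly the same effect per addon comes from the addons CLI, for example:

	out/minikube-linux-arm64 -p addons-488129 addons enable ingress
	out/minikube-linux-arm64 -p addons-488129 addons enable ingress-dns
	out/minikube-linux-arm64 -p addons-488129 addons enable csi-hostpath-driver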
	I1201 18:53:05.843547  258880 addons.go:69] Setting volumesnapshots=true in profile "addons-488129"
	I1201 18:53:05.843563  258880 addons.go:231] Setting addon volumesnapshots=true in "addons-488129"
	I1201 18:53:05.843641  258880 host.go:66] Checking if "addons-488129" exists ...
	I1201 18:53:05.844084  258880 cli_runner.go:164] Run: docker container inspect addons-488129 --format={{.State.Status}}
	I1201 18:53:05.844711  258880 addons.go:69] Setting ingress=true in profile "addons-488129"
	I1201 18:53:05.844754  258880 addons.go:231] Setting addon ingress=true in "addons-488129"
	I1201 18:53:05.844812  258880 host.go:66] Checking if "addons-488129" exists ...
	I1201 18:53:05.845286  258880 cli_runner.go:164] Run: docker container inspect addons-488129 --format={{.State.Status}}
	I1201 18:53:05.850379  258880 addons.go:69] Setting ingress-dns=true in profile "addons-488129"
	I1201 18:53:05.850413  258880 addons.go:231] Setting addon ingress-dns=true in "addons-488129"
	I1201 18:53:05.850476  258880 host.go:66] Checking if "addons-488129" exists ...
	I1201 18:53:05.850925  258880 cli_runner.go:164] Run: docker container inspect addons-488129 --format={{.State.Status}}
	I1201 18:53:05.851116  258880 addons.go:69] Setting cloud-spanner=true in profile "addons-488129"
	I1201 18:53:05.851132  258880 addons.go:231] Setting addon cloud-spanner=true in "addons-488129"
	I1201 18:53:05.851177  258880 host.go:66] Checking if "addons-488129" exists ...
	I1201 18:53:05.851552  258880 cli_runner.go:164] Run: docker container inspect addons-488129 --format={{.State.Status}}
	I1201 18:53:05.855131  258880 addons.go:69] Setting inspektor-gadget=true in profile "addons-488129"
	I1201 18:53:05.855203  258880 addons.go:231] Setting addon inspektor-gadget=true in "addons-488129"
	I1201 18:53:05.855308  258880 host.go:66] Checking if "addons-488129" exists ...
	I1201 18:53:05.855783  258880 cli_runner.go:164] Run: docker container inspect addons-488129 --format={{.State.Status}}
	I1201 18:53:05.856340  258880 addons.go:69] Setting csi-hostpath-driver=true in profile "addons-488129"
	I1201 18:53:05.856390  258880 addons.go:231] Setting addon csi-hostpath-driver=true in "addons-488129"
	I1201 18:53:05.856430  258880 host.go:66] Checking if "addons-488129" exists ...
	I1201 18:53:05.856834  258880 cli_runner.go:164] Run: docker container inspect addons-488129 --format={{.State.Status}}
	I1201 18:53:05.868993  258880 addons.go:69] Setting default-storageclass=true in profile "addons-488129"
	I1201 18:53:05.869034  258880 addons_storage_classes.go:33] enableOrDisableStorageClasses default-storageclass=true on "addons-488129"
	I1201 18:53:05.869422  258880 cli_runner.go:164] Run: docker container inspect addons-488129 --format={{.State.Status}}
	I1201 18:53:05.876550  258880 addons.go:69] Setting metrics-server=true in profile "addons-488129"
	I1201 18:53:05.876623  258880 addons.go:231] Setting addon metrics-server=true in "addons-488129"
	I1201 18:53:05.876712  258880 host.go:66] Checking if "addons-488129" exists ...
	I1201 18:53:05.877211  258880 cli_runner.go:164] Run: docker container inspect addons-488129 --format={{.State.Status}}
	I1201 18:53:05.888197  258880 addons.go:69] Setting gcp-auth=true in profile "addons-488129"
	I1201 18:53:05.888242  258880 mustload.go:65] Loading cluster: addons-488129
	I1201 18:53:05.888556  258880 config.go:182] Loaded profile config "addons-488129": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.28.4
	I1201 18:53:05.888838  258880 cli_runner.go:164] Run: docker container inspect addons-488129 --format={{.State.Status}}
	I1201 18:53:05.890110  258880 addons.go:69] Setting nvidia-device-plugin=true in profile "addons-488129"
	I1201 18:53:05.928267  258880 addons.go:231] Setting addon nvidia-device-plugin=true in "addons-488129"
	I1201 18:53:05.944836  258880 host.go:66] Checking if "addons-488129" exists ...
	I1201 18:53:05.900726  258880 addons.go:69] Setting registry=true in profile "addons-488129"
	I1201 18:53:05.900778  258880 addons.go:69] Setting storage-provisioner=true in profile "addons-488129"
	I1201 18:53:05.900787  258880 addons.go:69] Setting storage-provisioner-rancher=true in profile "addons-488129"
	I1201 18:53:05.953216  258880 addons_storage_classes.go:33] enableOrDisableStorageClasses storage-provisioner-rancher=true on "addons-488129"
	I1201 18:53:05.976710  258880 addons.go:231] Setting addon registry=true in "addons-488129"
	I1201 18:53:05.976930  258880 host.go:66] Checking if "addons-488129" exists ...
	I1201 18:53:05.977366  258880 cli_runner.go:164] Run: docker container inspect addons-488129 --format={{.State.Status}}
	I1201 18:53:05.988870  258880 addons.go:231] Setting addon storage-provisioner=true in "addons-488129"
	I1201 18:53:05.996741  258880 host.go:66] Checking if "addons-488129" exists ...
	I1201 18:53:05.997365  258880 cli_runner.go:164] Run: docker container inspect addons-488129 --format={{.State.Status}}
	I1201 18:53:05.999811  258880 cli_runner.go:164] Run: docker container inspect addons-488129 --format={{.State.Status}}
	I1201 18:53:06.006063  258880 cli_runner.go:164] Run: docker container inspect addons-488129 --format={{.State.Status}}
	I1201 18:53:06.036216  258880 out.go:177]   - Using image registry.k8s.io/ingress-nginx/kube-webhook-certgen:v20231011-8b53cabe0
	I1201 18:53:06.056578  258880 out.go:177]   - Using image gcr.io/cloud-spanner-emulator/emulator:1.5.12
	I1201 18:53:06.063265  258880 addons.go:423] installing /etc/kubernetes/addons/deployment.yaml
	I1201 18:53:06.063345  258880 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/deployment.yaml (1004 bytes)
	I1201 18:53:06.063469  258880 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-488129
	I1201 18:53:06.079019  258880 out.go:177]   - Using image registry.k8s.io/ingress-nginx/controller:v1.9.4
	I1201 18:53:06.086824  258880 out.go:177]   - Using image registry.k8s.io/ingress-nginx/kube-webhook-certgen:v20231011-8b53cabe0
	I1201 18:53:06.079316  258880 out.go:177]   - Using image registry.k8s.io/sig-storage/snapshot-controller:v6.1.0
	I1201 18:53:06.099114  258880 addons.go:423] installing /etc/kubernetes/addons/ingress-deploy.yaml
	I1201 18:53:06.101009  258880 out.go:177]   - Using image gcr.io/k8s-minikube/minikube-ingress-dns:0.0.2
	I1201 18:53:06.101018  258880 out.go:177]   - Using image registry.k8s.io/sig-storage/livenessprobe:v2.8.0
	I1201 18:53:06.101032  258880 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/ingress-deploy.yaml (16103 bytes)
	I1201 18:53:06.103605  258880 addons.go:423] installing /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml
	I1201 18:53:06.105834  258880 addons.go:423] installing /etc/kubernetes/addons/ingress-dns-pod.yaml
	I1201 18:53:06.105936  258880 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-488129
	I1201 18:53:06.112179  258880 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml (934 bytes)
	I1201 18:53:06.112196  258880 out.go:177]   - Using image ghcr.io/inspektor-gadget/inspektor-gadget:v0.22.0
	I1201 18:53:06.112227  258880 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/ingress-dns-pod.yaml (2442 bytes)
	I1201 18:53:06.116252  258880 out.go:177]   - Using image registry.k8s.io/sig-storage/csi-resizer:v1.6.0
	I1201 18:53:06.114157  258880 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-488129
	I1201 18:53:06.114516  258880 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-488129
	I1201 18:53:06.136581  258880 addons.go:423] installing /etc/kubernetes/addons/ig-namespace.yaml
	I1201 18:53:06.136608  258880 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/ig-namespace.yaml (55 bytes)
	I1201 18:53:06.136681  258880 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-488129
	I1201 18:53:06.162203  258880 out.go:177]   - Using image registry.k8s.io/sig-storage/csi-snapshotter:v6.1.0
	I1201 18:53:06.168218  258880 out.go:177]   - Using image registry.k8s.io/sig-storage/csi-provisioner:v3.3.0
	I1201 18:53:06.171399  258880 out.go:177]   - Using image registry.k8s.io/sig-storage/csi-attacher:v4.0.0
	I1201 18:53:06.169468  258880 addons.go:231] Setting addon default-storageclass=true in "addons-488129"
	I1201 18:53:06.172679  258880 host.go:66] Checking if "addons-488129" exists ...
	I1201 18:53:06.173540  258880 host.go:66] Checking if "addons-488129" exists ...
	I1201 18:53:06.175242  258880 out.go:177]   - Using image registry.k8s.io/metrics-server/metrics-server:v0.6.4
	I1201 18:53:06.185115  258880 addons.go:423] installing /etc/kubernetes/addons/metrics-apiservice.yaml
	I1201 18:53:06.185133  258880 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/metrics-apiservice.yaml (424 bytes)
	I1201 18:53:06.185184  258880 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-488129
	I1201 18:53:06.183277  258880 out.go:177]   - Using image registry.k8s.io/sig-storage/csi-external-health-monitor-controller:v0.7.0
	I1201 18:53:06.183825  258880 cli_runner.go:164] Run: docker container inspect addons-488129 --format={{.State.Status}}
	I1201 18:53:06.234194  258880 out.go:177]   - Using image registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.6.0
	I1201 18:53:06.244204  258880 out.go:177]   - Using image registry.k8s.io/sig-storage/hostpathplugin:v1.9.0
	I1201 18:53:06.236749  258880 kapi.go:248] "coredns" deployment in "kube-system" namespace and "addons-488129" context rescaled to 1 replicas
	I1201 18:53:06.247562  258880 out.go:177]   - Using image gcr.io/k8s-minikube/storage-provisioner:v5
	I1201 18:53:06.245954  258880 addons.go:423] installing /etc/kubernetes/addons/rbac-external-attacher.yaml
	I1201 18:53:06.245984  258880 start.go:223] Will wait 6m0s for node &{Name: IP:192.168.49.2 Port:8443 KubernetesVersion:v1.28.4 ContainerRuntime:containerd ControlPlane:true Worker:true}
	I1201 18:53:06.254143  258880 out.go:177] * Verifying Kubernetes components...
	I1201 18:53:06.252008  258880 addons.go:423] installing /etc/kubernetes/addons/storage-provisioner.yaml
	I1201 18:53:06.252021  258880 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/rbac-external-attacher.yaml (3073 bytes)
	I1201 18:53:06.257735  258880 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet
	I1201 18:53:06.260059  258880 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/storage-provisioner.yaml (2676 bytes)
	I1201 18:53:06.260153  258880 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-488129
	I1201 18:53:06.260373  258880 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-488129
	I1201 18:53:06.286862  258880 out.go:177]   - Using image docker.io/registry:2.8.3
	I1201 18:53:06.290285  258880 out.go:177]   - Using image gcr.io/k8s-minikube/kube-registry-proxy:0.0.5
	I1201 18:53:06.292718  258880 addons.go:423] installing /etc/kubernetes/addons/registry-rc.yaml
	I1201 18:53:06.292740  258880 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/registry-rc.yaml (798 bytes)
	I1201 18:53:06.292810  258880 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-488129
	I1201 18:53:06.313919  258880 out.go:177]   - Using image nvcr.io/nvidia/k8s-device-plugin:v0.14.3
	I1201 18:53:06.309649  258880 addons.go:231] Setting addon storage-provisioner-rancher=true in "addons-488129"
	I1201 18:53:06.315729  258880 host.go:66] Checking if "addons-488129" exists ...
	I1201 18:53:06.316247  258880 cli_runner.go:164] Run: docker container inspect addons-488129 --format={{.State.Status}}
	I1201 18:53:06.320009  258880 addons.go:423] installing /etc/kubernetes/addons/nvidia-device-plugin.yaml
	I1201 18:53:06.320030  258880 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/nvidia-device-plugin.yaml (1966 bytes)
	I1201 18:53:06.320133  258880 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-488129
	I1201 18:53:06.341180  258880 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33083 SSHKeyPath:/home/jenkins/minikube-integration/17703-252966/.minikube/machines/addons-488129/id_rsa Username:docker}
	I1201 18:53:06.420632  258880 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33083 SSHKeyPath:/home/jenkins/minikube-integration/17703-252966/.minikube/machines/addons-488129/id_rsa Username:docker}
	I1201 18:53:06.431019  258880 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33083 SSHKeyPath:/home/jenkins/minikube-integration/17703-252966/.minikube/machines/addons-488129/id_rsa Username:docker}
	I1201 18:53:06.486776  258880 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33083 SSHKeyPath:/home/jenkins/minikube-integration/17703-252966/.minikube/machines/addons-488129/id_rsa Username:docker}
	I1201 18:53:06.512889  258880 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33083 SSHKeyPath:/home/jenkins/minikube-integration/17703-252966/.minikube/machines/addons-488129/id_rsa Username:docker}
	I1201 18:53:06.532246  258880 addons.go:423] installing /etc/kubernetes/addons/storageclass.yaml
	I1201 18:53:06.532269  258880 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/storageclass.yaml (271 bytes)
	I1201 18:53:06.532332  258880 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-488129
	I1201 18:53:06.551836  258880 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33083 SSHKeyPath:/home/jenkins/minikube-integration/17703-252966/.minikube/machines/addons-488129/id_rsa Username:docker}
	I1201 18:53:06.555914  258880 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33083 SSHKeyPath:/home/jenkins/minikube-integration/17703-252966/.minikube/machines/addons-488129/id_rsa Username:docker}
	I1201 18:53:06.572034  258880 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33083 SSHKeyPath:/home/jenkins/minikube-integration/17703-252966/.minikube/machines/addons-488129/id_rsa Username:docker}
	I1201 18:53:06.572034  258880 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33083 SSHKeyPath:/home/jenkins/minikube-integration/17703-252966/.minikube/machines/addons-488129/id_rsa Username:docker}
	I1201 18:53:06.572807  258880 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33083 SSHKeyPath:/home/jenkins/minikube-integration/17703-252966/.minikube/machines/addons-488129/id_rsa Username:docker}
	I1201 18:53:06.628166  258880 out.go:177]   - Using image docker.io/rancher/local-path-provisioner:v0.0.22
	I1201 18:53:06.627013  258880 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33083 SSHKeyPath:/home/jenkins/minikube-integration/17703-252966/.minikube/machines/addons-488129/id_rsa Username:docker}
	I1201 18:53:06.632250  258880 out.go:177]   - Using image docker.io/busybox:stable
	I1201 18:53:06.633852  258880 addons.go:423] installing /etc/kubernetes/addons/storage-provisioner-rancher.yaml
	I1201 18:53:06.633870  258880 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/storage-provisioner-rancher.yaml (3113 bytes)
	I1201 18:53:06.633931  258880 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-488129
	I1201 18:53:06.661709  258880 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33083 SSHKeyPath:/home/jenkins/minikube-integration/17703-252966/.minikube/machines/addons-488129/id_rsa Username:docker}
	I1201 18:53:06.846965  258880 ssh_runner.go:235] Completed: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.28.4/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml": (1.003798411s)
	I1201 18:53:06.847220  258880 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.28.4/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml | sed -e '/^        forward . \/etc\/resolv.conf.*/i \        hosts {\n           192.168.49.1 host.minikube.internal\n           fallthrough\n        }' -e '/^        errors *$/i \        log' | sudo /var/lib/minikube/binaries/v1.28.4/kubectl --kubeconfig=/var/lib/minikube/kubeconfig replace -f -"
	I1201 18:53:06.847918  258880 node_ready.go:35] waiting up to 6m0s for node "addons-488129" to be "Ready" ...
	I1201 18:53:06.852580  258880 node_ready.go:49] node "addons-488129" has status "Ready":"True"
	I1201 18:53:06.852645  258880 node_ready.go:38] duration metric: took 4.708647ms waiting for node "addons-488129" to be "Ready" ...
	I1201 18:53:06.852670  258880 pod_ready.go:35] extra waiting up to 6m0s for all system-critical pods including labels [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] to be "Ready" ...
	I1201 18:53:06.863166  258880 pod_ready.go:78] waiting up to 6m0s for pod "coredns-5dd5756b68-7cnd6" in "kube-system" namespace to be "Ready" ...
	I1201 18:53:07.061233  258880 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.4/kubectl apply -f /etc/kubernetes/addons/nvidia-device-plugin.yaml
	I1201 18:53:07.146552  258880 addons.go:423] installing /etc/kubernetes/addons/registry-svc.yaml
	I1201 18:53:07.146625  258880 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/registry-svc.yaml (398 bytes)
	I1201 18:53:07.175503  258880 addons.go:423] installing /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml
	I1201 18:53:07.175529  258880 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml (6471 bytes)
	I1201 18:53:07.180406  258880 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.4/kubectl apply -f /etc/kubernetes/addons/deployment.yaml
	I1201 18:53:07.227013  258880 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.4/kubectl apply -f /etc/kubernetes/addons/ingress-dns-pod.yaml
	I1201 18:53:07.262684  258880 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.4/kubectl apply -f /etc/kubernetes/addons/ingress-deploy.yaml
	I1201 18:53:07.310347  258880 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.4/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml
	I1201 18:53:07.442641  258880 addons.go:423] installing /etc/kubernetes/addons/registry-proxy.yaml
	I1201 18:53:07.442668  258880 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/registry-proxy.yaml (947 bytes)
	I1201 18:53:07.470961  258880 addons.go:423] installing /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml
	I1201 18:53:07.470988  258880 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml (23126 bytes)
	I1201 18:53:07.482788  258880 addons.go:423] installing /etc/kubernetes/addons/ig-serviceaccount.yaml
	I1201 18:53:07.482815  258880 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/ig-serviceaccount.yaml (80 bytes)
	I1201 18:53:07.503401  258880 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.4/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml
	I1201 18:53:07.507632  258880 addons.go:423] installing /etc/kubernetes/addons/metrics-server-deployment.yaml
	I1201 18:53:07.507651  258880 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/metrics-server-deployment.yaml (1907 bytes)
	I1201 18:53:07.555555  258880 addons.go:423] installing /etc/kubernetes/addons/rbac-hostpath.yaml
	I1201 18:53:07.555582  258880 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/rbac-hostpath.yaml (4266 bytes)
	I1201 18:53:07.557603  258880 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.4/kubectl apply -f /etc/kubernetes/addons/storage-provisioner-rancher.yaml
	I1201 18:53:07.623064  258880 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.4/kubectl apply -f /etc/kubernetes/addons/registry-rc.yaml -f /etc/kubernetes/addons/registry-svc.yaml -f /etc/kubernetes/addons/registry-proxy.yaml
	I1201 18:53:07.663382  258880 addons.go:423] installing /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml
	I1201 18:53:07.663409  258880 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml (19582 bytes)
	I1201 18:53:07.702166  258880 addons.go:423] installing /etc/kubernetes/addons/ig-role.yaml
	I1201 18:53:07.702191  258880 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/ig-role.yaml (210 bytes)
	I1201 18:53:07.809569  258880 addons.go:423] installing /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml
	I1201 18:53:07.809597  258880 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml (3545 bytes)
	I1201 18:53:07.815064  258880 addons.go:423] installing /etc/kubernetes/addons/rbac-external-health-monitor-controller.yaml
	I1201 18:53:07.815090  258880 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/rbac-external-health-monitor-controller.yaml (3038 bytes)
	I1201 18:53:07.827461  258880 addons.go:423] installing /etc/kubernetes/addons/metrics-server-rbac.yaml
	I1201 18:53:07.827487  258880 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/metrics-server-rbac.yaml (2175 bytes)
	I1201 18:53:07.909967  258880 addons.go:423] installing /etc/kubernetes/addons/ig-rolebinding.yaml
	I1201 18:53:07.909992  258880 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/ig-rolebinding.yaml (244 bytes)
	I1201 18:53:07.947468  258880 addons.go:423] installing /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml
	I1201 18:53:07.947494  258880 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml (1475 bytes)
	I1201 18:53:08.039046  258880 addons.go:423] installing /etc/kubernetes/addons/rbac-external-provisioner.yaml
	I1201 18:53:08.039069  258880 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/rbac-external-provisioner.yaml (4442 bytes)
	I1201 18:53:08.095578  258880 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.4/kubectl apply -f /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml -f /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml -f /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml
	I1201 18:53:08.124087  258880 addons.go:423] installing /etc/kubernetes/addons/metrics-server-service.yaml
	I1201 18:53:08.124115  258880 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/metrics-server-service.yaml (446 bytes)
	I1201 18:53:08.222680  258880 addons.go:423] installing /etc/kubernetes/addons/rbac-external-resizer.yaml
	I1201 18:53:08.222709  258880 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/rbac-external-resizer.yaml (2943 bytes)
	I1201 18:53:08.227936  258880 addons.go:423] installing /etc/kubernetes/addons/ig-clusterrole.yaml
	I1201 18:53:08.227962  258880 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/ig-clusterrole.yaml (1485 bytes)
	I1201 18:53:08.377142  258880 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.4/kubectl apply -f /etc/kubernetes/addons/metrics-apiservice.yaml -f /etc/kubernetes/addons/metrics-server-deployment.yaml -f /etc/kubernetes/addons/metrics-server-rbac.yaml -f /etc/kubernetes/addons/metrics-server-service.yaml
	I1201 18:53:08.439864  258880 addons.go:423] installing /etc/kubernetes/addons/ig-clusterrolebinding.yaml
	I1201 18:53:08.439891  258880 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/ig-clusterrolebinding.yaml (274 bytes)
	I1201 18:53:08.489054  258880 addons.go:423] installing /etc/kubernetes/addons/rbac-external-snapshotter.yaml
	I1201 18:53:08.489122  258880 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/rbac-external-snapshotter.yaml (3149 bytes)
	I1201 18:53:08.622388  258880 addons.go:423] installing /etc/kubernetes/addons/ig-crd.yaml
	I1201 18:53:08.622452  258880 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/ig-crd.yaml (5216 bytes)
	I1201 18:53:08.755059  258880 addons.go:423] installing /etc/kubernetes/addons/csi-hostpath-attacher.yaml
	I1201 18:53:08.755135  258880 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/csi-hostpath-attacher.yaml (2143 bytes)
	I1201 18:53:08.807126  258880 addons.go:423] installing /etc/kubernetes/addons/ig-daemonset.yaml
	I1201 18:53:08.807216  258880 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/ig-daemonset.yaml (7741 bytes)
	I1201 18:53:08.887000  258880 pod_ready.go:102] pod "coredns-5dd5756b68-7cnd6" in "kube-system" namespace has status "Ready":"False"
	I1201 18:53:08.988603  258880 addons.go:423] installing /etc/kubernetes/addons/csi-hostpath-driverinfo.yaml
	I1201 18:53:08.988630  258880 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/csi-hostpath-driverinfo.yaml (1274 bytes)
	I1201 18:53:09.044044  258880 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.4/kubectl apply -f /etc/kubernetes/addons/ig-namespace.yaml -f /etc/kubernetes/addons/ig-serviceaccount.yaml -f /etc/kubernetes/addons/ig-role.yaml -f /etc/kubernetes/addons/ig-rolebinding.yaml -f /etc/kubernetes/addons/ig-clusterrole.yaml -f /etc/kubernetes/addons/ig-clusterrolebinding.yaml -f /etc/kubernetes/addons/ig-crd.yaml -f /etc/kubernetes/addons/ig-daemonset.yaml
	I1201 18:53:09.215678  258880 addons.go:423] installing /etc/kubernetes/addons/csi-hostpath-plugin.yaml
	I1201 18:53:09.215702  258880 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/csi-hostpath-plugin.yaml (8201 bytes)
	I1201 18:53:09.337393  258880 addons.go:423] installing /etc/kubernetes/addons/csi-hostpath-resizer.yaml
	I1201 18:53:09.337420  258880 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/csi-hostpath-resizer.yaml (2191 bytes)
	I1201 18:53:09.363253  258880 addons.go:423] installing /etc/kubernetes/addons/csi-hostpath-storageclass.yaml
	I1201 18:53:09.363279  258880 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/csi-hostpath-storageclass.yaml (846 bytes)
	I1201 18:53:09.389710  258880 ssh_runner.go:235] Completed: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.28.4/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml | sed -e '/^        forward . \/etc\/resolv.conf.*/i \        hosts {\n           192.168.49.1 host.minikube.internal\n           fallthrough\n        }' -e '/^        errors *$/i \        log' | sudo /var/lib/minikube/binaries/v1.28.4/kubectl --kubeconfig=/var/lib/minikube/kubeconfig replace -f -": (2.542462843s)
	I1201 18:53:09.389733  258880 start.go:929] {"host.minikube.internal": 192.168.49.1} host record injected into CoreDNS's ConfigMap
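The two "Completed" entries above correspond to the sed pipeline that rewrites the coredns ConfigMap so its Corefile gains a hosts{} stanza mapping host.minikube.internal to 192.168.49.1. For readers who want the same edit without shelling out on the node, a minimal client-go sketch follows; only the stanza itself is taken from the log, while the kubeconfig path and overall layout are assumptions for illustration.

```go
// Hedged sketch of the CoreDNS edit completed above: insert a hosts{} stanza
// into the coredns Corefile via client-go instead of kubectl+sed on the node.
// This is an illustration only, not minikube's implementation.
package main

import (
	"context"
	"fmt"
	"strings"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

// The stanza matches the one injected by the sed expression in the log.
const hostsStanza = `        hosts {
           192.168.49.1 host.minikube.internal
           fallthrough
        }
`

func main() {
	// Assumed out-of-cluster kubeconfig; the test itself edits the file on the node.
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)
	ctx := context.TODO()

	cm, err := client.CoreV1().ConfigMaps("kube-system").Get(ctx, "coredns", metav1.GetOptions{})
	if err != nil {
		panic(err)
	}
	corefile := cm.Data["Corefile"]
	if !strings.Contains(corefile, "host.minikube.internal") {
		// Insert the hosts block just before the forward plugin, mirroring the sed edit.
		corefile = strings.Replace(corefile, "        forward .", hostsStanza+"        forward .", 1)
		cm.Data["Corefile"] = corefile
		if _, err := client.CoreV1().ConfigMaps("kube-system").Update(ctx, cm, metav1.UpdateOptions{}); err != nil {
			panic(err)
		}
	}
	fmt.Println("host.minikube.internal record present in CoreDNS Corefile")
}
```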
	I1201 18:53:09.389760  258880 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.4/kubectl apply -f /etc/kubernetes/addons/nvidia-device-plugin.yaml: (2.328446416s)
	I1201 18:53:09.398785  258880 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.4/kubectl apply -f /etc/kubernetes/addons/rbac-external-attacher.yaml -f /etc/kubernetes/addons/rbac-hostpath.yaml -f /etc/kubernetes/addons/rbac-external-health-monitor-controller.yaml -f /etc/kubernetes/addons/rbac-external-provisioner.yaml -f /etc/kubernetes/addons/rbac-external-resizer.yaml -f /etc/kubernetes/addons/rbac-external-snapshotter.yaml -f /etc/kubernetes/addons/csi-hostpath-attacher.yaml -f /etc/kubernetes/addons/csi-hostpath-driverinfo.yaml -f /etc/kubernetes/addons/csi-hostpath-plugin.yaml -f /etc/kubernetes/addons/csi-hostpath-resizer.yaml -f /etc/kubernetes/addons/csi-hostpath-storageclass.yaml
	I1201 18:53:09.861760  258880 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.4/kubectl apply -f /etc/kubernetes/addons/deployment.yaml: (2.681193003s)
	I1201 18:53:10.458864  258880 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.4/kubectl apply -f /etc/kubernetes/addons/ingress-dns-pod.yaml: (3.231777339s)
	I1201 18:53:10.887046  258880 pod_ready.go:102] pod "coredns-5dd5756b68-7cnd6" in "kube-system" namespace has status "Ready":"False"
	I1201 18:53:12.887460  258880 pod_ready.go:102] pod "coredns-5dd5756b68-7cnd6" in "kube-system" namespace has status "Ready":"False"
	I1201 18:53:12.990844  258880 ssh_runner.go:362] scp memory --> /var/lib/minikube/google_application_credentials.json (162 bytes)
	I1201 18:53:12.990924  258880 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-488129
	I1201 18:53:13.021435  258880 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33083 SSHKeyPath:/home/jenkins/minikube-integration/17703-252966/.minikube/machines/addons-488129/id_rsa Username:docker}
	I1201 18:53:13.333792  258880 ssh_runner.go:362] scp memory --> /var/lib/minikube/google_cloud_project (12 bytes)
	I1201 18:53:13.454114  258880 addons.go:231] Setting addon gcp-auth=true in "addons-488129"
	I1201 18:53:13.454193  258880 host.go:66] Checking if "addons-488129" exists ...
	I1201 18:53:13.454728  258880 cli_runner.go:164] Run: docker container inspect addons-488129 --format={{.State.Status}}
	I1201 18:53:13.457925  258880 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.4/kubectl apply -f /etc/kubernetes/addons/ingress-deploy.yaml: (6.195157065s)
	I1201 18:53:13.458004  258880 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.4/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml: (6.147589235s)
	I1201 18:53:13.458197  258880 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.4/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml: (5.954769861s)
	I1201 18:53:13.458268  258880 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.4/kubectl apply -f /etc/kubernetes/addons/storage-provisioner-rancher.yaml: (5.900644237s)
	I1201 18:53:13.458371  258880 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.4/kubectl apply -f /etc/kubernetes/addons/registry-rc.yaml -f /etc/kubernetes/addons/registry-svc.yaml -f /etc/kubernetes/addons/registry-proxy.yaml: (5.83528464s)
	I1201 18:53:13.458388  258880 addons.go:467] Verifying addon registry=true in "addons-488129"
	I1201 18:53:13.460651  258880 out.go:177] * Verifying registry addon...
	I1201 18:53:13.458512  258880 addons.go:467] Verifying addon ingress=true in "addons-488129"
	I1201 18:53:13.458693  258880 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.4/kubectl apply -f /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml -f /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml -f /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml: (5.363080733s)
	I1201 18:53:13.458763  258880 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.4/kubectl apply -f /etc/kubernetes/addons/metrics-apiservice.yaml -f /etc/kubernetes/addons/metrics-server-deployment.yaml -f /etc/kubernetes/addons/metrics-server-rbac.yaml -f /etc/kubernetes/addons/metrics-server-service.yaml: (5.081594482s)
	I1201 18:53:13.458843  258880 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.4/kubectl apply -f /etc/kubernetes/addons/ig-namespace.yaml -f /etc/kubernetes/addons/ig-serviceaccount.yaml -f /etc/kubernetes/addons/ig-role.yaml -f /etc/kubernetes/addons/ig-rolebinding.yaml -f /etc/kubernetes/addons/ig-clusterrole.yaml -f /etc/kubernetes/addons/ig-clusterrolebinding.yaml -f /etc/kubernetes/addons/ig-crd.yaml -f /etc/kubernetes/addons/ig-daemonset.yaml: (4.41475832s)
	I1201 18:53:13.463649  258880 kapi.go:75] Waiting for pod with label "kubernetes.io/minikube-addons=registry" in ns "kube-system" ...
	I1201 18:53:13.467387  258880 out.go:177] * Verifying ingress addon...
	W1201 18:53:13.464002  258880 addons.go:449] apply failed, will retry: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.4/kubectl apply -f /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml -f /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml -f /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml: Process exited with status 1
	stdout:
	customresourcedefinition.apiextensions.k8s.io/volumesnapshotclasses.snapshot.storage.k8s.io created
	customresourcedefinition.apiextensions.k8s.io/volumesnapshotcontents.snapshot.storage.k8s.io created
	customresourcedefinition.apiextensions.k8s.io/volumesnapshots.snapshot.storage.k8s.io created
	serviceaccount/snapshot-controller created
	clusterrole.rbac.authorization.k8s.io/snapshot-controller-runner created
	clusterrolebinding.rbac.authorization.k8s.io/snapshot-controller-role created
	role.rbac.authorization.k8s.io/snapshot-controller-leaderelection created
	rolebinding.rbac.authorization.k8s.io/snapshot-controller-leaderelection created
	deployment.apps/snapshot-controller created
	
	stderr:
	error: resource mapping not found for name: "csi-hostpath-snapclass" namespace: "" from "/etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml": no matches for kind "VolumeSnapshotClass" in version "snapshot.storage.k8s.io/v1"
	ensure CRDs are installed first
	I1201 18:53:13.464018  258880 addons.go:467] Verifying addon metrics-server=true in "addons-488129"
	I1201 18:53:13.470248  258880 kapi.go:75] Waiting for pod with label "app.kubernetes.io/name=ingress-nginx" in ns "ingress-nginx" ...
	I1201 18:53:13.470397  258880 retry.go:31] will retry after 177.495854ms: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.4/kubectl apply -f /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml -f /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml -f /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml: Process exited with status 1
	stdout:
	customresourcedefinition.apiextensions.k8s.io/volumesnapshotclasses.snapshot.storage.k8s.io created
	customresourcedefinition.apiextensions.k8s.io/volumesnapshotcontents.snapshot.storage.k8s.io created
	customresourcedefinition.apiextensions.k8s.io/volumesnapshots.snapshot.storage.k8s.io created
	serviceaccount/snapshot-controller created
	clusterrole.rbac.authorization.k8s.io/snapshot-controller-runner created
	clusterrolebinding.rbac.authorization.k8s.io/snapshot-controller-role created
	role.rbac.authorization.k8s.io/snapshot-controller-leaderelection created
	rolebinding.rbac.authorization.k8s.io/snapshot-controller-leaderelection created
	deployment.apps/snapshot-controller created
	
	stderr:
	error: resource mapping not found for name: "csi-hostpath-snapclass" namespace: "" from "/etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml": no matches for kind "VolumeSnapshotClass" in version "snapshot.storage.k8s.io/v1"
	ensure CRDs are installed first
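The failure above is an ordering problem: the VolumeSnapshotClass custom resource is applied in the same batch that creates its CRDs, so the API server has no mapping for the kind yet. The log shows the addon manager handling this by retrying after a short backoff (retry.go:31, ~177ms) and then re-running the batch with kubectl apply --force, which completes cleanly at 18:53:15. A minimal sketch of that retry-with-backoff shape is below; it is an illustration, not minikube's retry.go, and the helper name, manifest list, and paths are assumptions.

```go
// Minimal sketch of the retry-with-backoff pattern visible in the log:
// re-run "kubectl apply" until the CRDs created on the first pass are
// established and the VolumeSnapshotClass applies cleanly.
package main

import (
	"fmt"
	"os/exec"
	"time"
)

// applyAddonManifests is a hypothetical helper; the file list mirrors part of
// the batch in the log, and the kubectl location is assumed to be on PATH.
func applyAddonManifests() error {
	out, err := exec.Command("kubectl", "apply", "--force",
		"-f", "/etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml",
		"-f", "/etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml",
	).CombinedOutput()
	if err != nil {
		return fmt.Errorf("apply failed: %v\n%s", err, out)
	}
	return nil
}

func main() {
	backoff := 200 * time.Millisecond // the log shows an initial ~177ms delay
	for attempt := 1; attempt <= 5; attempt++ {
		if err := applyAddonManifests(); err == nil {
			fmt.Println("addon manifests applied")
			return
		} else {
			fmt.Printf("attempt %d: %v; retrying in %s\n", attempt, err, backoff)
		}
		time.Sleep(backoff)
		backoff *= 2 // simple exponential backoff between attempts
	}
	fmt.Println("giving up after repeated apply failures")
}
```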
	I1201 18:53:13.479628  258880 kapi.go:86] Found 2 Pods for label selector kubernetes.io/minikube-addons=registry
	I1201 18:53:13.479667  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I1201 18:53:13.487353  258880 ssh_runner.go:195] Run: cat /var/lib/minikube/google_application_credentials.json
	I1201 18:53:13.487412  258880 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-488129
	I1201 18:53:13.489778  258880 kapi.go:86] Found 3 Pods for label selector app.kubernetes.io/name=ingress-nginx
	I1201 18:53:13.489808  258880 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	W1201 18:53:13.497030  258880 out.go:239] ! Enabling 'default-storageclass' returned an error: running callbacks: [Error making standard the default storage class: Error while marking storage class local-path as non-default: Operation cannot be fulfilled on storageclasses.storage.k8s.io "local-path": the object has been modified; please apply your changes to the latest version and try again]
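The warning above is Kubernetes' optimistic-concurrency conflict: the local-path storage class was modified between the read and the write, so an update carrying the stale resourceVersion is rejected. The standard client-go handling is retry.RetryOnConflict, which re-reads the object on every attempt; the sketch below illustrates that pattern, with the kubeconfig path and the annotation value as assumptions for illustration rather than minikube's actual code.

```go
// Hedged sketch: handle the "object has been modified" conflict by re-reading
// the storage class before each update attempt, using client-go's
// retry.RetryOnConflict helper.
package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
	"k8s.io/client-go/util/retry"
)

func main() {
	// Assumed kubeconfig path; on the node the log uses /var/lib/minikube/kubeconfig.
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)

	err = retry.RetryOnConflict(retry.DefaultRetry, func() error {
		// Re-fetch the latest version so the resourceVersion is current
		// before writing the object back.
		sc, err := client.StorageV1().StorageClasses().Get(context.TODO(), "local-path", metav1.GetOptions{})
		if err != nil {
			return err
		}
		if sc.Annotations == nil {
			sc.Annotations = map[string]string{}
		}
		sc.Annotations["storageclass.kubernetes.io/is-default-class"] = "false"
		_, err = client.StorageV1().StorageClasses().Update(context.TODO(), sc, metav1.UpdateOptions{})
		return err
	})
	if err != nil {
		fmt.Println("could not mark storage class as non-default:", err)
	}
}
```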
	I1201 18:53:13.501384  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I1201 18:53:13.503920  258880 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I1201 18:53:13.537988  258880 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33083 SSHKeyPath:/home/jenkins/minikube-integration/17703-252966/.minikube/machines/addons-488129/id_rsa Username:docker}
	I1201 18:53:13.648500  258880 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.4/kubectl apply --force -f /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml -f /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml -f /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml
	I1201 18:53:14.050099  258880 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I1201 18:53:14.050761  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I1201 18:53:14.515681  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I1201 18:53:14.521133  258880 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I1201 18:53:14.913711  258880 pod_ready.go:102] pod "coredns-5dd5756b68-7cnd6" in "kube-system" namespace has status "Ready":"False"
	I1201 18:53:14.941832  258880 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.4/kubectl apply -f /etc/kubernetes/addons/rbac-external-attacher.yaml -f /etc/kubernetes/addons/rbac-hostpath.yaml -f /etc/kubernetes/addons/rbac-external-health-monitor-controller.yaml -f /etc/kubernetes/addons/rbac-external-provisioner.yaml -f /etc/kubernetes/addons/rbac-external-resizer.yaml -f /etc/kubernetes/addons/rbac-external-snapshotter.yaml -f /etc/kubernetes/addons/csi-hostpath-attacher.yaml -f /etc/kubernetes/addons/csi-hostpath-driverinfo.yaml -f /etc/kubernetes/addons/csi-hostpath-plugin.yaml -f /etc/kubernetes/addons/csi-hostpath-resizer.yaml -f /etc/kubernetes/addons/csi-hostpath-storageclass.yaml: (5.542993602s)
	I1201 18:53:14.941912  258880 addons.go:467] Verifying addon csi-hostpath-driver=true in "addons-488129"
	I1201 18:53:14.946090  258880 out.go:177] * Verifying csi-hostpath-driver addon...
	I1201 18:53:14.949357  258880 kapi.go:75] Waiting for pod with label "kubernetes.io/minikube-addons=csi-hostpath-driver" in ns "kube-system" ...
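The long run of "waiting for pod ... current state: Pending" lines that follows is a poll loop over pods matched by a label selector until they leave Pending. A rough client-go equivalent of that wait, assuming the selector and namespace from the log line and an out-of-cluster kubeconfig, might look like this (an illustration, not minikube's kapi package):

```go
// Hedged sketch of the logged waiting pattern: poll pods matching a label
// selector until every one reports phase Running, with an overall timeout.
package main

import (
	"context"
	"fmt"
	"time"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)

	err = wait.PollImmediate(500*time.Millisecond, 6*time.Minute, func() (bool, error) {
		pods, err := client.CoreV1().Pods("kube-system").List(context.TODO(), metav1.ListOptions{
			LabelSelector: "kubernetes.io/minikube-addons=csi-hostpath-driver",
		})
		if err != nil {
			return false, err
		}
		if len(pods.Items) == 0 {
			return false, nil // nothing scheduled yet, keep waiting
		}
		for _, p := range pods.Items {
			if p.Status.Phase != corev1.PodRunning {
				return false, nil // at least one pod is still Pending
			}
		}
		return true, nil
	})
	if err != nil {
		fmt.Println("pods never became Running:", err)
		return
	}
	fmt.Println("all csi-hostpath-driver pods are Running")
}
```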
	I1201 18:53:14.991808  258880 kapi.go:86] Found 3 Pods for label selector kubernetes.io/minikube-addons=csi-hostpath-driver
	I1201 18:53:14.991835  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I1201 18:53:15.008262  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I1201 18:53:15.020043  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I1201 18:53:15.022625  258880 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I1201 18:53:15.396262  258880 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.4/kubectl apply --force -f /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml -f /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml -f /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml: (1.747713019s)
	I1201 18:53:15.396365  258880 ssh_runner.go:235] Completed: cat /var/lib/minikube/google_application_credentials.json: (1.908985347s)
	I1201 18:53:15.400114  258880 out.go:177]   - Using image registry.k8s.io/ingress-nginx/kube-webhook-certgen:v20231011-8b53cabe0
	I1201 18:53:15.402622  258880 out.go:177]   - Using image gcr.io/k8s-minikube/gcp-auth-webhook:v0.1.0
	I1201 18:53:15.404883  258880 addons.go:423] installing /etc/kubernetes/addons/gcp-auth-ns.yaml
	I1201 18:53:15.404935  258880 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/gcp-auth-ns.yaml (700 bytes)
	I1201 18:53:15.429348  258880 addons.go:423] installing /etc/kubernetes/addons/gcp-auth-service.yaml
	I1201 18:53:15.429425  258880 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/gcp-auth-service.yaml (788 bytes)
	I1201 18:53:15.453954  258880 addons.go:423] installing /etc/kubernetes/addons/gcp-auth-webhook.yaml
	I1201 18:53:15.454023  258880 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/gcp-auth-webhook.yaml (5432 bytes)
	I1201 18:53:15.481008  258880 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.4/kubectl apply -f /etc/kubernetes/addons/gcp-auth-ns.yaml -f /etc/kubernetes/addons/gcp-auth-service.yaml -f /etc/kubernetes/addons/gcp-auth-webhook.yaml
	I1201 18:53:15.510624  258880 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I1201 18:53:15.513307  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I1201 18:53:15.517919  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I1201 18:53:16.011857  258880 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I1201 18:53:16.014507  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I1201 18:53:16.019861  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I1201 18:53:16.366949  258880 addons.go:467] Verifying addon gcp-auth=true in "addons-488129"
	I1201 18:53:16.369489  258880 out.go:177] * Verifying gcp-auth addon...
	I1201 18:53:16.372408  258880 kapi.go:75] Waiting for pod with label "kubernetes.io/minikube-addons=gcp-auth" in ns "gcp-auth" ...
	I1201 18:53:16.393786  258880 kapi.go:86] Found 1 Pods for label selector kubernetes.io/minikube-addons=gcp-auth
	I1201 18:53:16.393858  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I1201 18:53:16.406981  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I1201 18:53:16.506565  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I1201 18:53:16.510042  258880 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I1201 18:53:16.514577  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I1201 18:53:16.913369  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I1201 18:53:17.009873  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I1201 18:53:17.010853  258880 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I1201 18:53:17.014829  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I1201 18:53:17.386084  258880 pod_ready.go:102] pod "coredns-5dd5756b68-7cnd6" in "kube-system" namespace has status "Ready":"False"
	I1201 18:53:17.411485  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I1201 18:53:17.508800  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I1201 18:53:17.515576  258880 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I1201 18:53:17.517428  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I1201 18:53:17.911746  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I1201 18:53:18.007559  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I1201 18:53:18.016144  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I1201 18:53:18.018764  258880 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I1201 18:53:18.410940  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I1201 18:53:18.509176  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I1201 18:53:18.510170  258880 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I1201 18:53:18.513733  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I1201 18:53:18.912516  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I1201 18:53:19.007291  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I1201 18:53:19.012238  258880 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I1201 18:53:19.018686  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I1201 18:53:19.394378  258880 pod_ready.go:102] pod "coredns-5dd5756b68-7cnd6" in "kube-system" namespace has status "Ready":"False"
	I1201 18:53:19.411080  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I1201 18:53:19.527235  258880 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I1201 18:53:19.528252  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I1201 18:53:19.531706  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I1201 18:53:19.912089  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I1201 18:53:20.023456  258880 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I1201 18:53:20.029214  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I1201 18:53:20.030432  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I1201 18:53:20.411270  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I1201 18:53:20.508499  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I1201 18:53:20.525520  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I1201 18:53:20.526511  258880 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I1201 18:53:20.911165  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I1201 18:53:21.006629  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I1201 18:53:21.011977  258880 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I1201 18:53:21.016769  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I1201 18:53:21.410879  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I1201 18:53:21.508965  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I1201 18:53:21.517121  258880 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I1201 18:53:21.522772  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I1201 18:53:21.886260  258880 pod_ready.go:102] pod "coredns-5dd5756b68-7cnd6" in "kube-system" namespace has status "Ready":"False"
	I1201 18:53:21.910828  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I1201 18:53:22.006851  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I1201 18:53:22.008881  258880 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I1201 18:53:22.017844  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I1201 18:53:22.411405  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I1201 18:53:22.506759  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I1201 18:53:22.509265  258880 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I1201 18:53:22.514273  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I1201 18:53:22.910549  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I1201 18:53:23.008912  258880 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I1201 18:53:23.009351  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I1201 18:53:23.017951  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I1201 18:53:23.411554  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I1201 18:53:23.507896  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I1201 18:53:23.510763  258880 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I1201 18:53:23.515788  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I1201 18:53:23.911804  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I1201 18:53:24.005958  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I1201 18:53:24.010275  258880 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I1201 18:53:24.016847  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I1201 18:53:24.386204  258880 pod_ready.go:102] pod "coredns-5dd5756b68-7cnd6" in "kube-system" namespace has status "Ready":"False"
	I1201 18:53:24.411597  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I1201 18:53:24.507297  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I1201 18:53:24.512053  258880 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I1201 18:53:24.515986  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I1201 18:53:24.911596  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I1201 18:53:25.007019  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I1201 18:53:25.009750  258880 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I1201 18:53:25.016434  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I1201 18:53:25.411168  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I1201 18:53:25.507541  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I1201 18:53:25.508992  258880 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I1201 18:53:25.514804  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I1201 18:53:25.911517  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I1201 18:53:26.007318  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I1201 18:53:26.009755  258880 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I1201 18:53:26.017543  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I1201 18:53:26.386252  258880 pod_ready.go:102] pod "coredns-5dd5756b68-7cnd6" in "kube-system" namespace has status "Ready":"False"
	I1201 18:53:26.411386  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I1201 18:53:26.511409  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I1201 18:53:26.512649  258880 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I1201 18:53:26.517172  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I1201 18:53:26.911324  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I1201 18:53:27.006734  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I1201 18:53:27.009018  258880 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I1201 18:53:27.018653  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I1201 18:53:27.411578  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I1201 18:53:27.507610  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I1201 18:53:27.509775  258880 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I1201 18:53:27.513577  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I1201 18:53:27.911624  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I1201 18:53:28.006692  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I1201 18:53:28.008724  258880 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I1201 18:53:28.014778  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I1201 18:53:28.386386  258880 pod_ready.go:102] pod "coredns-5dd5756b68-7cnd6" in "kube-system" namespace has status "Ready":"False"
	I1201 18:53:28.410460  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I1201 18:53:28.507863  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I1201 18:53:28.510286  258880 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I1201 18:53:28.514747  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I1201 18:53:28.910752  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I1201 18:53:29.007084  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I1201 18:53:29.008431  258880 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I1201 18:53:29.014605  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I1201 18:53:29.413404  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I1201 18:53:29.506007  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I1201 18:53:29.509878  258880 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I1201 18:53:29.514453  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I1201 18:53:29.910758  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I1201 18:53:30.006087  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I1201 18:53:30.009237  258880 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I1201 18:53:30.027395  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I1201 18:53:30.386415  258880 pod_ready.go:102] pod "coredns-5dd5756b68-7cnd6" in "kube-system" namespace has status "Ready":"False"
	I1201 18:53:30.415513  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I1201 18:53:30.506651  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I1201 18:53:30.509845  258880 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I1201 18:53:30.513597  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I1201 18:53:30.911755  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I1201 18:53:31.007472  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I1201 18:53:31.009677  258880 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I1201 18:53:31.016251  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I1201 18:53:31.411098  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I1201 18:53:31.508240  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I1201 18:53:31.509257  258880 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I1201 18:53:31.513649  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I1201 18:53:31.911322  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I1201 18:53:32.007798  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I1201 18:53:32.009701  258880 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I1201 18:53:32.024163  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I1201 18:53:32.410514  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I1201 18:53:32.508939  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I1201 18:53:32.513699  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I1201 18:53:32.514862  258880 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I1201 18:53:32.886736  258880 pod_ready.go:102] pod "coredns-5dd5756b68-7cnd6" in "kube-system" namespace has status "Ready":"False"
	I1201 18:53:32.910920  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I1201 18:53:33.007912  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I1201 18:53:33.009086  258880 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I1201 18:53:33.019029  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I1201 18:53:33.412277  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I1201 18:53:33.509644  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I1201 18:53:33.518396  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I1201 18:53:33.526774  258880 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I1201 18:53:33.911670  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I1201 18:53:34.008622  258880 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I1201 18:53:34.009299  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I1201 18:53:34.015497  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I1201 18:53:34.411723  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I1201 18:53:34.506602  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I1201 18:53:34.509051  258880 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I1201 18:53:34.514003  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I1201 18:53:34.911263  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I1201 18:53:35.006009  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I1201 18:53:35.009171  258880 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I1201 18:53:35.017020  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I1201 18:53:35.386766  258880 pod_ready.go:102] pod "coredns-5dd5756b68-7cnd6" in "kube-system" namespace has status "Ready":"False"
	I1201 18:53:35.412250  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I1201 18:53:35.507108  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I1201 18:53:35.508953  258880 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I1201 18:53:35.513700  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I1201 18:53:35.910702  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I1201 18:53:36.007123  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I1201 18:53:36.014617  258880 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I1201 18:53:36.018834  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I1201 18:53:36.412290  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I1201 18:53:36.505845  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I1201 18:53:36.509092  258880 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I1201 18:53:36.514592  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I1201 18:53:36.912413  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I1201 18:53:37.009562  258880 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I1201 18:53:37.015779  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I1201 18:53:37.020306  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I1201 18:53:37.389777  258880 pod_ready.go:102] pod "coredns-5dd5756b68-7cnd6" in "kube-system" namespace has status "Ready":"False"
	I1201 18:53:37.413361  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I1201 18:53:37.506996  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I1201 18:53:37.509794  258880 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I1201 18:53:37.515142  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I1201 18:53:37.910842  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I1201 18:53:38.007654  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I1201 18:53:38.009531  258880 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I1201 18:53:38.018785  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I1201 18:53:38.411280  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I1201 18:53:38.507522  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I1201 18:53:38.509632  258880 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I1201 18:53:38.516791  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I1201 18:53:38.911079  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I1201 18:53:39.006915  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I1201 18:53:39.009680  258880 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I1201 18:53:39.018879  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I1201 18:53:39.411121  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I1201 18:53:39.507400  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I1201 18:53:39.510262  258880 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I1201 18:53:39.514719  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I1201 18:53:39.886407  258880 pod_ready.go:102] pod "coredns-5dd5756b68-7cnd6" in "kube-system" namespace has status "Ready":"False"
	I1201 18:53:39.914387  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I1201 18:53:40.006084  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I1201 18:53:40.009986  258880 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I1201 18:53:40.020324  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I1201 18:53:40.411784  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I1201 18:53:40.508209  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I1201 18:53:40.511934  258880 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I1201 18:53:40.515399  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I1201 18:53:40.911502  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I1201 18:53:41.009538  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I1201 18:53:41.015871  258880 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I1201 18:53:41.019725  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I1201 18:53:41.411967  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I1201 18:53:41.510542  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I1201 18:53:41.514462  258880 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I1201 18:53:41.519033  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I1201 18:53:41.911388  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I1201 18:53:42.009158  258880 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I1201 18:53:42.009894  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I1201 18:53:42.019556  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I1201 18:53:42.387642  258880 pod_ready.go:102] pod "coredns-5dd5756b68-7cnd6" in "kube-system" namespace has status "Ready":"False"
	I1201 18:53:42.411473  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I1201 18:53:42.539277  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I1201 18:53:42.555090  258880 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I1201 18:53:42.592278  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I1201 18:53:42.911682  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I1201 18:53:43.020339  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I1201 18:53:43.022333  258880 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I1201 18:53:43.027146  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I1201 18:53:43.412021  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I1201 18:53:43.510181  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I1201 18:53:43.511495  258880 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I1201 18:53:43.517484  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I1201 18:53:43.885948  258880 pod_ready.go:92] pod "coredns-5dd5756b68-7cnd6" in "kube-system" namespace has status "Ready":"True"
	I1201 18:53:43.885972  258880 pod_ready.go:81] duration metric: took 37.022737003s waiting for pod "coredns-5dd5756b68-7cnd6" in "kube-system" namespace to be "Ready" ...
	I1201 18:53:43.885985  258880 pod_ready.go:78] waiting up to 6m0s for pod "coredns-5dd5756b68-wd4xp" in "kube-system" namespace to be "Ready" ...
	I1201 18:53:43.888208  258880 pod_ready.go:97] error getting pod "coredns-5dd5756b68-wd4xp" in "kube-system" namespace (skipping!): pods "coredns-5dd5756b68-wd4xp" not found
	I1201 18:53:43.888234  258880 pod_ready.go:81] duration metric: took 2.241032ms waiting for pod "coredns-5dd5756b68-wd4xp" in "kube-system" namespace to be "Ready" ...
	E1201 18:53:43.888245  258880 pod_ready.go:66] WaitExtra: waitPodCondition: error getting pod "coredns-5dd5756b68-wd4xp" in "kube-system" namespace (skipping!): pods "coredns-5dd5756b68-wd4xp" not found
	I1201 18:53:43.888252  258880 pod_ready.go:78] waiting up to 6m0s for pod "etcd-addons-488129" in "kube-system" namespace to be "Ready" ...
	I1201 18:53:43.893736  258880 pod_ready.go:92] pod "etcd-addons-488129" in "kube-system" namespace has status "Ready":"True"
	I1201 18:53:43.893765  258880 pod_ready.go:81] duration metric: took 5.500335ms waiting for pod "etcd-addons-488129" in "kube-system" namespace to be "Ready" ...
	I1201 18:53:43.893781  258880 pod_ready.go:78] waiting up to 6m0s for pod "kube-apiserver-addons-488129" in "kube-system" namespace to be "Ready" ...
	I1201 18:53:43.906259  258880 pod_ready.go:92] pod "kube-apiserver-addons-488129" in "kube-system" namespace has status "Ready":"True"
	I1201 18:53:43.906282  258880 pod_ready.go:81] duration metric: took 12.493427ms waiting for pod "kube-apiserver-addons-488129" in "kube-system" namespace to be "Ready" ...
	I1201 18:53:43.906294  258880 pod_ready.go:78] waiting up to 6m0s for pod "kube-controller-manager-addons-488129" in "kube-system" namespace to be "Ready" ...
	I1201 18:53:43.911117  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I1201 18:53:43.914272  258880 pod_ready.go:92] pod "kube-controller-manager-addons-488129" in "kube-system" namespace has status "Ready":"True"
	I1201 18:53:43.914300  258880 pod_ready.go:81] duration metric: took 7.997259ms waiting for pod "kube-controller-manager-addons-488129" in "kube-system" namespace to be "Ready" ...
	I1201 18:53:43.914313  258880 pod_ready.go:78] waiting up to 6m0s for pod "kube-proxy-7gj6v" in "kube-system" namespace to be "Ready" ...
	I1201 18:53:44.008907  258880 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I1201 18:53:44.009175  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I1201 18:53:44.016085  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I1201 18:53:44.083818  258880 pod_ready.go:92] pod "kube-proxy-7gj6v" in "kube-system" namespace has status "Ready":"True"
	I1201 18:53:44.083845  258880 pod_ready.go:81] duration metric: took 169.523826ms waiting for pod "kube-proxy-7gj6v" in "kube-system" namespace to be "Ready" ...
	I1201 18:53:44.083859  258880 pod_ready.go:78] waiting up to 6m0s for pod "kube-scheduler-addons-488129" in "kube-system" namespace to be "Ready" ...
	I1201 18:53:44.410748  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I1201 18:53:44.483674  258880 pod_ready.go:92] pod "kube-scheduler-addons-488129" in "kube-system" namespace has status "Ready":"True"
	I1201 18:53:44.483703  258880 pod_ready.go:81] duration metric: took 399.835358ms waiting for pod "kube-scheduler-addons-488129" in "kube-system" namespace to be "Ready" ...
	I1201 18:53:44.483714  258880 pod_ready.go:38] duration metric: took 37.631019986s for extra waiting for all system-critical and pods with labels [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] to be "Ready" ...
	I1201 18:53:44.483731  258880 api_server.go:52] waiting for apiserver process to appear ...
	I1201 18:53:44.483798  258880 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
	I1201 18:53:44.499876  258880 api_server.go:72] duration metric: took 38.248000146s to wait for apiserver process to appear ...
	I1201 18:53:44.499906  258880 api_server.go:88] waiting for apiserver healthz status ...
	I1201 18:53:44.499924  258880 api_server.go:253] Checking apiserver healthz at https://192.168.49.2:8443/healthz ...
	I1201 18:53:44.509893  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I1201 18:53:44.511858  258880 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I1201 18:53:44.513134  258880 api_server.go:279] https://192.168.49.2:8443/healthz returned 200:
	ok
	I1201 18:53:44.514496  258880 api_server.go:141] control plane version: v1.28.4
	I1201 18:53:44.514570  258880 api_server.go:131] duration metric: took 14.655575ms to wait for apiserver health ...
	I1201 18:53:44.514599  258880 system_pods.go:43] waiting for kube-system pods to appear ...
	I1201 18:53:44.517891  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I1201 18:53:44.691298  258880 system_pods.go:59] 18 kube-system pods found
	I1201 18:53:44.691332  258880 system_pods.go:61] "coredns-5dd5756b68-7cnd6" [c2fd3dad-e975-43bb-9a8e-374a23034a6b] Running
	I1201 18:53:44.691341  258880 system_pods.go:61] "csi-hostpath-attacher-0" [aac66903-8217-450f-822b-012f260f7029] Pending / Ready:ContainersNotReady (containers with unready status: [csi-attacher]) / ContainersReady:ContainersNotReady (containers with unready status: [csi-attacher])
	I1201 18:53:44.691350  258880 system_pods.go:61] "csi-hostpath-resizer-0" [0c7bc8da-9bc4-4728-897c-e90dbe7970cd] Pending / Ready:ContainersNotReady (containers with unready status: [csi-resizer]) / ContainersReady:ContainersNotReady (containers with unready status: [csi-resizer])
	I1201 18:53:44.691359  258880 system_pods.go:61] "csi-hostpathplugin-tzvpq" [46de56fc-fa53-4164-a6ba-b36576e4ad84] Pending / Ready:ContainersNotReady (containers with unready status: [csi-external-health-monitor-controller node-driver-registrar hostpath liveness-probe csi-provisioner csi-snapshotter]) / ContainersReady:ContainersNotReady (containers with unready status: [csi-external-health-monitor-controller node-driver-registrar hostpath liveness-probe csi-provisioner csi-snapshotter])
	I1201 18:53:44.691366  258880 system_pods.go:61] "etcd-addons-488129" [1cdf3478-7dd2-4aeb-ab50-b40d30796abb] Running
	I1201 18:53:44.691380  258880 system_pods.go:61] "kindnet-vlkvw" [77a27644-f084-4864-82db-bd46a895c2e7] Running
	I1201 18:53:44.691388  258880 system_pods.go:61] "kube-apiserver-addons-488129" [1eee77ec-5c9e-49ab-b45b-52bc9970654f] Running
	I1201 18:53:44.691394  258880 system_pods.go:61] "kube-controller-manager-addons-488129" [90f990ff-4c34-4368-8f87-a016137e4a1f] Running
	I1201 18:53:44.691409  258880 system_pods.go:61] "kube-ingress-dns-minikube" [a8ea6287-3f31-4001-a60a-96eea450815d] Running / Ready:ContainersNotReady (containers with unready status: [minikube-ingress-dns]) / ContainersReady:ContainersNotReady (containers with unready status: [minikube-ingress-dns])
	I1201 18:53:44.691421  258880 system_pods.go:61] "kube-proxy-7gj6v" [a29da598-4eeb-41bf-ba04-578cd98c49d1] Running
	I1201 18:53:44.691427  258880 system_pods.go:61] "kube-scheduler-addons-488129" [598362d5-2808-47d6-80c8-0ae9a4509742] Running
	I1201 18:53:44.691434  258880 system_pods.go:61] "metrics-server-7c66d45ddc-zppw9" [f20afbef-3118-48da-90f0-2c4f64e46b0b] Pending / Ready:ContainersNotReady (containers with unready status: [metrics-server]) / ContainersReady:ContainersNotReady (containers with unready status: [metrics-server])
	I1201 18:53:44.691445  258880 system_pods.go:61] "nvidia-device-plugin-daemonset-c8qcb" [d66b677c-4402-4f2d-972e-f1971fa0edab] Pending / Ready:ContainersNotReady (containers with unready status: [nvidia-device-plugin-ctr]) / ContainersReady:ContainersNotReady (containers with unready status: [nvidia-device-plugin-ctr])
	I1201 18:53:44.691453  258880 system_pods.go:61] "registry-proxy-wbp5h" [fbc66db0-d824-4235-9b7f-01ca7a98db44] Pending / Ready:ContainersNotReady (containers with unready status: [registry-proxy]) / ContainersReady:ContainersNotReady (containers with unready status: [registry-proxy])
	I1201 18:53:44.691461  258880 system_pods.go:61] "registry-rc92j" [2efe823d-0a41-44bf-8c9f-092cbf70ad2d] Pending / Ready:ContainersNotReady (containers with unready status: [registry]) / ContainersReady:ContainersNotReady (containers with unready status: [registry])
	I1201 18:53:44.691471  258880 system_pods.go:61] "snapshot-controller-58dbcc7b99-5j6xt" [e5536eaf-5588-42ba-8e97-2d886e17b480] Pending / Ready:ContainersNotReady (containers with unready status: [volume-snapshot-controller]) / ContainersReady:ContainersNotReady (containers with unready status: [volume-snapshot-controller])
	I1201 18:53:44.691482  258880 system_pods.go:61] "snapshot-controller-58dbcc7b99-nthll" [20526f5e-4d91-4ed0-8815-8628a7f85781] Pending / Ready:ContainersNotReady (containers with unready status: [volume-snapshot-controller]) / ContainersReady:ContainersNotReady (containers with unready status: [volume-snapshot-controller])
	I1201 18:53:44.691488  258880 system_pods.go:61] "storage-provisioner" [9da2e018-961a-4849-a6af-2667470a6ab9] Running
	I1201 18:53:44.691505  258880 system_pods.go:74] duration metric: took 176.886797ms to wait for pod list to return data ...
	I1201 18:53:44.691514  258880 default_sa.go:34] waiting for default service account to be created ...
	I1201 18:53:44.882383  258880 default_sa.go:45] found service account: "default"
	I1201 18:53:44.882411  258880 default_sa.go:55] duration metric: took 190.88497ms for default service account to be created ...
	I1201 18:53:44.882421  258880 system_pods.go:116] waiting for k8s-apps to be running ...
	I1201 18:53:44.911807  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I1201 18:53:45.006963  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I1201 18:53:45.009305  258880 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I1201 18:53:45.027625  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I1201 18:53:45.109501  258880 system_pods.go:86] 18 kube-system pods found
	I1201 18:53:45.109544  258880 system_pods.go:89] "coredns-5dd5756b68-7cnd6" [c2fd3dad-e975-43bb-9a8e-374a23034a6b] Running
	I1201 18:53:45.109557  258880 system_pods.go:89] "csi-hostpath-attacher-0" [aac66903-8217-450f-822b-012f260f7029] Pending / Ready:ContainersNotReady (containers with unready status: [csi-attacher]) / ContainersReady:ContainersNotReady (containers with unready status: [csi-attacher])
	I1201 18:53:45.109566  258880 system_pods.go:89] "csi-hostpath-resizer-0" [0c7bc8da-9bc4-4728-897c-e90dbe7970cd] Pending / Ready:ContainersNotReady (containers with unready status: [csi-resizer]) / ContainersReady:ContainersNotReady (containers with unready status: [csi-resizer])
	I1201 18:53:45.109577  258880 system_pods.go:89] "csi-hostpathplugin-tzvpq" [46de56fc-fa53-4164-a6ba-b36576e4ad84] Pending / Ready:ContainersNotReady (containers with unready status: [csi-external-health-monitor-controller node-driver-registrar hostpath liveness-probe csi-provisioner csi-snapshotter]) / ContainersReady:ContainersNotReady (containers with unready status: [csi-external-health-monitor-controller node-driver-registrar hostpath liveness-probe csi-provisioner csi-snapshotter])
	I1201 18:53:45.109584  258880 system_pods.go:89] "etcd-addons-488129" [1cdf3478-7dd2-4aeb-ab50-b40d30796abb] Running
	I1201 18:53:45.109590  258880 system_pods.go:89] "kindnet-vlkvw" [77a27644-f084-4864-82db-bd46a895c2e7] Running
	I1201 18:53:45.109596  258880 system_pods.go:89] "kube-apiserver-addons-488129" [1eee77ec-5c9e-49ab-b45b-52bc9970654f] Running
	I1201 18:53:45.109602  258880 system_pods.go:89] "kube-controller-manager-addons-488129" [90f990ff-4c34-4368-8f87-a016137e4a1f] Running
	I1201 18:53:45.109611  258880 system_pods.go:89] "kube-ingress-dns-minikube" [a8ea6287-3f31-4001-a60a-96eea450815d] Running / Ready:ContainersNotReady (containers with unready status: [minikube-ingress-dns]) / ContainersReady:ContainersNotReady (containers with unready status: [minikube-ingress-dns])
	I1201 18:53:45.109619  258880 system_pods.go:89] "kube-proxy-7gj6v" [a29da598-4eeb-41bf-ba04-578cd98c49d1] Running
	I1201 18:53:45.109625  258880 system_pods.go:89] "kube-scheduler-addons-488129" [598362d5-2808-47d6-80c8-0ae9a4509742] Running
	I1201 18:53:45.109635  258880 system_pods.go:89] "metrics-server-7c66d45ddc-zppw9" [f20afbef-3118-48da-90f0-2c4f64e46b0b] Pending / Ready:ContainersNotReady (containers with unready status: [metrics-server]) / ContainersReady:ContainersNotReady (containers with unready status: [metrics-server])
	I1201 18:53:45.109644  258880 system_pods.go:89] "nvidia-device-plugin-daemonset-c8qcb" [d66b677c-4402-4f2d-972e-f1971fa0edab] Pending / Ready:ContainersNotReady (containers with unready status: [nvidia-device-plugin-ctr]) / ContainersReady:ContainersNotReady (containers with unready status: [nvidia-device-plugin-ctr])
	I1201 18:53:45.109655  258880 system_pods.go:89] "registry-proxy-wbp5h" [fbc66db0-d824-4235-9b7f-01ca7a98db44] Pending / Ready:ContainersNotReady (containers with unready status: [registry-proxy]) / ContainersReady:ContainersNotReady (containers with unready status: [registry-proxy])
	I1201 18:53:45.109663  258880 system_pods.go:89] "registry-rc92j" [2efe823d-0a41-44bf-8c9f-092cbf70ad2d] Pending / Ready:ContainersNotReady (containers with unready status: [registry]) / ContainersReady:ContainersNotReady (containers with unready status: [registry])
	I1201 18:53:45.109671  258880 system_pods.go:89] "snapshot-controller-58dbcc7b99-5j6xt" [e5536eaf-5588-42ba-8e97-2d886e17b480] Pending / Ready:ContainersNotReady (containers with unready status: [volume-snapshot-controller]) / ContainersReady:ContainersNotReady (containers with unready status: [volume-snapshot-controller])
	I1201 18:53:45.109678  258880 system_pods.go:89] "snapshot-controller-58dbcc7b99-nthll" [20526f5e-4d91-4ed0-8815-8628a7f85781] Pending / Ready:ContainersNotReady (containers with unready status: [volume-snapshot-controller]) / ContainersReady:ContainersNotReady (containers with unready status: [volume-snapshot-controller])
	I1201 18:53:45.109689  258880 system_pods.go:89] "storage-provisioner" [9da2e018-961a-4849-a6af-2667470a6ab9] Running
	I1201 18:53:45.109698  258880 system_pods.go:126] duration metric: took 227.270198ms to wait for k8s-apps to be running ...
	I1201 18:53:45.109706  258880 system_svc.go:44] waiting for kubelet service to be running ....
	I1201 18:53:45.109774  258880 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet
	I1201 18:53:45.136174  258880 system_svc.go:56] duration metric: took 26.455293ms WaitForService to wait for kubelet.
	I1201 18:53:45.136266  258880 kubeadm.go:581] duration metric: took 38.884398837s to wait for : map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] ...
	I1201 18:53:45.136342  258880 node_conditions.go:102] verifying NodePressure condition ...
	I1201 18:53:45.287631  258880 node_conditions.go:122] node storage ephemeral capacity is 203034800Ki
	I1201 18:53:45.287673  258880 node_conditions.go:123] node cpu capacity is 2
	I1201 18:53:45.287688  258880 node_conditions.go:105] duration metric: took 151.301446ms to run NodePressure ...
	I1201 18:53:45.287706  258880 start.go:228] waiting for startup goroutines ...
	I1201 18:53:45.410869  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I1201 18:53:45.507737  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I1201 18:53:45.510465  258880 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I1201 18:53:45.514280  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I1201 18:53:45.916512  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I1201 18:53:46.019469  258880 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I1201 18:53:46.020417  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I1201 18:53:46.025655  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I1201 18:53:46.411945  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I1201 18:53:46.510524  258880 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I1201 18:53:46.514648  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I1201 18:53:46.518698  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I1201 18:53:46.911866  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I1201 18:53:47.023387  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I1201 18:53:47.032423  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I1201 18:53:47.033719  258880 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I1201 18:53:47.412792  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I1201 18:53:47.509335  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I1201 18:53:47.518257  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I1201 18:53:47.519322  258880 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I1201 18:53:47.912094  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I1201 18:53:48.022735  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I1201 18:53:48.023749  258880 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I1201 18:53:48.025546  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I1201 18:53:48.411831  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I1201 18:53:48.509057  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I1201 18:53:48.516966  258880 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I1201 18:53:48.521806  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I1201 18:53:48.912487  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I1201 18:53:49.009830  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I1201 18:53:49.016277  258880 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I1201 18:53:49.024105  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I1201 18:53:49.412797  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I1201 18:53:49.511531  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I1201 18:53:49.512449  258880 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I1201 18:53:49.518376  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I1201 18:53:49.912244  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I1201 18:53:50.017659  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I1201 18:53:50.023994  258880 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I1201 18:53:50.033456  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I1201 18:53:50.411372  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I1201 18:53:50.506085  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I1201 18:53:50.509465  258880 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I1201 18:53:50.513983  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I1201 18:53:50.911005  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I1201 18:53:51.016148  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I1201 18:53:51.018387  258880 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I1201 18:53:51.022332  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I1201 18:53:51.411539  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I1201 18:53:51.514279  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I1201 18:53:51.517814  258880 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I1201 18:53:51.519406  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I1201 18:53:51.910754  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I1201 18:53:52.013701  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I1201 18:53:52.014793  258880 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I1201 18:53:52.017462  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I1201 18:53:52.411523  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I1201 18:53:52.508030  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I1201 18:53:52.513766  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I1201 18:53:52.514764  258880 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I1201 18:53:52.911270  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I1201 18:53:53.006833  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I1201 18:53:53.018405  258880 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I1201 18:53:53.020162  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I1201 18:53:53.411018  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I1201 18:53:53.510790  258880 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I1201 18:53:53.511896  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I1201 18:53:53.519735  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I1201 18:53:53.911165  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I1201 18:53:54.007312  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I1201 18:53:54.035221  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I1201 18:53:54.038312  258880 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I1201 18:53:54.412025  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I1201 18:53:54.507881  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I1201 18:53:54.509550  258880 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I1201 18:53:54.514970  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I1201 18:53:54.912603  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I1201 18:53:55.009199  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I1201 18:53:55.015761  258880 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I1201 18:53:55.024365  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I1201 18:53:55.412416  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I1201 18:53:55.506448  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I1201 18:53:55.510132  258880 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I1201 18:53:55.514193  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I1201 18:53:55.915731  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I1201 18:53:56.007309  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I1201 18:53:56.010467  258880 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I1201 18:53:56.016036  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I1201 18:53:56.411442  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I1201 18:53:56.506091  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I1201 18:53:56.509010  258880 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I1201 18:53:56.513662  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I1201 18:53:56.910412  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I1201 18:53:57.006861  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I1201 18:53:57.008957  258880 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I1201 18:53:57.017556  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I1201 18:53:57.411579  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I1201 18:53:57.508356  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I1201 18:53:57.513752  258880 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I1201 18:53:57.517629  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I1201 18:53:57.911033  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I1201 18:53:58.007355  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I1201 18:53:58.012627  258880 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I1201 18:53:58.017953  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I1201 18:53:58.411226  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I1201 18:53:58.509074  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I1201 18:53:58.511886  258880 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I1201 18:53:58.515154  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I1201 18:53:58.910814  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I1201 18:53:59.006091  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I1201 18:53:59.008860  258880 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I1201 18:53:59.017022  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I1201 18:53:59.410827  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I1201 18:53:59.506733  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I1201 18:53:59.509591  258880 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I1201 18:53:59.514624  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I1201 18:53:59.912704  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I1201 18:54:00.009441  258880 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I1201 18:54:00.021019  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I1201 18:54:00.036881  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I1201 18:54:00.410845  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I1201 18:54:00.509837  258880 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I1201 18:54:00.511933  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I1201 18:54:00.515361  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I1201 18:54:00.911665  258880 kapi.go:107] duration metric: took 44.539254912s to wait for kubernetes.io/minikube-addons=gcp-auth ...
	I1201 18:54:00.914244  258880 out.go:177] * Your GCP credentials will now be mounted into every pod created in the addons-488129 cluster.
	I1201 18:54:00.916356  258880 out.go:177] * If you don't want your credentials mounted into a specific pod, add a label with the `gcp-auth-skip-secret` key to your pod configuration.
	I1201 18:54:00.918354  258880 out.go:177] * If you want existing pods to be mounted with credentials, either recreate them or rerun addons enable with --refresh.
	I1201 18:54:01.008790  258880 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I1201 18:54:01.013032  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I1201 18:54:01.020430  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I1201 18:54:01.506736  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I1201 18:54:01.510041  258880 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I1201 18:54:01.515070  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I1201 18:54:02.008391  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I1201 18:54:02.009172  258880 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I1201 18:54:02.022303  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I1201 18:54:02.507487  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I1201 18:54:02.512428  258880 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I1201 18:54:02.515450  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I1201 18:54:03.008343  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I1201 18:54:03.014104  258880 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I1201 18:54:03.018312  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I1201 18:54:03.506596  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I1201 18:54:03.511663  258880 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I1201 18:54:03.516842  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I1201 18:54:04.006559  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I1201 18:54:04.009892  258880 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I1201 18:54:04.016576  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I1201 18:54:04.508349  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I1201 18:54:04.515058  258880 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I1201 18:54:04.517383  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I1201 18:54:05.006639  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I1201 18:54:05.013674  258880 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I1201 18:54:05.019975  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I1201 18:54:05.506488  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I1201 18:54:05.509720  258880 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I1201 18:54:05.514823  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I1201 18:54:06.009326  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I1201 18:54:06.014933  258880 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I1201 18:54:06.019071  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I1201 18:54:06.506340  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I1201 18:54:06.509841  258880 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I1201 18:54:06.516168  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I1201 18:54:07.006609  258880 kapi.go:107] duration metric: took 53.542967914s to wait for kubernetes.io/minikube-addons=registry ...
	I1201 18:54:07.013917  258880 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I1201 18:54:07.017361  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I1201 18:54:07.508972  258880 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I1201 18:54:07.514947  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I1201 18:54:08.009226  258880 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I1201 18:54:08.019744  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I1201 18:54:08.509751  258880 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I1201 18:54:08.516797  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I1201 18:54:09.009548  258880 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I1201 18:54:09.018751  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I1201 18:54:09.508689  258880 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I1201 18:54:09.514217  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I1201 18:54:10.008643  258880 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I1201 18:54:10.014599  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I1201 18:54:10.509472  258880 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I1201 18:54:10.514847  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I1201 18:54:11.008353  258880 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I1201 18:54:11.015039  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I1201 18:54:11.508675  258880 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I1201 18:54:11.514726  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I1201 18:54:12.009363  258880 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I1201 18:54:12.015794  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I1201 18:54:12.509063  258880 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I1201 18:54:12.514081  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I1201 18:54:13.009117  258880 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I1201 18:54:13.015507  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I1201 18:54:13.508267  258880 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I1201 18:54:13.513462  258880 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I1201 18:54:14.011836  258880 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I1201 18:54:14.027346  258880 kapi.go:107] duration metric: took 59.077984238s to wait for kubernetes.io/minikube-addons=csi-hostpath-driver ...
	I1201 18:54:14.509102  258880 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I1201 18:54:15.008514  258880 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I1201 18:54:15.508267  258880 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I1201 18:54:16.009251  258880 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I1201 18:54:16.508975  258880 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I1201 18:54:17.008850  258880 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I1201 18:54:17.508797  258880 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I1201 18:54:18.009096  258880 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I1201 18:54:18.508790  258880 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I1201 18:54:19.008539  258880 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I1201 18:54:19.509157  258880 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I1201 18:54:20.008548  258880 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I1201 18:54:20.508315  258880 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I1201 18:54:21.009050  258880 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I1201 18:54:21.509121  258880 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I1201 18:54:22.009035  258880 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I1201 18:54:22.508827  258880 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I1201 18:54:23.010345  258880 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I1201 18:54:23.509085  258880 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I1201 18:54:24.011104  258880 kapi.go:107] duration metric: took 1m10.540852385s to wait for app.kubernetes.io/name=ingress-nginx ...
	I1201 18:54:24.013260  258880 out.go:177] * Enabled addons: nvidia-device-plugin, cloud-spanner, ingress-dns, storage-provisioner, inspektor-gadget, metrics-server, storage-provisioner-rancher, volumesnapshots, gcp-auth, registry, csi-hostpath-driver, ingress
	I1201 18:54:24.015444  258880 addons.go:502] enable addons completed in 1m18.171969057s: enabled=[nvidia-device-plugin cloud-spanner ingress-dns storage-provisioner inspektor-gadget metrics-server storage-provisioner-rancher volumesnapshots gcp-auth registry csi-hostpath-driver ingress]
	I1201 18:54:24.015508  258880 start.go:233] waiting for cluster config update ...
	I1201 18:54:24.015531  258880 start.go:242] writing updated cluster config ...
	I1201 18:54:24.015875  258880 ssh_runner.go:195] Run: rm -f paused
	I1201 18:54:24.342659  258880 start.go:600] kubectl: 1.28.4, cluster: 1.28.4 (minor skew: 0)
	I1201 18:54:24.344758  258880 out.go:177] * Done! kubectl is now configured to use "addons-488129" cluster and "default" namespace by default
	
	* 
	* ==> container status <==
	* CONTAINER           IMAGE               CREATED             STATE               NAME                      ATTEMPT             POD ID              POD
	3b9be2d2fd676       dd1b12fcb6097       6 seconds ago       Exited              hello-world-app           2                   e10d4940f4fa4       hello-world-app-5d77478584-24z8n
	b124ff6b18bf5       f09fc93534f6a       33 seconds ago      Running             nginx                     0                   84b4b13fe9d5d       nginx
	d1674986f6b83       14b04e7ab95a8       50 seconds ago      Running             headlamp                  0                   0c0b84ed712fb       headlamp-777fd4b855-n5kz2
	af29b6684aed3       af594c6a879f2       2 minutes ago       Exited              patch                     2                   8a5c99c0d61bb       ingress-nginx-admission-patch-5694m
	7874cfff5c364       2a5f29343eb03       2 minutes ago       Running             gcp-auth                  0                   bbc59a27978ac       gcp-auth-d4c87556c-bg8zr
	1e8ac2fbf06b1       af594c6a879f2       2 minutes ago       Exited              create                    0                   7b810b8bc0f75       ingress-nginx-admission-create-8bd7j
	a7a755c4d1e6a       97e04611ad434       2 minutes ago       Running             coredns                   0                   6c6ed488b86cd       coredns-5dd5756b68-7cnd6
	4a1990b8f0d3a       ba04bb24b9575       3 minutes ago       Running             storage-provisioner       0                   92bcd0bdea585       storage-provisioner
	22b7e6d47d9f4       04b4eaa3d3db8       3 minutes ago       Running             kindnet-cni               0                   c2a07d743c346       kindnet-vlkvw
	dc03101da0017       3ca3ca488cf13       3 minutes ago       Running             kube-proxy                0                   22b748ffc9f37       kube-proxy-7gj6v
	480e877a31796       04b4c447bb9d4       3 minutes ago       Running             kube-apiserver            0                   e3e6c06c7bd7a       kube-apiserver-addons-488129
	d7d1b3ff7e10d       9961cbceaf234       3 minutes ago       Running             kube-controller-manager   0                   4d85cb24956c1       kube-controller-manager-addons-488129
	026c258cdfa60       9cdd6470f48c8       3 minutes ago       Running             etcd                      0                   bdd0ce7e7a166       etcd-addons-488129
	a2dc008f66885       05c284c929889       3 minutes ago       Running             kube-scheduler            0                   6d9cc4e7ba5dd       kube-scheduler-addons-488129
	
	* 
	* ==> containerd <==
	* Dec 01 18:56:19 addons-488129 containerd[747]: time="2023-12-01T18:56:19.279966583Z" level=info msg="StartContainer for \"3b9be2d2fd676b8e02da39b06d73a9a8ffca8bf2cc6d19c7b3ab27de9ad9ff9b\""
	Dec 01 18:56:19 addons-488129 containerd[747]: time="2023-12-01T18:56:19.354417287Z" level=info msg="StartContainer for \"3b9be2d2fd676b8e02da39b06d73a9a8ffca8bf2cc6d19c7b3ab27de9ad9ff9b\" returns successfully"
	Dec 01 18:56:19 addons-488129 containerd[747]: time="2023-12-01T18:56:19.379680068Z" level=info msg="shim disconnected" id=3b9be2d2fd676b8e02da39b06d73a9a8ffca8bf2cc6d19c7b3ab27de9ad9ff9b
	Dec 01 18:56:19 addons-488129 containerd[747]: time="2023-12-01T18:56:19.379742025Z" level=warning msg="cleaning up after shim disconnected" id=3b9be2d2fd676b8e02da39b06d73a9a8ffca8bf2cc6d19c7b3ab27de9ad9ff9b namespace=k8s.io
	Dec 01 18:56:19 addons-488129 containerd[747]: time="2023-12-01T18:56:19.379753143Z" level=info msg="cleaning up dead shim"
	Dec 01 18:56:19 addons-488129 containerd[747]: time="2023-12-01T18:56:19.402713993Z" level=warning msg="cleanup warnings time=\"2023-12-01T18:56:19Z\" level=info msg=\"starting signal loop\" namespace=k8s.io pid=11984 runtime=io.containerd.runc.v2\n"
	Dec 01 18:56:19 addons-488129 containerd[747]: time="2023-12-01T18:56:19.497743747Z" level=info msg="RemoveContainer for \"3e1d7966fcbbc004ff70a114d141da0de283e28cfa2c704761eb02d18601bb7f\""
	Dec 01 18:56:19 addons-488129 containerd[747]: time="2023-12-01T18:56:19.504551916Z" level=info msg="RemoveContainer for \"3e1d7966fcbbc004ff70a114d141da0de283e28cfa2c704761eb02d18601bb7f\" returns successfully"
	Dec 01 18:56:20 addons-488129 containerd[747]: time="2023-12-01T18:56:20.255106350Z" level=info msg="Kill container \"5a4e71eefda89dc132d689de97311f45998966a5f8aac13d421d0469d74200f7\""
	Dec 01 18:56:20 addons-488129 containerd[747]: time="2023-12-01T18:56:20.345865280Z" level=info msg="shim disconnected" id=5a4e71eefda89dc132d689de97311f45998966a5f8aac13d421d0469d74200f7
	Dec 01 18:56:20 addons-488129 containerd[747]: time="2023-12-01T18:56:20.345988094Z" level=warning msg="cleaning up after shim disconnected" id=5a4e71eefda89dc132d689de97311f45998966a5f8aac13d421d0469d74200f7 namespace=k8s.io
	Dec 01 18:56:20 addons-488129 containerd[747]: time="2023-12-01T18:56:20.346001181Z" level=info msg="cleaning up dead shim"
	Dec 01 18:56:20 addons-488129 containerd[747]: time="2023-12-01T18:56:20.357985963Z" level=warning msg="cleanup warnings time=\"2023-12-01T18:56:20Z\" level=info msg=\"starting signal loop\" namespace=k8s.io pid=12016 runtime=io.containerd.runc.v2\n"
	Dec 01 18:56:20 addons-488129 containerd[747]: time="2023-12-01T18:56:20.361048247Z" level=info msg="StopContainer for \"5a4e71eefda89dc132d689de97311f45998966a5f8aac13d421d0469d74200f7\" returns successfully"
	Dec 01 18:56:20 addons-488129 containerd[747]: time="2023-12-01T18:56:20.361639410Z" level=info msg="StopPodSandbox for \"c23792e09593c7229d6134fd552a8ac761fc4fcd72a4d1aff427f42c0ebd6cfb\""
	Dec 01 18:56:20 addons-488129 containerd[747]: time="2023-12-01T18:56:20.361715012Z" level=info msg="Container to stop \"5a4e71eefda89dc132d689de97311f45998966a5f8aac13d421d0469d74200f7\" must be in running or unknown state, current state \"CONTAINER_EXITED\""
	Dec 01 18:56:20 addons-488129 containerd[747]: time="2023-12-01T18:56:20.394386011Z" level=info msg="shim disconnected" id=c23792e09593c7229d6134fd552a8ac761fc4fcd72a4d1aff427f42c0ebd6cfb
	Dec 01 18:56:20 addons-488129 containerd[747]: time="2023-12-01T18:56:20.394653506Z" level=warning msg="cleaning up after shim disconnected" id=c23792e09593c7229d6134fd552a8ac761fc4fcd72a4d1aff427f42c0ebd6cfb namespace=k8s.io
	Dec 01 18:56:20 addons-488129 containerd[747]: time="2023-12-01T18:56:20.394682897Z" level=info msg="cleaning up dead shim"
	Dec 01 18:56:20 addons-488129 containerd[747]: time="2023-12-01T18:56:20.405962596Z" level=warning msg="cleanup warnings time=\"2023-12-01T18:56:20Z\" level=info msg=\"starting signal loop\" namespace=k8s.io pid=12047 runtime=io.containerd.runc.v2\n"
	Dec 01 18:56:20 addons-488129 containerd[747]: time="2023-12-01T18:56:20.461357744Z" level=info msg="TearDown network for sandbox \"c23792e09593c7229d6134fd552a8ac761fc4fcd72a4d1aff427f42c0ebd6cfb\" successfully"
	Dec 01 18:56:20 addons-488129 containerd[747]: time="2023-12-01T18:56:20.461543582Z" level=info msg="StopPodSandbox for \"c23792e09593c7229d6134fd552a8ac761fc4fcd72a4d1aff427f42c0ebd6cfb\" returns successfully"
	Dec 01 18:56:20 addons-488129 containerd[747]: time="2023-12-01T18:56:20.500920744Z" level=info msg="RemoveContainer for \"5a4e71eefda89dc132d689de97311f45998966a5f8aac13d421d0469d74200f7\""
	Dec 01 18:56:20 addons-488129 containerd[747]: time="2023-12-01T18:56:20.506925525Z" level=info msg="RemoveContainer for \"5a4e71eefda89dc132d689de97311f45998966a5f8aac13d421d0469d74200f7\" returns successfully"
	Dec 01 18:56:20 addons-488129 containerd[747]: time="2023-12-01T18:56:20.507727633Z" level=error msg="ContainerStatus for \"5a4e71eefda89dc132d689de97311f45998966a5f8aac13d421d0469d74200f7\" failed" error="rpc error: code = NotFound desc = an error occurred when try to find container \"5a4e71eefda89dc132d689de97311f45998966a5f8aac13d421d0469d74200f7\": not found"
	
	* 
	* ==> coredns [a7a755c4d1e6a1b5852c8697a0454a39e220b117769dc8c4a27f5664aac525a6] <==
	* [INFO] 10.244.0.19:47294 - 62336 "AAAA IN hello-world-app.default.svc.cluster.local.cluster.local. udp 73 false 512" NXDOMAIN qr,aa,rd 166 0.000143976s
	[INFO] 10.244.0.19:39404 - 47854 "A IN hello-world-app.default.svc.cluster.local.us-east-2.compute.internal. udp 86 false 512" NXDOMAIN qr,rd,ra 86 0.001873065s
	[INFO] 10.244.0.19:47294 - 47718 "A IN hello-world-app.default.svc.cluster.local.us-east-2.compute.internal. udp 86 false 512" NXDOMAIN qr,rd,ra 86 0.001772249s
	[INFO] 10.244.0.19:39404 - 33948 "AAAA IN hello-world-app.default.svc.cluster.local.us-east-2.compute.internal. udp 86 false 512" NXDOMAIN qr,rd,ra 86 0.002283995s
	[INFO] 10.244.0.19:47294 - 38352 "AAAA IN hello-world-app.default.svc.cluster.local.us-east-2.compute.internal. udp 86 false 512" NXDOMAIN qr,rd,ra 86 0.001870144s
	[INFO] 10.244.0.19:39404 - 61130 "A IN hello-world-app.default.svc.cluster.local. udp 59 false 512" NOERROR qr,aa,rd 116 0.000100923s
	[INFO] 10.244.0.19:47294 - 13822 "A IN hello-world-app.default.svc.cluster.local. udp 59 false 512" NOERROR qr,aa,rd 116 0.00004549s
	[INFO] 10.244.0.19:33661 - 42506 "A IN hello-world-app.default.svc.cluster.local.ingress-nginx.svc.cluster.local. udp 91 false 512" NXDOMAIN qr,aa,rd 184 0.000111869s
	[INFO] 10.244.0.19:33661 - 36188 "AAAA IN hello-world-app.default.svc.cluster.local.ingress-nginx.svc.cluster.local. udp 91 false 512" NXDOMAIN qr,aa,rd 184 0.000052456s
	[INFO] 10.244.0.19:53347 - 39679 "A IN hello-world-app.default.svc.cluster.local.ingress-nginx.svc.cluster.local. udp 91 false 512" NXDOMAIN qr,aa,rd 184 0.00003529s
	[INFO] 10.244.0.19:33661 - 46126 "A IN hello-world-app.default.svc.cluster.local.svc.cluster.local. udp 77 false 512" NXDOMAIN qr,aa,rd 170 0.000030457s
	[INFO] 10.244.0.19:53347 - 10354 "AAAA IN hello-world-app.default.svc.cluster.local.ingress-nginx.svc.cluster.local. udp 91 false 512" NXDOMAIN qr,aa,rd 184 0.000043225s
	[INFO] 10.244.0.19:33661 - 22714 "AAAA IN hello-world-app.default.svc.cluster.local.svc.cluster.local. udp 77 false 512" NXDOMAIN qr,aa,rd 170 0.000030777s
	[INFO] 10.244.0.19:53347 - 36567 "A IN hello-world-app.default.svc.cluster.local.svc.cluster.local. udp 77 false 512" NXDOMAIN qr,aa,rd 170 0.000026158s
	[INFO] 10.244.0.19:33661 - 38212 "A IN hello-world-app.default.svc.cluster.local.cluster.local. udp 73 false 512" NXDOMAIN qr,aa,rd 166 0.000026864s
	[INFO] 10.244.0.19:53347 - 53381 "AAAA IN hello-world-app.default.svc.cluster.local.svc.cluster.local. udp 77 false 512" NXDOMAIN qr,aa,rd 170 0.00002537s
	[INFO] 10.244.0.19:33661 - 5545 "AAAA IN hello-world-app.default.svc.cluster.local.cluster.local. udp 73 false 512" NXDOMAIN qr,aa,rd 166 0.000051102s
	[INFO] 10.244.0.19:53347 - 12703 "A IN hello-world-app.default.svc.cluster.local.cluster.local. udp 73 false 512" NXDOMAIN qr,aa,rd 166 0.000030236s
	[INFO] 10.244.0.19:53347 - 5299 "AAAA IN hello-world-app.default.svc.cluster.local.cluster.local. udp 73 false 512" NXDOMAIN qr,aa,rd 166 0.000104795s
	[INFO] 10.244.0.19:33661 - 58257 "A IN hello-world-app.default.svc.cluster.local.us-east-2.compute.internal. udp 86 false 512" NXDOMAIN qr,rd,ra 86 0.003636437s
	[INFO] 10.244.0.19:53347 - 64032 "A IN hello-world-app.default.svc.cluster.local.us-east-2.compute.internal. udp 86 false 512" NXDOMAIN qr,rd,ra 86 0.003391416s
	[INFO] 10.244.0.19:53347 - 43518 "AAAA IN hello-world-app.default.svc.cluster.local.us-east-2.compute.internal. udp 86 false 512" NXDOMAIN qr,rd,ra 86 0.00089202s
	[INFO] 10.244.0.19:53347 - 37749 "A IN hello-world-app.default.svc.cluster.local. udp 59 false 512" NOERROR qr,aa,rd 116 0.000061661s
	[INFO] 10.244.0.19:33661 - 51374 "AAAA IN hello-world-app.default.svc.cluster.local.us-east-2.compute.internal. udp 86 false 512" NXDOMAIN qr,rd,ra 86 0.006381781s
	[INFO] 10.244.0.19:33661 - 12409 "A IN hello-world-app.default.svc.cluster.local. udp 59 false 512" NOERROR qr,aa,rd 116 0.000108709s
	
	* 
	* ==> describe nodes <==
	* Name:               addons-488129
	Roles:              control-plane
	Labels:             beta.kubernetes.io/arch=arm64
	                    beta.kubernetes.io/os=linux
	                    kubernetes.io/arch=arm64
	                    kubernetes.io/hostname=addons-488129
	                    kubernetes.io/os=linux
	                    minikube.k8s.io/commit=a7798054841a94294fc1e610bab097fa7942f774
	                    minikube.k8s.io/name=addons-488129
	                    minikube.k8s.io/primary=true
	                    minikube.k8s.io/updated_at=2023_12_01T18_52_54_0700
	                    minikube.k8s.io/version=v1.32.0
	                    node-role.kubernetes.io/control-plane=
	                    node.kubernetes.io/exclude-from-external-load-balancers=
	                    topology.hostpath.csi/node=addons-488129
	Annotations:        kubeadm.alpha.kubernetes.io/cri-socket: unix:///run/containerd/containerd.sock
	                    node.alpha.kubernetes.io/ttl: 0
	                    volumes.kubernetes.io/controller-managed-attach-detach: true
	CreationTimestamp:  Fri, 01 Dec 2023 18:52:50 +0000
	Taints:             <none>
	Unschedulable:      false
	Lease:
	  HolderIdentity:  addons-488129
	  AcquireTime:     <unset>
	  RenewTime:       Fri, 01 Dec 2023 18:56:17 +0000
	Conditions:
	  Type             Status  LastHeartbeatTime                 LastTransitionTime                Reason                       Message
	  ----             ------  -----------------                 ------------------                ------                       -------
	  MemoryPressure   False   Fri, 01 Dec 2023 18:55:57 +0000   Fri, 01 Dec 2023 18:52:46 +0000   KubeletHasSufficientMemory   kubelet has sufficient memory available
	  DiskPressure     False   Fri, 01 Dec 2023 18:55:57 +0000   Fri, 01 Dec 2023 18:52:46 +0000   KubeletHasNoDiskPressure     kubelet has no disk pressure
	  PIDPressure      False   Fri, 01 Dec 2023 18:55:57 +0000   Fri, 01 Dec 2023 18:52:46 +0000   KubeletHasSufficientPID      kubelet has sufficient PID available
	  Ready            True    Fri, 01 Dec 2023 18:55:57 +0000   Fri, 01 Dec 2023 18:53:03 +0000   KubeletReady                 kubelet is posting ready status
	Addresses:
	  InternalIP:  192.168.49.2
	  Hostname:    addons-488129
	Capacity:
	  cpu:                2
	  ephemeral-storage:  203034800Ki
	  hugepages-1Gi:      0
	  hugepages-2Mi:      0
	  hugepages-32Mi:     0
	  hugepages-64Ki:     0
	  memory:             8022500Ki
	  pods:               110
	Allocatable:
	  cpu:                2
	  ephemeral-storage:  203034800Ki
	  hugepages-1Gi:      0
	  hugepages-2Mi:      0
	  hugepages-32Mi:     0
	  hugepages-64Ki:     0
	  memory:             8022500Ki
	  pods:               110
	System Info:
	  Machine ID:                 90c46a334d964249bf6c456f40f33733
	  System UUID:                9416941f-21d0-4c8b-b640-87ebbaf52134
	  Boot ID:                    8abca68d-6ef7-4596-a2cf-01a2291ed738
	  Kernel Version:             5.15.0-1050-aws
	  OS Image:                   Ubuntu 22.04.3 LTS
	  Operating System:           linux
	  Architecture:               arm64
	  Container Runtime Version:  containerd://1.6.25
	  Kubelet Version:            v1.28.4
	  Kube-Proxy Version:         v1.28.4
	PodCIDR:                      10.244.0.0/24
	PodCIDRs:                     10.244.0.0/24
	Non-terminated Pods:          (12 in total)
	  Namespace                   Name                                     CPU Requests  CPU Limits  Memory Requests  Memory Limits  Age
	  ---------                   ----                                     ------------  ----------  ---------------  -------------  ---
	  default                     hello-world-app-5d77478584-24z8n         0 (0%!)(MISSING)        0 (0%!)(MISSING)      0 (0%!)(MISSING)           0 (0%!)(MISSING)         26s
	  default                     nginx                                    0 (0%!)(MISSING)        0 (0%!)(MISSING)      0 (0%!)(MISSING)           0 (0%!)(MISSING)         36s
	  gcp-auth                    gcp-auth-d4c87556c-bg8zr                 0 (0%!)(MISSING)        0 (0%!)(MISSING)      0 (0%!)(MISSING)           0 (0%!)(MISSING)         3m9s
	  headlamp                    headlamp-777fd4b855-n5kz2                0 (0%!)(MISSING)        0 (0%!)(MISSING)      0 (0%!)(MISSING)           0 (0%!)(MISSING)         54s
	  kube-system                 coredns-5dd5756b68-7cnd6                 100m (5%!)(MISSING)     0 (0%!)(MISSING)      70Mi (0%!)(MISSING)        170Mi (2%!)(MISSING)     3m20s
	  kube-system                 etcd-addons-488129                       100m (5%!)(MISSING)     0 (0%!)(MISSING)      100Mi (1%!)(MISSING)       0 (0%!)(MISSING)         3m32s
	  kube-system                 kindnet-vlkvw                            100m (5%!)(MISSING)     100m (5%!)(MISSING)   50Mi (0%!)(MISSING)        50Mi (0%!)(MISSING)      3m20s
	  kube-system                 kube-apiserver-addons-488129             250m (12%!)(MISSING)    0 (0%!)(MISSING)      0 (0%!)(MISSING)           0 (0%!)(MISSING)         3m32s
	  kube-system                 kube-controller-manager-addons-488129    200m (10%!)(MISSING)    0 (0%!)(MISSING)      0 (0%!)(MISSING)           0 (0%!)(MISSING)         3m32s
	  kube-system                 kube-proxy-7gj6v                         0 (0%!)(MISSING)        0 (0%!)(MISSING)      0 (0%!)(MISSING)           0 (0%!)(MISSING)         3m20s
	  kube-system                 kube-scheduler-addons-488129             100m (5%!)(MISSING)     0 (0%!)(MISSING)      0 (0%!)(MISSING)           0 (0%!)(MISSING)         3m32s
	  kube-system                 storage-provisioner                      0 (0%!)(MISSING)        0 (0%!)(MISSING)      0 (0%!)(MISSING)           0 (0%!)(MISSING)         3m14s
	Allocated resources:
	  (Total limits may be over 100 percent, i.e., overcommitted.)
	  Resource           Requests    Limits
	  --------           --------    ------
	  cpu                850m (42%!)(MISSING)  100m (5%!)(MISSING)
	  memory             220Mi (2%!)(MISSING)  220Mi (2%!)(MISSING)
	  ephemeral-storage  0 (0%!)(MISSING)      0 (0%!)(MISSING)
	  hugepages-1Gi      0 (0%!)(MISSING)      0 (0%!)(MISSING)
	  hugepages-2Mi      0 (0%!)(MISSING)      0 (0%!)(MISSING)
	  hugepages-32Mi     0 (0%!)(MISSING)      0 (0%!)(MISSING)
	  hugepages-64Ki     0 (0%!)(MISSING)      0 (0%!)(MISSING)
	Events:
	  Type    Reason                   Age    From             Message
	  ----    ------                   ----   ----             -------
	  Normal  Starting                 3m18s  kube-proxy       
	  Normal  Starting                 3m32s  kubelet          Starting kubelet.
	  Normal  NodeHasSufficientMemory  3m32s  kubelet          Node addons-488129 status is now: NodeHasSufficientMemory
	  Normal  NodeHasNoDiskPressure    3m32s  kubelet          Node addons-488129 status is now: NodeHasNoDiskPressure
	  Normal  NodeHasSufficientPID     3m32s  kubelet          Node addons-488129 status is now: NodeHasSufficientPID
	  Normal  NodeNotReady             3m32s  kubelet          Node addons-488129 status is now: NodeNotReady
	  Normal  NodeAllocatableEnforced  3m32s  kubelet          Updated Node Allocatable limit across pods
	  Normal  NodeReady                3m22s  kubelet          Node addons-488129 status is now: NodeReady
	  Normal  RegisteredNode           3m20s  node-controller  Node addons-488129 event: Registered Node addons-488129 in Controller
	
	* 
	* ==> dmesg <==
	* [  +0.000754] FS-Cache: N-cookie c=0000000c [p=00000003 fl=2 nc=0 na=1]
	[  +0.001026] FS-Cache: N-cookie d=00000000c0e2b83e{9p.inode} n=00000000b55389ab
	[  +0.001117] FS-Cache: N-key=[8] 'a0385c0100000000'
	[  +0.002877] FS-Cache: Duplicate cookie detected
	[  +0.000724] FS-Cache: O-cookie c=00000006 [p=00000003 fl=226 nc=0 na=1]
	[  +0.001014] FS-Cache: O-cookie d=00000000c0e2b83e{9p.inode} n=0000000009a500bd
	[  +0.001116] FS-Cache: O-key=[8] 'a0385c0100000000'
	[  +0.000790] FS-Cache: N-cookie c=0000000d [p=00000003 fl=2 nc=0 na=1]
	[  +0.000985] FS-Cache: N-cookie d=00000000c0e2b83e{9p.inode} n=000000006d601d2f
	[  +0.001101] FS-Cache: N-key=[8] 'a0385c0100000000'
	[  +2.615492] FS-Cache: Duplicate cookie detected
	[  +0.000772] FS-Cache: O-cookie c=00000004 [p=00000003 fl=226 nc=0 na=1]
	[  +0.001004] FS-Cache: O-cookie d=00000000c0e2b83e{9p.inode} n=0000000010bf0fcb
	[  +0.001152] FS-Cache: O-key=[8] '9f385c0100000000'
	[  +0.000813] FS-Cache: N-cookie c=0000000f [p=00000003 fl=2 nc=0 na=1]
	[  +0.000990] FS-Cache: N-cookie d=00000000c0e2b83e{9p.inode} n=00000000b55389ab
	[  +0.001159] FS-Cache: N-key=[8] '9f385c0100000000'
	[  +0.329811] FS-Cache: Duplicate cookie detected
	[  +0.000747] FS-Cache: O-cookie c=00000009 [p=00000003 fl=226 nc=0 na=1]
	[  +0.001030] FS-Cache: O-cookie d=00000000c0e2b83e{9p.inode} n=00000000e473eb7d
	[  +0.001123] FS-Cache: O-key=[8] 'a7385c0100000000'
	[  +0.000733] FS-Cache: N-cookie c=00000010 [p=00000003 fl=2 nc=0 na=1]
	[  +0.000973] FS-Cache: N-cookie d=00000000c0e2b83e{9p.inode} n=000000003d8bf441
	[  +0.001090] FS-Cache: N-key=[8] 'a7385c0100000000'
	[Dec 1 17:49] kmem.limit_in_bytes is deprecated and will be removed. Please report your usecase to linux-mm@kvack.org if you depend on this functionality.
	
	* 
	* ==> etcd [026c258cdfa60bdbd34d0298a83e5f61936e8b3a021045bfd63a1351446b0101] <==
	* {"level":"info","ts":"2023-12-01T18:52:46.412909Z","caller":"fileutil/purge.go:44","msg":"started to purge file","dir":"/var/lib/minikube/etcd/member/wal","suffix":"wal","max":5,"interval":"30s"}
	{"level":"info","ts":"2023-12-01T18:52:46.413284Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"aec36adc501070cc switched to configuration voters=(12593026477526642892)"}
	{"level":"info","ts":"2023-12-01T18:52:46.416587Z","caller":"membership/cluster.go:421","msg":"added member","cluster-id":"fa54960ea34d58be","local-member-id":"aec36adc501070cc","added-peer-id":"aec36adc501070cc","added-peer-peer-urls":["https://192.168.49.2:2380"]}
	{"level":"info","ts":"2023-12-01T18:52:46.416968Z","caller":"embed/etcd.go:597","msg":"serving peer traffic","address":"192.168.49.2:2380"}
	{"level":"info","ts":"2023-12-01T18:52:46.42452Z","caller":"embed/etcd.go:569","msg":"cmux::serve","address":"192.168.49.2:2380"}
	{"level":"info","ts":"2023-12-01T18:52:46.421245Z","caller":"embed/etcd.go:278","msg":"now serving peer/client/metrics","local-member-id":"aec36adc501070cc","initial-advertise-peer-urls":["https://192.168.49.2:2380"],"listen-peer-urls":["https://192.168.49.2:2380"],"advertise-client-urls":["https://192.168.49.2:2379"],"listen-client-urls":["https://127.0.0.1:2379","https://192.168.49.2:2379"],"listen-metrics-urls":["http://127.0.0.1:2381"]}
	{"level":"info","ts":"2023-12-01T18:52:46.42129Z","caller":"embed/etcd.go:855","msg":"serving metrics","address":"http://127.0.0.1:2381"}
	{"level":"info","ts":"2023-12-01T18:52:46.868502Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"aec36adc501070cc is starting a new election at term 1"}
	{"level":"info","ts":"2023-12-01T18:52:46.868715Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"aec36adc501070cc became pre-candidate at term 1"}
	{"level":"info","ts":"2023-12-01T18:52:46.868806Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"aec36adc501070cc received MsgPreVoteResp from aec36adc501070cc at term 1"}
	{"level":"info","ts":"2023-12-01T18:52:46.868914Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"aec36adc501070cc became candidate at term 2"}
	{"level":"info","ts":"2023-12-01T18:52:46.869006Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"aec36adc501070cc received MsgVoteResp from aec36adc501070cc at term 2"}
	{"level":"info","ts":"2023-12-01T18:52:46.869098Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"aec36adc501070cc became leader at term 2"}
	{"level":"info","ts":"2023-12-01T18:52:46.869188Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"raft.node: aec36adc501070cc elected leader aec36adc501070cc at term 2"}
	{"level":"info","ts":"2023-12-01T18:52:46.872655Z","caller":"etcdserver/server.go:2062","msg":"published local member to cluster through raft","local-member-id":"aec36adc501070cc","local-member-attributes":"{Name:addons-488129 ClientURLs:[https://192.168.49.2:2379]}","request-path":"/0/members/aec36adc501070cc/attributes","cluster-id":"fa54960ea34d58be","publish-timeout":"7s"}
	{"level":"info","ts":"2023-12-01T18:52:46.872826Z","caller":"embed/serve.go:103","msg":"ready to serve client requests"}
	{"level":"info","ts":"2023-12-01T18:52:46.874038Z","caller":"embed/serve.go:250","msg":"serving client traffic securely","traffic":"grpc+http","address":"127.0.0.1:2379"}
	{"level":"info","ts":"2023-12-01T18:52:46.876604Z","caller":"etcdserver/server.go:2571","msg":"setting up initial cluster version using v2 API","cluster-version":"3.5"}
	{"level":"info","ts":"2023-12-01T18:52:46.876834Z","caller":"embed/serve.go:103","msg":"ready to serve client requests"}
	{"level":"info","ts":"2023-12-01T18:52:46.877787Z","caller":"embed/serve.go:250","msg":"serving client traffic securely","traffic":"grpc+http","address":"192.168.49.2:2379"}
	{"level":"info","ts":"2023-12-01T18:52:46.884517Z","caller":"etcdmain/main.go:44","msg":"notifying init daemon"}
	{"level":"info","ts":"2023-12-01T18:52:46.884636Z","caller":"etcdmain/main.go:50","msg":"successfully notified init daemon"}
	{"level":"info","ts":"2023-12-01T18:52:46.884735Z","caller":"membership/cluster.go:584","msg":"set initial cluster version","cluster-id":"fa54960ea34d58be","local-member-id":"aec36adc501070cc","cluster-version":"3.5"}
	{"level":"info","ts":"2023-12-01T18:52:46.884888Z","caller":"api/capability.go:75","msg":"enabled capabilities for version","cluster-version":"3.5"}
	{"level":"info","ts":"2023-12-01T18:52:46.884963Z","caller":"etcdserver/server.go:2595","msg":"cluster version is updated","cluster-version":"3.5"}
	
	* 
	* ==> gcp-auth [7874cfff5c364a73f7e0fd658c6db6de65fe59b3b61fd98645b513af8785b722] <==
	* 2023/12/01 18:54:00 GCP Auth Webhook started!
	2023/12/01 18:54:34 Ready to marshal response ...
	2023/12/01 18:54:34 Ready to write response ...
	2023/12/01 18:54:45 Ready to marshal response ...
	2023/12/01 18:54:45 Ready to write response ...
	2023/12/01 18:54:45 Ready to marshal response ...
	2023/12/01 18:54:45 Ready to write response ...
	2023/12/01 18:54:50 Ready to marshal response ...
	2023/12/01 18:54:50 Ready to write response ...
	2023/12/01 18:54:58 Ready to marshal response ...
	2023/12/01 18:54:58 Ready to write response ...
	2023/12/01 18:55:08 Ready to marshal response ...
	2023/12/01 18:55:08 Ready to write response ...
	2023/12/01 18:55:31 Ready to marshal response ...
	2023/12/01 18:55:31 Ready to write response ...
	2023/12/01 18:55:31 Ready to marshal response ...
	2023/12/01 18:55:31 Ready to write response ...
	2023/12/01 18:55:31 Ready to marshal response ...
	2023/12/01 18:55:31 Ready to write response ...
	2023/12/01 18:55:49 Ready to marshal response ...
	2023/12/01 18:55:49 Ready to write response ...
	2023/12/01 18:55:59 Ready to marshal response ...
	2023/12/01 18:55:59 Ready to write response ...
	
	* 
	* ==> kernel <==
	*  18:56:26 up  1:38,  0 users,  load average: 1.35, 1.85, 1.82
	Linux addons-488129 5.15.0-1050-aws #55~20.04.1-Ubuntu SMP Mon Nov 6 12:18:16 UTC 2023 aarch64 aarch64 aarch64 GNU/Linux
	PRETTY_NAME="Ubuntu 22.04.3 LTS"
	
	* 
	* ==> kindnet [22b7e6d47d9f4a75bc24caaef1be384992b615bd5487662f96c620c94cea5907] <==
	* I1201 18:54:17.461915       1 main.go:227] handling current node
	I1201 18:54:27.474809       1 main.go:223] Handling node with IPs: map[192.168.49.2:{}]
	I1201 18:54:27.474843       1 main.go:227] handling current node
	I1201 18:54:37.479469       1 main.go:223] Handling node with IPs: map[192.168.49.2:{}]
	I1201 18:54:37.479501       1 main.go:227] handling current node
	I1201 18:54:47.492361       1 main.go:223] Handling node with IPs: map[192.168.49.2:{}]
	I1201 18:54:47.492389       1 main.go:227] handling current node
	I1201 18:54:57.504576       1 main.go:223] Handling node with IPs: map[192.168.49.2:{}]
	I1201 18:54:57.504606       1 main.go:227] handling current node
	I1201 18:55:07.517658       1 main.go:223] Handling node with IPs: map[192.168.49.2:{}]
	I1201 18:55:07.517689       1 main.go:227] handling current node
	I1201 18:55:17.536070       1 main.go:223] Handling node with IPs: map[192.168.49.2:{}]
	I1201 18:55:17.536099       1 main.go:227] handling current node
	I1201 18:55:27.540413       1 main.go:223] Handling node with IPs: map[192.168.49.2:{}]
	I1201 18:55:27.540445       1 main.go:227] handling current node
	I1201 18:55:37.545731       1 main.go:223] Handling node with IPs: map[192.168.49.2:{}]
	I1201 18:55:37.545764       1 main.go:227] handling current node
	I1201 18:55:47.550017       1 main.go:223] Handling node with IPs: map[192.168.49.2:{}]
	I1201 18:55:47.550300       1 main.go:227] handling current node
	I1201 18:55:57.554649       1 main.go:223] Handling node with IPs: map[192.168.49.2:{}]
	I1201 18:55:57.554678       1 main.go:227] handling current node
	I1201 18:56:07.559292       1 main.go:223] Handling node with IPs: map[192.168.49.2:{}]
	I1201 18:56:07.559326       1 main.go:227] handling current node
	I1201 18:56:17.572131       1 main.go:223] Handling node with IPs: map[192.168.49.2:{}]
	I1201 18:56:17.572163       1 main.go:227] handling current node
	
	* 
	* ==> kube-apiserver [480e877a317963e7b161271ddde980075bebeedd87746305468098b110ed14c9] <==
	* I1201 18:55:24.073851       1 handler.go:232] Adding GroupVersion snapshot.storage.k8s.io v1 to ResourceManager
	I1201 18:55:24.073914       1 handler.go:232] Adding GroupVersion snapshot.storage.k8s.io v1beta1 to ResourceManager
	I1201 18:55:24.084246       1 handler.go:232] Adding GroupVersion snapshot.storage.k8s.io v1 to ResourceManager
	I1201 18:55:24.084320       1 handler.go:232] Adding GroupVersion snapshot.storage.k8s.io v1beta1 to ResourceManager
	I1201 18:55:24.094506       1 handler.go:232] Adding GroupVersion snapshot.storage.k8s.io v1 to ResourceManager
	I1201 18:55:24.094572       1 handler.go:232] Adding GroupVersion snapshot.storage.k8s.io v1beta1 to ResourceManager
	I1201 18:55:24.114175       1 handler.go:232] Adding GroupVersion snapshot.storage.k8s.io v1 to ResourceManager
	I1201 18:55:24.114239       1 handler.go:232] Adding GroupVersion snapshot.storage.k8s.io v1beta1 to ResourceManager
	I1201 18:55:24.148791       1 handler.go:232] Adding GroupVersion snapshot.storage.k8s.io v1 to ResourceManager
	I1201 18:55:24.148834       1 handler.go:232] Adding GroupVersion snapshot.storage.k8s.io v1beta1 to ResourceManager
	I1201 18:55:24.149706       1 handler.go:232] Adding GroupVersion snapshot.storage.k8s.io v1 to ResourceManager
	I1201 18:55:24.149755       1 handler.go:232] Adding GroupVersion snapshot.storage.k8s.io v1beta1 to ResourceManager
	W1201 18:55:25.084570       1 cacher.go:171] Terminating all watchers from cacher volumesnapshotclasses.snapshot.storage.k8s.io
	W1201 18:55:25.144397       1 cacher.go:171] Terminating all watchers from cacher volumesnapshotcontents.snapshot.storage.k8s.io
	W1201 18:55:25.159475       1 cacher.go:171] Terminating all watchers from cacher volumesnapshots.snapshot.storage.k8s.io
	I1201 18:55:31.598207       1 alloc.go:330] "allocated clusterIPs" service="headlamp/headlamp" clusterIPs={"IPv4":"10.101.117.180"}
	I1201 18:55:48.524890       1 handler.go:232] Adding GroupVersion gadget.kinvolk.io v1alpha1 to ResourceManager
	I1201 18:55:48.537162       1 handler.go:232] Adding GroupVersion gadget.kinvolk.io v1alpha1 to ResourceManager
	W1201 18:55:49.576912       1 cacher.go:171] Terminating all watchers from cacher traces.gadget.kinvolk.io
	I1201 18:55:49.641779       1 controller.go:624] quota admission added evaluator for: ingresses.networking.k8s.io
	I1201 18:55:49.973087       1 alloc.go:330] "allocated clusterIPs" service="default/nginx" clusterIPs={"IPv4":"10.100.201.25"}
	I1201 18:55:53.103112       1 controller.go:129] OpenAPI AggregationController: action for item v1beta1.metrics.k8s.io: Nothing (removed from the queue).
	I1201 18:55:59.801849       1 alloc.go:330] "allocated clusterIPs" service="default/hello-world-app" clusterIPs={"IPv4":"10.108.211.22"}
	E1201 18:56:17.310582       1 authentication.go:73] "Unable to authenticate the request" err="[invalid bearer token, serviceaccounts \"ingress-nginx\" not found]"
	E1201 18:56:18.493729       1 authentication.go:73] "Unable to authenticate the request" err="[invalid bearer token, serviceaccounts \"ingress-nginx\" not found]"
	
	* 
	* ==> kube-controller-manager [d7d1b3ff7e10d0dd97177d7c104d406e561aa86ccb86ee71d3090c336b7ea229] <==
	* I1201 18:55:59.586321       1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="default/hello-world-app-5d77478584" duration="38.863765ms"
	I1201 18:55:59.602469       1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="default/hello-world-app-5d77478584" duration="16.039884ms"
	I1201 18:55:59.602602       1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="default/hello-world-app-5d77478584" duration="35.422µs"
	I1201 18:55:59.609877       1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="default/hello-world-app-5d77478584" duration="79.885µs"
	W1201 18:55:59.775749       1 reflector.go:535] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
	E1201 18:55:59.775853       1 reflector.go:147] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
	W1201 18:56:00.767482       1 reflector.go:535] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
	E1201 18:56:00.767520       1 reflector.go:147] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
	W1201 18:56:01.265896       1 reflector.go:535] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
	E1201 18:56:01.265933       1 reflector.go:147] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
	I1201 18:56:02.460589       1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="default/hello-world-app-5d77478584" duration="41.756µs"
	I1201 18:56:03.460836       1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="default/hello-world-app-5d77478584" duration="70.892µs"
	I1201 18:56:04.465092       1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="default/hello-world-app-5d77478584" duration="74.208µs"
	W1201 18:56:04.696192       1 reflector.go:535] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
	E1201 18:56:04.696225       1 reflector.go:147] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
	I1201 18:56:05.833283       1 shared_informer.go:311] Waiting for caches to sync for resource quota
	I1201 18:56:05.833350       1 shared_informer.go:318] Caches are synced for resource quota
	I1201 18:56:06.209446       1 shared_informer.go:311] Waiting for caches to sync for garbage collector
	I1201 18:56:06.209495       1 shared_informer.go:318] Caches are synced for garbage collector
	W1201 18:56:11.809687       1 reflector.go:535] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
	E1201 18:56:11.809720       1 reflector.go:147] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
	I1201 18:56:17.219528       1 job_controller.go:562] "enqueueing job" key="ingress-nginx/ingress-nginx-admission-create"
	I1201 18:56:17.224269       1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="ingress-nginx/ingress-nginx-controller-7c6974c4d8" duration="4.677µs"
	I1201 18:56:17.230135       1 job_controller.go:562] "enqueueing job" key="ingress-nginx/ingress-nginx-admission-patch"
	I1201 18:56:19.507385       1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="default/hello-world-app-5d77478584" duration="44.39µs"
	
	* 
	* ==> kube-proxy [dc03101da001715de96365f324103879df483652ca7f6664370d9927ffd1f0ca] <==
	* I1201 18:53:06.891132       1 server_others.go:69] "Using iptables proxy"
	I1201 18:53:06.933932       1 node.go:141] Successfully retrieved node IP: 192.168.49.2
	I1201 18:53:06.998559       1 server.go:632] "kube-proxy running in dual-stack mode" primary ipFamily="IPv4"
	I1201 18:53:07.017133       1 server_others.go:152] "Using iptables Proxier"
	I1201 18:53:07.017196       1 server_others.go:421] "Detect-local-mode set to ClusterCIDR, but no cluster CIDR for family" ipFamily="IPv6"
	I1201 18:53:07.017206       1 server_others.go:438] "Defaulting to no-op detect-local"
	I1201 18:53:07.017255       1 proxier.go:251] "Setting route_localnet=1 to allow node-ports on localhost; to change this either disable iptables.localhostNodePorts (--iptables-localhost-nodeports) or set nodePortAddresses (--nodeport-addresses) to filter loopback addresses"
	I1201 18:53:07.017535       1 server.go:846] "Version info" version="v1.28.4"
	I1201 18:53:07.017547       1 server.go:848] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
	I1201 18:53:07.020073       1 config.go:188] "Starting service config controller"
	I1201 18:53:07.020091       1 shared_informer.go:311] Waiting for caches to sync for service config
	I1201 18:53:07.020114       1 config.go:97] "Starting endpoint slice config controller"
	I1201 18:53:07.020118       1 shared_informer.go:311] Waiting for caches to sync for endpoint slice config
	I1201 18:53:07.026812       1 config.go:315] "Starting node config controller"
	I1201 18:53:07.026834       1 shared_informer.go:311] Waiting for caches to sync for node config
	I1201 18:53:07.121089       1 shared_informer.go:318] Caches are synced for endpoint slice config
	I1201 18:53:07.121139       1 shared_informer.go:318] Caches are synced for service config
	I1201 18:53:07.127583       1 shared_informer.go:318] Caches are synced for node config
	
	* 
	* ==> kube-scheduler [a2dc008f668852b684a5893288c209a914d808614cc5feee25fc13be60fb4604] <==
	* W1201 18:52:50.391556       1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumeclaims" in API group "" at the cluster scope
	E1201 18:52:50.391879       1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.PersistentVolumeClaim: failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumeclaims" in API group "" at the cluster scope
	W1201 18:52:50.391648       1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope
	E1201 18:52:50.391953       1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.StorageClass: failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope
	W1201 18:52:50.394060       1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.CSINode: csinodes.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csinodes" in API group "storage.k8s.io" at the cluster scope
	E1201 18:52:50.396560       1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.CSINode: failed to list *v1.CSINode: csinodes.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csinodes" in API group "storage.k8s.io" at the cluster scope
	W1201 18:52:50.396797       1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.CSIDriver: csidrivers.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csidrivers" in API group "storage.k8s.io" at the cluster scope
	E1201 18:52:50.396899       1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: csidrivers.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csidrivers" in API group "storage.k8s.io" at the cluster scope
	W1201 18:52:50.394464       1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.StatefulSet: statefulsets.apps is forbidden: User "system:kube-scheduler" cannot list resource "statefulsets" in API group "apps" at the cluster scope
	E1201 18:52:50.397169       1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.StatefulSet: failed to list *v1.StatefulSet: statefulsets.apps is forbidden: User "system:kube-scheduler" cannot list resource "statefulsets" in API group "apps" at the cluster scope
	W1201 18:52:50.394760       1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.CSIStorageCapacity: csistoragecapacities.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csistoragecapacities" in API group "storage.k8s.io" at the cluster scope
	E1201 18:52:50.397360       1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.CSIStorageCapacity: failed to list *v1.CSIStorageCapacity: csistoragecapacities.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csistoragecapacities" in API group "storage.k8s.io" at the cluster scope
	W1201 18:52:50.394885       1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User "system:kube-scheduler" cannot list resource "poddisruptionbudgets" in API group "policy" at the cluster scope
	E1201 18:52:50.397570       1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.PodDisruptionBudget: failed to list *v1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User "system:kube-scheduler" cannot list resource "poddisruptionbudgets" in API group "policy" at the cluster scope
	W1201 18:52:50.395188       1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.Namespace: namespaces is forbidden: User "system:kube-scheduler" cannot list resource "namespaces" in API group "" at the cluster scope
	E1201 18:52:50.397741       1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.Namespace: failed to list *v1.Namespace: namespaces is forbidden: User "system:kube-scheduler" cannot list resource "namespaces" in API group "" at the cluster scope
	W1201 18:52:50.395457       1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.Pod: pods is forbidden: User "system:kube-scheduler" cannot list resource "pods" in API group "" at the cluster scope
	W1201 18:52:50.395899       1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.ReplicaSet: replicasets.apps is forbidden: User "system:kube-scheduler" cannot list resource "replicasets" in API group "apps" at the cluster scope
	E1201 18:52:50.398501       1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.ReplicaSet: failed to list *v1.ReplicaSet: replicasets.apps is forbidden: User "system:kube-scheduler" cannot list resource "replicasets" in API group "apps" at the cluster scope
	W1201 18:52:50.396007       1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.Node: nodes is forbidden: User "system:kube-scheduler" cannot list resource "nodes" in API group "" at the cluster scope
	E1201 18:52:50.398782       1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.Node: failed to list *v1.Node: nodes is forbidden: User "system:kube-scheduler" cannot list resource "nodes" in API group "" at the cluster scope
	W1201 18:52:50.396198       1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.ReplicationController: replicationcontrollers is forbidden: User "system:kube-scheduler" cannot list resource "replicationcontrollers" in API group "" at the cluster scope
	E1201 18:52:50.399187       1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.ReplicationController: failed to list *v1.ReplicationController: replicationcontrollers is forbidden: User "system:kube-scheduler" cannot list resource "replicationcontrollers" in API group "" at the cluster scope
	E1201 18:52:50.398444       1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.Pod: failed to list *v1.Pod: pods is forbidden: User "system:kube-scheduler" cannot list resource "pods" in API group "" at the cluster scope
	I1201 18:52:51.780597       1 shared_informer.go:318] Caches are synced for client-ca::kube-system::extension-apiserver-authentication::client-ca-file
	
	* 
	* ==> kubelet <==
	* Dec 01 18:56:04 addons-488129 kubelet[1356]: E1201 18:56:04.452071    1356 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"hello-world-app\" with CrashLoopBackOff: \"back-off 10s restarting failed container=hello-world-app pod=hello-world-app-5d77478584-24z8n_default(7ebc113c-63c6-4a4b-b368-401bab63a7b8)\"" pod="default/hello-world-app-5d77478584-24z8n" podUID="7ebc113c-63c6-4a4b-b368-401bab63a7b8"
	Dec 01 18:56:07 addons-488129 kubelet[1356]: I1201 18:56:07.259773    1356 scope.go:117] "RemoveContainer" containerID="6e66b37d420de39b1f3cbf3c9ed8c44551d1156420cd199829faaf30ac97e6cf"
	Dec 01 18:56:07 addons-488129 kubelet[1356]: E1201 18:56:07.260585    1356 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"minikube-ingress-dns\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=minikube-ingress-dns pod=kube-ingress-dns-minikube_kube-system(a8ea6287-3f31-4001-a60a-96eea450815d)\"" pod="kube-system/kube-ingress-dns-minikube" podUID="a8ea6287-3f31-4001-a60a-96eea450815d"
	Dec 01 18:56:15 addons-488129 kubelet[1356]: I1201 18:56:15.991034    1356 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mf47h\" (UniqueName: \"kubernetes.io/projected/a8ea6287-3f31-4001-a60a-96eea450815d-kube-api-access-mf47h\") pod \"a8ea6287-3f31-4001-a60a-96eea450815d\" (UID: \"a8ea6287-3f31-4001-a60a-96eea450815d\") "
	Dec 01 18:56:15 addons-488129 kubelet[1356]: I1201 18:56:15.996542    1356 operation_generator.go:882] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a8ea6287-3f31-4001-a60a-96eea450815d-kube-api-access-mf47h" (OuterVolumeSpecName: "kube-api-access-mf47h") pod "a8ea6287-3f31-4001-a60a-96eea450815d" (UID: "a8ea6287-3f31-4001-a60a-96eea450815d"). InnerVolumeSpecName "kube-api-access-mf47h". PluginName "kubernetes.io/projected", VolumeGidValue ""
	Dec 01 18:56:16 addons-488129 kubelet[1356]: I1201 18:56:16.091883    1356 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-mf47h\" (UniqueName: \"kubernetes.io/projected/a8ea6287-3f31-4001-a60a-96eea450815d-kube-api-access-mf47h\") on node \"addons-488129\" DevicePath \"\""
	Dec 01 18:56:16 addons-488129 kubelet[1356]: I1201 18:56:16.480328    1356 scope.go:117] "RemoveContainer" containerID="6e66b37d420de39b1f3cbf3c9ed8c44551d1156420cd199829faaf30ac97e6cf"
	Dec 01 18:56:17 addons-488129 kubelet[1356]: I1201 18:56:17.266927    1356 kubelet_volumes.go:161] "Cleaned up orphaned pod volumes dir" podUID="5ed38834-35c0-4c0b-a328-2353ac07ee48" path="/var/lib/kubelet/pods/5ed38834-35c0-4c0b-a328-2353ac07ee48/volumes"
	Dec 01 18:56:17 addons-488129 kubelet[1356]: I1201 18:56:17.270573    1356 kubelet_volumes.go:161] "Cleaned up orphaned pod volumes dir" podUID="a8ea6287-3f31-4001-a60a-96eea450815d" path="/var/lib/kubelet/pods/a8ea6287-3f31-4001-a60a-96eea450815d/volumes"
	Dec 01 18:56:19 addons-488129 kubelet[1356]: I1201 18:56:19.257160    1356 scope.go:117] "RemoveContainer" containerID="3e1d7966fcbbc004ff70a114d141da0de283e28cfa2c704761eb02d18601bb7f"
	Dec 01 18:56:19 addons-488129 kubelet[1356]: I1201 18:56:19.260443    1356 kubelet_volumes.go:161] "Cleaned up orphaned pod volumes dir" podUID="6c31e2ca-9223-48f1-9270-a81654a6175a" path="/var/lib/kubelet/pods/6c31e2ca-9223-48f1-9270-a81654a6175a/volumes"
	Dec 01 18:56:19 addons-488129 kubelet[1356]: I1201 18:56:19.492412    1356 scope.go:117] "RemoveContainer" containerID="3e1d7966fcbbc004ff70a114d141da0de283e28cfa2c704761eb02d18601bb7f"
	Dec 01 18:56:19 addons-488129 kubelet[1356]: I1201 18:56:19.492917    1356 scope.go:117] "RemoveContainer" containerID="3b9be2d2fd676b8e02da39b06d73a9a8ffca8bf2cc6d19c7b3ab27de9ad9ff9b"
	Dec 01 18:56:19 addons-488129 kubelet[1356]: E1201 18:56:19.493577    1356 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"hello-world-app\" with CrashLoopBackOff: \"back-off 20s restarting failed container=hello-world-app pod=hello-world-app-5d77478584-24z8n_default(7ebc113c-63c6-4a4b-b368-401bab63a7b8)\"" pod="default/hello-world-app-5d77478584-24z8n" podUID="7ebc113c-63c6-4a4b-b368-401bab63a7b8"
	Dec 01 18:56:20 addons-488129 kubelet[1356]: I1201 18:56:20.498838    1356 scope.go:117] "RemoveContainer" containerID="5a4e71eefda89dc132d689de97311f45998966a5f8aac13d421d0469d74200f7"
	Dec 01 18:56:20 addons-488129 kubelet[1356]: I1201 18:56:20.507321    1356 scope.go:117] "RemoveContainer" containerID="5a4e71eefda89dc132d689de97311f45998966a5f8aac13d421d0469d74200f7"
	Dec 01 18:56:20 addons-488129 kubelet[1356]: E1201 18:56:20.507956    1356 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = an error occurred when try to find container \"5a4e71eefda89dc132d689de97311f45998966a5f8aac13d421d0469d74200f7\": not found" containerID="5a4e71eefda89dc132d689de97311f45998966a5f8aac13d421d0469d74200f7"
	Dec 01 18:56:20 addons-488129 kubelet[1356]: I1201 18:56:20.508002    1356 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"containerd","ID":"5a4e71eefda89dc132d689de97311f45998966a5f8aac13d421d0469d74200f7"} err="failed to get container status \"5a4e71eefda89dc132d689de97311f45998966a5f8aac13d421d0469d74200f7\": rpc error: code = NotFound desc = an error occurred when try to find container \"5a4e71eefda89dc132d689de97311f45998966a5f8aac13d421d0469d74200f7\": not found"
	Dec 01 18:56:20 addons-488129 kubelet[1356]: I1201 18:56:20.530343    1356 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vvjwq\" (UniqueName: \"kubernetes.io/projected/77e18182-836c-4166-b1a0-29e24b98d4a5-kube-api-access-vvjwq\") pod \"77e18182-836c-4166-b1a0-29e24b98d4a5\" (UID: \"77e18182-836c-4166-b1a0-29e24b98d4a5\") "
	Dec 01 18:56:20 addons-488129 kubelet[1356]: I1201 18:56:20.530402    1356 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/77e18182-836c-4166-b1a0-29e24b98d4a5-webhook-cert\") pod \"77e18182-836c-4166-b1a0-29e24b98d4a5\" (UID: \"77e18182-836c-4166-b1a0-29e24b98d4a5\") "
	Dec 01 18:56:20 addons-488129 kubelet[1356]: I1201 18:56:20.532937    1356 operation_generator.go:882] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/77e18182-836c-4166-b1a0-29e24b98d4a5-webhook-cert" (OuterVolumeSpecName: "webhook-cert") pod "77e18182-836c-4166-b1a0-29e24b98d4a5" (UID: "77e18182-836c-4166-b1a0-29e24b98d4a5"). InnerVolumeSpecName "webhook-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
	Dec 01 18:56:20 addons-488129 kubelet[1356]: I1201 18:56:20.533401    1356 operation_generator.go:882] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/77e18182-836c-4166-b1a0-29e24b98d4a5-kube-api-access-vvjwq" (OuterVolumeSpecName: "kube-api-access-vvjwq") pod "77e18182-836c-4166-b1a0-29e24b98d4a5" (UID: "77e18182-836c-4166-b1a0-29e24b98d4a5"). InnerVolumeSpecName "kube-api-access-vvjwq". PluginName "kubernetes.io/projected", VolumeGidValue ""
	Dec 01 18:56:20 addons-488129 kubelet[1356]: I1201 18:56:20.630852    1356 reconciler_common.go:300] "Volume detached for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/77e18182-836c-4166-b1a0-29e24b98d4a5-webhook-cert\") on node \"addons-488129\" DevicePath \"\""
	Dec 01 18:56:20 addons-488129 kubelet[1356]: I1201 18:56:20.630897    1356 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-vvjwq\" (UniqueName: \"kubernetes.io/projected/77e18182-836c-4166-b1a0-29e24b98d4a5-kube-api-access-vvjwq\") on node \"addons-488129\" DevicePath \"\""
	Dec 01 18:56:21 addons-488129 kubelet[1356]: I1201 18:56:21.261223    1356 kubelet_volumes.go:161] "Cleaned up orphaned pod volumes dir" podUID="77e18182-836c-4166-b1a0-29e24b98d4a5" path="/var/lib/kubelet/pods/77e18182-836c-4166-b1a0-29e24b98d4a5/volumes"
	
	* 
	* ==> storage-provisioner [4a1990b8f0d3a5d0d19deb52923a8d026ac8aae675babe0fd43c7276af7dcc1e] <==
	* I1201 18:53:13.137275       1 storage_provisioner.go:116] Initializing the minikube storage provisioner...
	I1201 18:53:13.166094       1 storage_provisioner.go:141] Storage provisioner initialized, now starting service!
	I1201 18:53:13.166177       1 leaderelection.go:243] attempting to acquire leader lease kube-system/k8s.io-minikube-hostpath...
	I1201 18:53:13.189071       1 leaderelection.go:253] successfully acquired lease kube-system/k8s.io-minikube-hostpath
	I1201 18:53:13.189306       1 controller.go:835] Starting provisioner controller k8s.io/minikube-hostpath_addons-488129_4057c06b-a92a-4d1a-93cb-a960104021bf!
	I1201 18:53:13.190982       1 event.go:282] Event(v1.ObjectReference{Kind:"Endpoints", Namespace:"kube-system", Name:"k8s.io-minikube-hostpath", UID:"bee02a71-bfd3-4446-be64-ad91c7e21d84", APIVersion:"v1", ResourceVersion:"632", FieldPath:""}): type: 'Normal' reason: 'LeaderElection' addons-488129_4057c06b-a92a-4d1a-93cb-a960104021bf became leader
	I1201 18:53:13.290033       1 controller.go:884] Started provisioner controller k8s.io/minikube-hostpath_addons-488129_4057c06b-a92a-4d1a-93cb-a960104021bf!
	E1201 18:55:16.357134       1 controller.go:1050] claim "f53ea710-f960-449e-8b49-0e6d59150ba7" in work queue no longer exists
	

                                                
                                                
-- /stdout --
helpers_test.go:254: (dbg) Run:  out/minikube-linux-arm64 status --format={{.APIServer}} -p addons-488129 -n addons-488129
helpers_test.go:261: (dbg) Run:  kubectl --context addons-488129 get po -o=jsonpath={.items[*].metadata.name} -A --field-selector=status.phase!=Running
helpers_test.go:285: <<< TestAddons/parallel/Ingress FAILED: end of post-mortem logs <<<
helpers_test.go:286: ---------------------/post-mortem---------------------------------
--- FAIL: TestAddons/parallel/Ingress (38.33s)

                                                
                                    
TestFunctional/serial/ExtraConfig (16.46s)

                                                
                                                
=== RUN   TestFunctional/serial/ExtraConfig
functional_test.go:753: (dbg) Run:  out/minikube-linux-arm64 start -p functional-616785 --extra-config=apiserver.enable-admission-plugins=NamespaceAutoProvision --wait=all
E1201 19:00:05.350578  258301 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/addons-488129/client.crt: no such file or directory
functional_test.go:753: (dbg) Non-zero exit: out/minikube-linux-arm64 start -p functional-616785 --extra-config=apiserver.enable-admission-plugins=NamespaceAutoProvision --wait=all: exit status 80 (13.945548533s)

                                                
                                                
-- stdout --
	* [functional-616785] minikube v1.32.0 on Ubuntu 20.04 (arm64)
	  - MINIKUBE_LOCATION=17703
	  - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
	  - KUBECONFIG=/home/jenkins/minikube-integration/17703-252966/kubeconfig
	  - MINIKUBE_HOME=/home/jenkins/minikube-integration/17703-252966/.minikube
	  - MINIKUBE_BIN=out/minikube-linux-arm64
	  - MINIKUBE_FORCE_SYSTEMD=
	* Using the docker driver based on existing profile
	* Starting control plane node functional-616785 in cluster functional-616785
	* Pulling base image ...
	* Updating the running docker "functional-616785" container ...
	* Preparing Kubernetes v1.28.4 on containerd 1.6.25 ...
	  - apiserver.enable-admission-plugins=NamespaceAutoProvision
	* Configuring CNI (Container Networking Interface) ...
	* Verifying Kubernetes components...
	  - Using image gcr.io/k8s-minikube/storage-provisioner:v5
	
	

                                                
                                                
-- /stdout --
** stderr ** 
	E1201 19:00:08.373484  284652 pod_ready.go:66] WaitExtra: waitPodCondition: node "functional-616785" hosting pod "coredns-5dd5756b68-ts5dh" in "kube-system" namespace is currently not "Ready" (skipping!): node "functional-616785" has status "Ready":"False"
	E1201 19:00:08.390808  284652 pod_ready.go:66] WaitExtra: waitPodCondition: node "functional-616785" hosting pod "etcd-functional-616785" in "kube-system" namespace is currently not "Ready" (skipping!): node "functional-616785" has status "Ready":"False"
	E1201 19:00:08.403088  284652 pod_ready.go:66] WaitExtra: waitPodCondition: node "functional-616785" hosting pod "kube-apiserver-functional-616785" in "kube-system" namespace is currently not "Ready" (skipping!): node "functional-616785" has status "Ready":"False"
	E1201 19:00:08.421661  284652 pod_ready.go:66] WaitExtra: waitPodCondition: node "functional-616785" hosting pod "kube-controller-manager-functional-616785" in "kube-system" namespace is currently not "Ready" (skipping!): node "functional-616785" has status "Ready":"False"
	E1201 19:00:08.765301  284652 pod_ready.go:66] WaitExtra: waitPodCondition: node "functional-616785" hosting pod "kube-proxy-d8cvf" in "kube-system" namespace is currently not "Ready" (skipping!): node "functional-616785" has status "Ready":"False"
	E1201 19:00:09.162511  284652 pod_ready.go:66] WaitExtra: waitPodCondition: node "functional-616785" hosting pod "kube-scheduler-functional-616785" in "kube-system" namespace is currently not "Ready" (skipping!): node "functional-616785" has status "Ready":"False"
	E1201 19:00:09.475849  284652 start.go:894] failed to get current CoreDNS ConfigMap: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.28.4/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml": Process exited with status 1
	stdout:
	
	stderr:
	The connection to the server localhost:8441 was refused - did you specify the right host or port?
	Failed to inject host.minikube.internal into CoreDNS, this will limit the pods access to the host IP
	X Exiting due to GUEST_START: failed to start node: wait 6m0s for node: waiting for node to be ready: waitNodeCondition: error getting node "functional-616785": Get "https://192.168.49.2:8441/api/v1/nodes/functional-616785": dial tcp 192.168.49.2:8441: connect: connection refused
	* 
	╭─────────────────────────────────────────────────────────────────────────────────────────────╮
	│                                                                                             │
	│    * If the above advice does not help, please let us know:                                 │
	│      https://github.com/kubernetes/minikube/issues/new/choose                               │
	│                                                                                             │
	│    * Please run `minikube logs --file=logs.txt` and attach logs.txt to the GitHub issue.    │
	│                                                                                             │
	╰─────────────────────────────────────────────────────────────────────────────────────────────╯

                                                
                                                
** /stderr **
functional_test.go:755: failed to restart minikube. args "out/minikube-linux-arm64 start -p functional-616785 --extra-config=apiserver.enable-admission-plugins=NamespaceAutoProvision --wait=all": exit status 80
functional_test.go:757: restart took 13.945739332s for "functional-616785" cluster.
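The stderr above ends with the apiserver on 192.168.49.2:8441 refusing connections while --wait=all was verifying node readiness. A minimal manual reachability check, assuming the profile name, context, and port captured in this run (an illustrative sketch, not part of the recorded test output):

  # is anything listening on the apiserver port of the minikube container?
  nc -zv 192.168.49.2 8441
  # what does minikube itself report for the cluster components?
  out/minikube-linux-arm64 -p functional-616785 status
  # does the kubeconfig context reach the API server at all?
  kubectl --context functional-616785 get nodes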
helpers_test.go:222: -----------------------post-mortem--------------------------------
helpers_test.go:230: ======>  post-mortem[TestFunctional/serial/ExtraConfig]: docker inspect <======
helpers_test.go:231: (dbg) Run:  docker inspect functional-616785
helpers_test.go:235: (dbg) docker inspect functional-616785:

                                                
                                                
-- stdout --
	[
	    {
	        "Id": "8fb32b74d7c5c78662dd1fc46356f2a9d1acf6222d4c0e4ac405b58b46a669fd",
	        "Created": "2023-12-01T18:58:44.056091264Z",
	        "Path": "/usr/local/bin/entrypoint",
	        "Args": [
	            "/sbin/init"
	        ],
	        "State": {
	            "Status": "running",
	            "Running": true,
	            "Paused": false,
	            "Restarting": false,
	            "OOMKilled": false,
	            "Dead": false,
	            "Pid": 280935,
	            "ExitCode": 0,
	            "Error": "",
	            "StartedAt": "2023-12-01T18:58:44.391941652Z",
	            "FinishedAt": "0001-01-01T00:00:00Z"
	        },
	        "Image": "sha256:e4e0f3cc6f04c458835e9edb05d52f031520d40521bc3568d81cbb7c06a79ef2",
	        "ResolvConfPath": "/var/lib/docker/containers/8fb32b74d7c5c78662dd1fc46356f2a9d1acf6222d4c0e4ac405b58b46a669fd/resolv.conf",
	        "HostnamePath": "/var/lib/docker/containers/8fb32b74d7c5c78662dd1fc46356f2a9d1acf6222d4c0e4ac405b58b46a669fd/hostname",
	        "HostsPath": "/var/lib/docker/containers/8fb32b74d7c5c78662dd1fc46356f2a9d1acf6222d4c0e4ac405b58b46a669fd/hosts",
	        "LogPath": "/var/lib/docker/containers/8fb32b74d7c5c78662dd1fc46356f2a9d1acf6222d4c0e4ac405b58b46a669fd/8fb32b74d7c5c78662dd1fc46356f2a9d1acf6222d4c0e4ac405b58b46a669fd-json.log",
	        "Name": "/functional-616785",
	        "RestartCount": 0,
	        "Driver": "overlay2",
	        "Platform": "linux",
	        "MountLabel": "",
	        "ProcessLabel": "",
	        "AppArmorProfile": "unconfined",
	        "ExecIDs": null,
	        "HostConfig": {
	            "Binds": [
	                "functional-616785:/var",
	                "/lib/modules:/lib/modules:ro"
	            ],
	            "ContainerIDFile": "",
	            "LogConfig": {
	                "Type": "json-file",
	                "Config": {}
	            },
	            "NetworkMode": "functional-616785",
	            "PortBindings": {
	                "22/tcp": [
	                    {
	                        "HostIp": "127.0.0.1",
	                        "HostPort": ""
	                    }
	                ],
	                "2376/tcp": [
	                    {
	                        "HostIp": "127.0.0.1",
	                        "HostPort": ""
	                    }
	                ],
	                "32443/tcp": [
	                    {
	                        "HostIp": "127.0.0.1",
	                        "HostPort": ""
	                    }
	                ],
	                "5000/tcp": [
	                    {
	                        "HostIp": "127.0.0.1",
	                        "HostPort": ""
	                    }
	                ],
	                "8441/tcp": [
	                    {
	                        "HostIp": "127.0.0.1",
	                        "HostPort": ""
	                    }
	                ]
	            },
	            "RestartPolicy": {
	                "Name": "no",
	                "MaximumRetryCount": 0
	            },
	            "AutoRemove": false,
	            "VolumeDriver": "",
	            "VolumesFrom": null,
	            "ConsoleSize": [
	                0,
	                0
	            ],
	            "CapAdd": null,
	            "CapDrop": null,
	            "CgroupnsMode": "host",
	            "Dns": [],
	            "DnsOptions": [],
	            "DnsSearch": [],
	            "ExtraHosts": null,
	            "GroupAdd": null,
	            "IpcMode": "private",
	            "Cgroup": "",
	            "Links": null,
	            "OomScoreAdj": 0,
	            "PidMode": "",
	            "Privileged": true,
	            "PublishAllPorts": false,
	            "ReadonlyRootfs": false,
	            "SecurityOpt": [
	                "seccomp=unconfined",
	                "apparmor=unconfined",
	                "label=disable"
	            ],
	            "Tmpfs": {
	                "/run": "",
	                "/tmp": ""
	            },
	            "UTSMode": "",
	            "UsernsMode": "",
	            "ShmSize": 67108864,
	            "Runtime": "runc",
	            "Isolation": "",
	            "CpuShares": 0,
	            "Memory": 4194304000,
	            "NanoCpus": 2000000000,
	            "CgroupParent": "",
	            "BlkioWeight": 0,
	            "BlkioWeightDevice": [],
	            "BlkioDeviceReadBps": [],
	            "BlkioDeviceWriteBps": [],
	            "BlkioDeviceReadIOps": [],
	            "BlkioDeviceWriteIOps": [],
	            "CpuPeriod": 0,
	            "CpuQuota": 0,
	            "CpuRealtimePeriod": 0,
	            "CpuRealtimeRuntime": 0,
	            "CpusetCpus": "",
	            "CpusetMems": "",
	            "Devices": [],
	            "DeviceCgroupRules": null,
	            "DeviceRequests": null,
	            "MemoryReservation": 0,
	            "MemorySwap": 8388608000,
	            "MemorySwappiness": null,
	            "OomKillDisable": false,
	            "PidsLimit": null,
	            "Ulimits": null,
	            "CpuCount": 0,
	            "CpuPercent": 0,
	            "IOMaximumIOps": 0,
	            "IOMaximumBandwidth": 0,
	            "MaskedPaths": null,
	            "ReadonlyPaths": null
	        },
	        "GraphDriver": {
	            "Data": {
	                "LowerDir": "/var/lib/docker/overlay2/90a1eebdd2de22d9f2af0c999b2d595a52358bdd67ea923fc6f73d4dddfbf487-init/diff:/var/lib/docker/overlay2/049ae54891020b74263d4d0f668244f51ae19df0871773fd59686314976f2fd9/diff",
	                "MergedDir": "/var/lib/docker/overlay2/90a1eebdd2de22d9f2af0c999b2d595a52358bdd67ea923fc6f73d4dddfbf487/merged",
	                "UpperDir": "/var/lib/docker/overlay2/90a1eebdd2de22d9f2af0c999b2d595a52358bdd67ea923fc6f73d4dddfbf487/diff",
	                "WorkDir": "/var/lib/docker/overlay2/90a1eebdd2de22d9f2af0c999b2d595a52358bdd67ea923fc6f73d4dddfbf487/work"
	            },
	            "Name": "overlay2"
	        },
	        "Mounts": [
	            {
	                "Type": "volume",
	                "Name": "functional-616785",
	                "Source": "/var/lib/docker/volumes/functional-616785/_data",
	                "Destination": "/var",
	                "Driver": "local",
	                "Mode": "z",
	                "RW": true,
	                "Propagation": ""
	            },
	            {
	                "Type": "bind",
	                "Source": "/lib/modules",
	                "Destination": "/lib/modules",
	                "Mode": "ro",
	                "RW": false,
	                "Propagation": "rprivate"
	            }
	        ],
	        "Config": {
	            "Hostname": "functional-616785",
	            "Domainname": "",
	            "User": "",
	            "AttachStdin": false,
	            "AttachStdout": false,
	            "AttachStderr": false,
	            "ExposedPorts": {
	                "22/tcp": {},
	                "2376/tcp": {},
	                "32443/tcp": {},
	                "5000/tcp": {},
	                "8441/tcp": {}
	            },
	            "Tty": true,
	            "OpenStdin": false,
	            "StdinOnce": false,
	            "Env": [
	                "container=docker",
	                "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
	            ],
	            "Cmd": null,
	            "Image": "gcr.io/k8s-minikube/kicbase-builds:v0.0.42-1701387262-17703@sha256:a5458414df1be5e58eff93b3e67e6ecaad7e51ab23139de15714f7345af15e2f",
	            "Volumes": null,
	            "WorkingDir": "/",
	            "Entrypoint": [
	                "/usr/local/bin/entrypoint",
	                "/sbin/init"
	            ],
	            "OnBuild": null,
	            "Labels": {
	                "created_by.minikube.sigs.k8s.io": "true",
	                "mode.minikube.sigs.k8s.io": "functional-616785",
	                "name.minikube.sigs.k8s.io": "functional-616785",
	                "role.minikube.sigs.k8s.io": ""
	            },
	            "StopSignal": "SIGRTMIN+3"
	        },
	        "NetworkSettings": {
	            "Bridge": "",
	            "SandboxID": "11283c9328ce6444489fa187690a1d377758621c63df408c957a63839729f49c",
	            "HairpinMode": false,
	            "LinkLocalIPv6Address": "",
	            "LinkLocalIPv6PrefixLen": 0,
	            "Ports": {
	                "22/tcp": [
	                    {
	                        "HostIp": "127.0.0.1",
	                        "HostPort": "33098"
	                    }
	                ],
	                "2376/tcp": [
	                    {
	                        "HostIp": "127.0.0.1",
	                        "HostPort": "33097"
	                    }
	                ],
	                "32443/tcp": [
	                    {
	                        "HostIp": "127.0.0.1",
	                        "HostPort": "33094"
	                    }
	                ],
	                "5000/tcp": [
	                    {
	                        "HostIp": "127.0.0.1",
	                        "HostPort": "33096"
	                    }
	                ],
	                "8441/tcp": [
	                    {
	                        "HostIp": "127.0.0.1",
	                        "HostPort": "33095"
	                    }
	                ]
	            },
	            "SandboxKey": "/var/run/docker/netns/11283c9328ce",
	            "SecondaryIPAddresses": null,
	            "SecondaryIPv6Addresses": null,
	            "EndpointID": "",
	            "Gateway": "",
	            "GlobalIPv6Address": "",
	            "GlobalIPv6PrefixLen": 0,
	            "IPAddress": "",
	            "IPPrefixLen": 0,
	            "IPv6Gateway": "",
	            "MacAddress": "",
	            "Networks": {
	                "functional-616785": {
	                    "IPAMConfig": {
	                        "IPv4Address": "192.168.49.2"
	                    },
	                    "Links": null,
	                    "Aliases": [
	                        "8fb32b74d7c5",
	                        "functional-616785"
	                    ],
	                    "NetworkID": "74ca7b05d66aad62d2aa010c91353279be0cd04ba9f5adfee2b7f19ac02a8b0b",
	                    "EndpointID": "b3b040ba0b1e93d2839ba89f4badacd90b65ae2a20ac975e184e5c30500b6587",
	                    "Gateway": "192.168.49.1",
	                    "IPAddress": "192.168.49.2",
	                    "IPPrefixLen": 24,
	                    "IPv6Gateway": "",
	                    "GlobalIPv6Address": "",
	                    "GlobalIPv6PrefixLen": 0,
	                    "MacAddress": "02:42:c0:a8:31:02",
	                    "DriverOpts": null
	                }
	            }
	        }
	    }
	]

                                                
                                                
-- /stdout --
helpers_test.go:239: (dbg) Run:  out/minikube-linux-arm64 status --format={{.Host}} -p functional-616785 -n functional-616785
helpers_test.go:239: (dbg) Non-zero exit: out/minikube-linux-arm64 status --format={{.Host}} -p functional-616785 -n functional-616785: exit status 2 (366.767595ms)

                                                
                                                
-- stdout --
	Running

                                                
                                                
-- /stdout --
helpers_test.go:239: status error: exit status 2 (may be ok)
helpers_test.go:244: <<< TestFunctional/serial/ExtraConfig FAILED: start of post-mortem logs <<<
helpers_test.go:245: ======>  post-mortem[TestFunctional/serial/ExtraConfig]: minikube logs <======
helpers_test.go:247: (dbg) Run:  out/minikube-linux-arm64 -p functional-616785 logs -n 25
helpers_test.go:247: (dbg) Done: out/minikube-linux-arm64 -p functional-616785 logs -n 25: (1.676495348s)
helpers_test.go:252: TestFunctional/serial/ExtraConfig logs: 
-- stdout --
	* 
	* ==> Audit <==
	* |---------|--------------------------------------------------------------------------|-------------------|---------|---------|---------------------|---------------------|
	| Command |                                   Args                                   |      Profile      |  User   | Version |     Start Time      |      End Time       |
	|---------|--------------------------------------------------------------------------|-------------------|---------|---------|---------------------|---------------------|
	| unpause | nospam-163628 --log_dir                                                  | nospam-163628     | jenkins | v1.32.0 | 01 Dec 23 18:58 UTC | 01 Dec 23 18:58 UTC |
	|         | /tmp/nospam-163628 unpause                                               |                   |         |         |                     |                     |
	| unpause | nospam-163628 --log_dir                                                  | nospam-163628     | jenkins | v1.32.0 | 01 Dec 23 18:58 UTC | 01 Dec 23 18:58 UTC |
	|         | /tmp/nospam-163628 unpause                                               |                   |         |         |                     |                     |
	| unpause | nospam-163628 --log_dir                                                  | nospam-163628     | jenkins | v1.32.0 | 01 Dec 23 18:58 UTC | 01 Dec 23 18:58 UTC |
	|         | /tmp/nospam-163628 unpause                                               |                   |         |         |                     |                     |
	| stop    | nospam-163628 --log_dir                                                  | nospam-163628     | jenkins | v1.32.0 | 01 Dec 23 18:58 UTC | 01 Dec 23 18:58 UTC |
	|         | /tmp/nospam-163628 stop                                                  |                   |         |         |                     |                     |
	| stop    | nospam-163628 --log_dir                                                  | nospam-163628     | jenkins | v1.32.0 | 01 Dec 23 18:58 UTC | 01 Dec 23 18:58 UTC |
	|         | /tmp/nospam-163628 stop                                                  |                   |         |         |                     |                     |
	| stop    | nospam-163628 --log_dir                                                  | nospam-163628     | jenkins | v1.32.0 | 01 Dec 23 18:58 UTC | 01 Dec 23 18:58 UTC |
	|         | /tmp/nospam-163628 stop                                                  |                   |         |         |                     |                     |
	| delete  | -p nospam-163628                                                         | nospam-163628     | jenkins | v1.32.0 | 01 Dec 23 18:58 UTC | 01 Dec 23 18:58 UTC |
	| start   | -p functional-616785                                                     | functional-616785 | jenkins | v1.32.0 | 01 Dec 23 18:58 UTC | 01 Dec 23 18:59 UTC |
	|         | --memory=4000                                                            |                   |         |         |                     |                     |
	|         | --apiserver-port=8441                                                    |                   |         |         |                     |                     |
	|         | --wait=all --driver=docker                                               |                   |         |         |                     |                     |
	|         | --container-runtime=containerd                                           |                   |         |         |                     |                     |
	| start   | -p functional-616785                                                     | functional-616785 | jenkins | v1.32.0 | 01 Dec 23 18:59 UTC | 01 Dec 23 18:59 UTC |
	|         | --alsologtostderr -v=8                                                   |                   |         |         |                     |                     |
	| cache   | functional-616785 cache add                                              | functional-616785 | jenkins | v1.32.0 | 01 Dec 23 18:59 UTC | 01 Dec 23 18:59 UTC |
	|         | registry.k8s.io/pause:3.1                                                |                   |         |         |                     |                     |
	| cache   | functional-616785 cache add                                              | functional-616785 | jenkins | v1.32.0 | 01 Dec 23 18:59 UTC | 01 Dec 23 18:59 UTC |
	|         | registry.k8s.io/pause:3.3                                                |                   |         |         |                     |                     |
	| cache   | functional-616785 cache add                                              | functional-616785 | jenkins | v1.32.0 | 01 Dec 23 18:59 UTC | 01 Dec 23 18:59 UTC |
	|         | registry.k8s.io/pause:latest                                             |                   |         |         |                     |                     |
	| cache   | functional-616785 cache add                                              | functional-616785 | jenkins | v1.32.0 | 01 Dec 23 18:59 UTC | 01 Dec 23 18:59 UTC |
	|         | minikube-local-cache-test:functional-616785                              |                   |         |         |                     |                     |
	| cache   | functional-616785 cache delete                                           | functional-616785 | jenkins | v1.32.0 | 01 Dec 23 18:59 UTC | 01 Dec 23 18:59 UTC |
	|         | minikube-local-cache-test:functional-616785                              |                   |         |         |                     |                     |
	| cache   | delete                                                                   | minikube          | jenkins | v1.32.0 | 01 Dec 23 18:59 UTC | 01 Dec 23 18:59 UTC |
	|         | registry.k8s.io/pause:3.3                                                |                   |         |         |                     |                     |
	| cache   | list                                                                     | minikube          | jenkins | v1.32.0 | 01 Dec 23 18:59 UTC | 01 Dec 23 18:59 UTC |
	| ssh     | functional-616785 ssh sudo                                               | functional-616785 | jenkins | v1.32.0 | 01 Dec 23 18:59 UTC | 01 Dec 23 18:59 UTC |
	|         | crictl images                                                            |                   |         |         |                     |                     |
	| ssh     | functional-616785                                                        | functional-616785 | jenkins | v1.32.0 | 01 Dec 23 18:59 UTC | 01 Dec 23 18:59 UTC |
	|         | ssh sudo crictl rmi                                                      |                   |         |         |                     |                     |
	|         | registry.k8s.io/pause:latest                                             |                   |         |         |                     |                     |
	| ssh     | functional-616785 ssh                                                    | functional-616785 | jenkins | v1.32.0 | 01 Dec 23 18:59 UTC |                     |
	|         | sudo crictl inspecti                                                     |                   |         |         |                     |                     |
	|         | registry.k8s.io/pause:latest                                             |                   |         |         |                     |                     |
	| cache   | functional-616785 cache reload                                           | functional-616785 | jenkins | v1.32.0 | 01 Dec 23 18:59 UTC | 01 Dec 23 18:59 UTC |
	| ssh     | functional-616785 ssh                                                    | functional-616785 | jenkins | v1.32.0 | 01 Dec 23 18:59 UTC | 01 Dec 23 18:59 UTC |
	|         | sudo crictl inspecti                                                     |                   |         |         |                     |                     |
	|         | registry.k8s.io/pause:latest                                             |                   |         |         |                     |                     |
	| cache   | delete                                                                   | minikube          | jenkins | v1.32.0 | 01 Dec 23 18:59 UTC | 01 Dec 23 18:59 UTC |
	|         | registry.k8s.io/pause:3.1                                                |                   |         |         |                     |                     |
	| cache   | delete                                                                   | minikube          | jenkins | v1.32.0 | 01 Dec 23 18:59 UTC | 01 Dec 23 18:59 UTC |
	|         | registry.k8s.io/pause:latest                                             |                   |         |         |                     |                     |
	| kubectl | functional-616785 kubectl --                                             | functional-616785 | jenkins | v1.32.0 | 01 Dec 23 18:59 UTC | 01 Dec 23 18:59 UTC |
	|         | --context functional-616785                                              |                   |         |         |                     |                     |
	|         | get pods                                                                 |                   |         |         |                     |                     |
	| start   | -p functional-616785                                                     | functional-616785 | jenkins | v1.32.0 | 01 Dec 23 18:59 UTC |                     |
	|         | --extra-config=apiserver.enable-admission-plugins=NamespaceAutoProvision |                   |         |         |                     |                     |
	|         | --wait=all                                                               |                   |         |         |                     |                     |
	|---------|--------------------------------------------------------------------------|-------------------|---------|---------|---------------------|---------------------|
	
	* 
	* ==> Last Start <==
	* Log file created at: 2023/12/01 18:59:55
	Running on machine: ip-172-31-31-251
	Binary: Built with gc go1.21.4 for linux/arm64
	Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg
	I1201 18:59:55.615978  284652 out.go:296] Setting OutFile to fd 1 ...
	I1201 18:59:55.616150  284652 out.go:343] TERM=,COLORTERM=, which probably does not support color
	I1201 18:59:55.616154  284652 out.go:309] Setting ErrFile to fd 2...
	I1201 18:59:55.616158  284652 out.go:343] TERM=,COLORTERM=, which probably does not support color
	I1201 18:59:55.616415  284652 root.go:338] Updating PATH: /home/jenkins/minikube-integration/17703-252966/.minikube/bin
	I1201 18:59:55.617256  284652 out.go:303] Setting JSON to false
	I1201 18:59:55.618314  284652 start.go:128] hostinfo: {"hostname":"ip-172-31-31-251","uptime":6142,"bootTime":1701451054,"procs":240,"os":"linux","platform":"ubuntu","platformFamily":"debian","platformVersion":"20.04","kernelVersion":"5.15.0-1050-aws","kernelArch":"aarch64","virtualizationSystem":"","virtualizationRole":"","hostId":"982e3628-3742-4b3e-bb63-ac1b07660ec7"}
	I1201 18:59:55.618381  284652 start.go:138] virtualization:  
	I1201 18:59:55.620456  284652 out.go:177] * [functional-616785] minikube v1.32.0 on Ubuntu 20.04 (arm64)
	I1201 18:59:55.622614  284652 out.go:177]   - MINIKUBE_LOCATION=17703
	I1201 18:59:55.622808  284652 notify.go:220] Checking for updates...
	I1201 18:59:55.624517  284652 out.go:177]   - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
	I1201 18:59:55.626290  284652 out.go:177]   - KUBECONFIG=/home/jenkins/minikube-integration/17703-252966/kubeconfig
	I1201 18:59:55.628084  284652 out.go:177]   - MINIKUBE_HOME=/home/jenkins/minikube-integration/17703-252966/.minikube
	I1201 18:59:55.629702  284652 out.go:177]   - MINIKUBE_BIN=out/minikube-linux-arm64
	I1201 18:59:55.631323  284652 out.go:177]   - MINIKUBE_FORCE_SYSTEMD=
	I1201 18:59:55.633537  284652 config.go:182] Loaded profile config "functional-616785": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.28.4
	I1201 18:59:55.633635  284652 driver.go:392] Setting default libvirt URI to qemu:///system
	I1201 18:59:55.658918  284652 docker.go:122] docker version: linux-24.0.7:Docker Engine - Community
	I1201 18:59:55.659044  284652 cli_runner.go:164] Run: docker system info --format "{{json .}}"
	I1201 18:59:55.756934  284652 info.go:266] docker info: {ID:EOU5:DNGX:XN6V:L2FZ:UXRM:5TWK:EVUR:KC2F:GT7Z:Y4O4:GB77:5PD3 Containers:1 ContainersRunning:1 ContainersPaused:0 ContainersStopped:0 Images:2 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local logentries splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:true BridgeNfIP6Tables:true Debug:false NFd:35 OomKillDisable:true NGoroutines:54 SystemTime:2023-12-01 18:59:55.745393931 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1050-aws OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Archi
tecture:aarch64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:2 MemTotal:8215040000 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ip-172-31-31-251 Labels:[] ExperimentalBuild:false ServerVersion:24.0.7 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:d8f198a4ed8892c764191ef7b3b06d8a2eeb5c7f Expected:d8f198a4ed8892c764191ef7b3b06d8a2eeb5c7f} RuncCommit:{ID:v1.1.10-0-g18a0cb0 Expected:v1.1.10-0-g18a0cb0} InitCommit:{ID:de40ad0 Expected:de40ad0} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:<nil>
ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.11.2] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.21.0]] Warnings:<nil>}}
	I1201 18:59:55.757022  284652 docker.go:295] overlay module found
	I1201 18:59:55.759278  284652 out.go:177] * Using the docker driver based on existing profile
	I1201 18:59:55.761446  284652 start.go:298] selected driver: docker
	I1201 18:59:55.761457  284652 start.go:902] validating driver "docker" against &{Name:functional-616785 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.42-1701387262-17703@sha256:a5458414df1be5e58eff93b3e67e6ecaad7e51ab23139de15714f7345af15e2f Memory:4000 CPUs:2 DiskSize:20000 VMDriver: Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:0 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.28.4 ClusterName:functional-616785 Namespace:default APIServerName:miniku
beCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI: NodeIP: NodePort:8441 NodeName:} Nodes:[{Name: IP:192.168.49.2 Port:8441 KubernetesVersion:v1.28.4 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[default-storageclass:true storage-provisioner:true] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPor
t:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 AutoPauseInterval:1m0s GPUs:}
	I1201 18:59:55.761545  284652 start.go:913] status for docker: {Installed:true Healthy:true Running:false NeedsImprovement:false Error:<nil> Reason: Fix: Doc: Version:}
	I1201 18:59:55.761652  284652 cli_runner.go:164] Run: docker system info --format "{{json .}}"
	I1201 18:59:55.847452  284652 info.go:266] docker info: {ID:EOU5:DNGX:XN6V:L2FZ:UXRM:5TWK:EVUR:KC2F:GT7Z:Y4O4:GB77:5PD3 Containers:1 ContainersRunning:1 ContainersPaused:0 ContainersStopped:0 Images:2 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local logentries splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:true BridgeNfIP6Tables:true Debug:false NFd:35 OomKillDisable:true NGoroutines:54 SystemTime:2023-12-01 18:59:55.829174796 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1050-aws OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Archi
tecture:aarch64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:2 MemTotal:8215040000 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ip-172-31-31-251 Labels:[] ExperimentalBuild:false ServerVersion:24.0.7 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:d8f198a4ed8892c764191ef7b3b06d8a2eeb5c7f Expected:d8f198a4ed8892c764191ef7b3b06d8a2eeb5c7f} RuncCommit:{ID:v1.1.10-0-g18a0cb0 Expected:v1.1.10-0-g18a0cb0} InitCommit:{ID:de40ad0 Expected:de40ad0} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:<nil>
ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.11.2] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.21.0]] Warnings:<nil>}}
	I1201 18:59:55.847893  284652 start_flags.go:931] Waiting for all components: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
	I1201 18:59:55.847955  284652 cni.go:84] Creating CNI manager for ""
	I1201 18:59:55.847963  284652 cni.go:143] "docker" driver + "containerd" runtime found, recommending kindnet
	I1201 18:59:55.847971  284652 start_flags.go:323] config:
	{Name:functional-616785 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.42-1701387262-17703@sha256:a5458414df1be5e58eff93b3e67e6ecaad7e51ab23139de15714f7345af15e2f Memory:4000 CPUs:2 DiskSize:20000 VMDriver: Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:0 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.28.4 ClusterName:functional-616785 Namespace:default APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:
containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[{Component:apiserver Key:enable-admission-plugins Value:NamespaceAutoProvision}] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI: NodeIP: NodePort:8441 NodeName:} Nodes:[{Name: IP:192.168.49.2 Port:8441 KubernetesVersion:v1.28.4 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[default-storageclass:true storage-provisioner:true] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort
:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 AutoPauseInterval:1m0s GPUs:}
	I1201 18:59:55.849912  284652 out.go:177] * Starting control plane node functional-616785 in cluster functional-616785
	I1201 18:59:55.851651  284652 cache.go:121] Beginning downloading kic base image for docker with containerd
	I1201 18:59:55.853530  284652 out.go:177] * Pulling base image ...
	I1201 18:59:55.855193  284652 preload.go:132] Checking if preload exists for k8s version v1.28.4 and runtime containerd
	I1201 18:59:55.855237  284652 preload.go:148] Found local preload: /home/jenkins/minikube-integration/17703-252966/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.28.4-containerd-overlay2-arm64.tar.lz4
	I1201 18:59:55.855244  284652 cache.go:56] Caching tarball of preloaded images
	I1201 18:59:55.855280  284652 image.go:79] Checking for gcr.io/k8s-minikube/kicbase-builds:v0.0.42-1701387262-17703@sha256:a5458414df1be5e58eff93b3e67e6ecaad7e51ab23139de15714f7345af15e2f in local docker daemon
	I1201 18:59:55.855329  284652 preload.go:174] Found /home/jenkins/minikube-integration/17703-252966/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.28.4-containerd-overlay2-arm64.tar.lz4 in cache, skipping download
	I1201 18:59:55.855338  284652 cache.go:59] Finished verifying existence of preloaded tar for  v1.28.4 on containerd
	I1201 18:59:55.855451  284652 profile.go:148] Saving config to /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/functional-616785/config.json ...
	I1201 18:59:55.873456  284652 image.go:83] Found gcr.io/k8s-minikube/kicbase-builds:v0.0.42-1701387262-17703@sha256:a5458414df1be5e58eff93b3e67e6ecaad7e51ab23139de15714f7345af15e2f in local docker daemon, skipping pull
	I1201 18:59:55.873470  284652 cache.go:144] gcr.io/k8s-minikube/kicbase-builds:v0.0.42-1701387262-17703@sha256:a5458414df1be5e58eff93b3e67e6ecaad7e51ab23139de15714f7345af15e2f exists in daemon, skipping load
	I1201 18:59:55.873483  284652 cache.go:194] Successfully downloaded all kic artifacts
	I1201 18:59:55.873531  284652 start.go:365] acquiring machines lock for functional-616785: {Name:mk7ba1999eae2f42d2c57a82c994f0f066e74e62 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
	I1201 18:59:55.873593  284652 start.go:369] acquired machines lock for "functional-616785" in 43.75µs
	I1201 18:59:55.873611  284652 start.go:96] Skipping create...Using existing machine configuration
	I1201 18:59:55.873615  284652 fix.go:54] fixHost starting: 
	I1201 18:59:55.873899  284652 cli_runner.go:164] Run: docker container inspect functional-616785 --format={{.State.Status}}
	I1201 18:59:55.893018  284652 fix.go:102] recreateIfNeeded on functional-616785: state=Running err=<nil>
	W1201 18:59:55.893046  284652 fix.go:128] unexpected machine state, will restart: <nil>
	I1201 18:59:55.895272  284652 out.go:177] * Updating the running docker "functional-616785" container ...
	I1201 18:59:55.896970  284652 machine.go:88] provisioning docker machine ...
	I1201 18:59:55.897001  284652 ubuntu.go:169] provisioning hostname "functional-616785"
	I1201 18:59:55.897079  284652 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" functional-616785
	I1201 18:59:55.918341  284652 main.go:141] libmachine: Using SSH client type: native
	I1201 18:59:55.918764  284652 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x3be600] 0x3c0d70 <nil>  [] 0s} 127.0.0.1 33098 <nil> <nil>}
	I1201 18:59:55.918775  284652 main.go:141] libmachine: About to run SSH command:
	sudo hostname functional-616785 && echo "functional-616785" | sudo tee /etc/hostname
	I1201 18:59:56.088809  284652 main.go:141] libmachine: SSH cmd err, output: <nil>: functional-616785
	
	I1201 18:59:56.088884  284652 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" functional-616785
	I1201 18:59:56.109943  284652 main.go:141] libmachine: Using SSH client type: native
	I1201 18:59:56.110358  284652 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x3be600] 0x3c0d70 <nil>  [] 0s} 127.0.0.1 33098 <nil> <nil>}
	I1201 18:59:56.110375  284652 main.go:141] libmachine: About to run SSH command:
	
			if ! grep -xq '.*\sfunctional-616785' /etc/hosts; then
				if grep -xq '127.0.1.1\s.*' /etc/hosts; then
					sudo sed -i 's/^127.0.1.1\s.*/127.0.1.1 functional-616785/g' /etc/hosts;
				else 
					echo '127.0.1.1 functional-616785' | sudo tee -a /etc/hosts; 
				fi
			fi
	I1201 18:59:56.262166  284652 main.go:141] libmachine: SSH cmd err, output: <nil>: 
	I1201 18:59:56.262184  284652 ubuntu.go:175] set auth options {CertDir:/home/jenkins/minikube-integration/17703-252966/.minikube CaCertPath:/home/jenkins/minikube-integration/17703-252966/.minikube/certs/ca.pem CaPrivateKeyPath:/home/jenkins/minikube-integration/17703-252966/.minikube/certs/ca-key.pem CaCertRemotePath:/etc/docker/ca.pem ServerCertPath:/home/jenkins/minikube-integration/17703-252966/.minikube/machines/server.pem ServerKeyPath:/home/jenkins/minikube-integration/17703-252966/.minikube/machines/server-key.pem ClientKeyPath:/home/jenkins/minikube-integration/17703-252966/.minikube/certs/key.pem ServerCertRemotePath:/etc/docker/server.pem ServerKeyRemotePath:/etc/docker/server-key.pem ClientCertPath:/home/jenkins/minikube-integration/17703-252966/.minikube/certs/cert.pem ServerCertSANs:[] StorePath:/home/jenkins/minikube-integration/17703-252966/.minikube}
	I1201 18:59:56.262208  284652 ubuntu.go:177] setting up certificates
	I1201 18:59:56.262218  284652 provision.go:83] configureAuth start
	I1201 18:59:56.262288  284652 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" functional-616785
	I1201 18:59:56.284151  284652 provision.go:138] copyHostCerts
	I1201 18:59:56.284209  284652 exec_runner.go:144] found /home/jenkins/minikube-integration/17703-252966/.minikube/ca.pem, removing ...
	I1201 18:59:56.284217  284652 exec_runner.go:203] rm: /home/jenkins/minikube-integration/17703-252966/.minikube/ca.pem
	I1201 18:59:56.284294  284652 exec_runner.go:151] cp: /home/jenkins/minikube-integration/17703-252966/.minikube/certs/ca.pem --> /home/jenkins/minikube-integration/17703-252966/.minikube/ca.pem (1078 bytes)
	I1201 18:59:56.284398  284652 exec_runner.go:144] found /home/jenkins/minikube-integration/17703-252966/.minikube/cert.pem, removing ...
	I1201 18:59:56.284401  284652 exec_runner.go:203] rm: /home/jenkins/minikube-integration/17703-252966/.minikube/cert.pem
	I1201 18:59:56.284427  284652 exec_runner.go:151] cp: /home/jenkins/minikube-integration/17703-252966/.minikube/certs/cert.pem --> /home/jenkins/minikube-integration/17703-252966/.minikube/cert.pem (1123 bytes)
	I1201 18:59:56.284516  284652 exec_runner.go:144] found /home/jenkins/minikube-integration/17703-252966/.minikube/key.pem, removing ...
	I1201 18:59:56.284521  284652 exec_runner.go:203] rm: /home/jenkins/minikube-integration/17703-252966/.minikube/key.pem
	I1201 18:59:56.284547  284652 exec_runner.go:151] cp: /home/jenkins/minikube-integration/17703-252966/.minikube/certs/key.pem --> /home/jenkins/minikube-integration/17703-252966/.minikube/key.pem (1679 bytes)
	I1201 18:59:56.284595  284652 provision.go:112] generating server cert: /home/jenkins/minikube-integration/17703-252966/.minikube/machines/server.pem ca-key=/home/jenkins/minikube-integration/17703-252966/.minikube/certs/ca.pem private-key=/home/jenkins/minikube-integration/17703-252966/.minikube/certs/ca-key.pem org=jenkins.functional-616785 san=[192.168.49.2 127.0.0.1 localhost 127.0.0.1 minikube functional-616785]
	I1201 18:59:56.682978  284652 provision.go:172] copyRemoteCerts
	I1201 18:59:56.683037  284652 ssh_runner.go:195] Run: sudo mkdir -p /etc/docker /etc/docker /etc/docker
	I1201 18:59:56.683077  284652 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" functional-616785
	I1201 18:59:56.708095  284652 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33098 SSHKeyPath:/home/jenkins/minikube-integration/17703-252966/.minikube/machines/functional-616785/id_rsa Username:docker}
	I1201 18:59:56.815437  284652 ssh_runner.go:362] scp /home/jenkins/minikube-integration/17703-252966/.minikube/certs/ca.pem --> /etc/docker/ca.pem (1078 bytes)
	I1201 18:59:56.850398  284652 ssh_runner.go:362] scp /home/jenkins/minikube-integration/17703-252966/.minikube/machines/server.pem --> /etc/docker/server.pem (1229 bytes)
	I1201 18:59:56.881389  284652 ssh_runner.go:362] scp /home/jenkins/minikube-integration/17703-252966/.minikube/machines/server-key.pem --> /etc/docker/server-key.pem (1679 bytes)
	I1201 18:59:56.911608  284652 provision.go:86] duration metric: configureAuth took 649.363698ms
	I1201 18:59:56.911625  284652 ubuntu.go:193] setting minikube options for container-runtime
	I1201 18:59:56.911822  284652 config.go:182] Loaded profile config "functional-616785": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.28.4
	I1201 18:59:56.911828  284652 machine.go:91] provisioned docker machine in 1.014842252s
	I1201 18:59:56.911833  284652 start.go:300] post-start starting for "functional-616785" (driver="docker")
	I1201 18:59:56.911844  284652 start.go:329] creating required directories: [/etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs]
	I1201 18:59:56.911894  284652 ssh_runner.go:195] Run: sudo mkdir -p /etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs
	I1201 18:59:56.911930  284652 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" functional-616785
	I1201 18:59:56.930511  284652 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33098 SSHKeyPath:/home/jenkins/minikube-integration/17703-252966/.minikube/machines/functional-616785/id_rsa Username:docker}
	I1201 18:59:57.036977  284652 ssh_runner.go:195] Run: cat /etc/os-release
	I1201 18:59:57.041577  284652 main.go:141] libmachine: Couldn't set key VERSION_CODENAME, no corresponding struct field found
	I1201 18:59:57.041603  284652 main.go:141] libmachine: Couldn't set key PRIVACY_POLICY_URL, no corresponding struct field found
	I1201 18:59:57.041612  284652 main.go:141] libmachine: Couldn't set key UBUNTU_CODENAME, no corresponding struct field found
	I1201 18:59:57.041619  284652 info.go:137] Remote host: Ubuntu 22.04.3 LTS
	I1201 18:59:57.041628  284652 filesync.go:126] Scanning /home/jenkins/minikube-integration/17703-252966/.minikube/addons for local assets ...
	I1201 18:59:57.041688  284652 filesync.go:126] Scanning /home/jenkins/minikube-integration/17703-252966/.minikube/files for local assets ...
	I1201 18:59:57.041764  284652 filesync.go:149] local asset: /home/jenkins/minikube-integration/17703-252966/.minikube/files/etc/ssl/certs/2583012.pem -> 2583012.pem in /etc/ssl/certs
	I1201 18:59:57.041841  284652 filesync.go:149] local asset: /home/jenkins/minikube-integration/17703-252966/.minikube/files/etc/test/nested/copy/258301/hosts -> hosts in /etc/test/nested/copy/258301
	I1201 18:59:57.041896  284652 ssh_runner.go:195] Run: sudo mkdir -p /etc/ssl/certs /etc/test/nested/copy/258301
	I1201 18:59:57.054186  284652 ssh_runner.go:362] scp /home/jenkins/minikube-integration/17703-252966/.minikube/files/etc/ssl/certs/2583012.pem --> /etc/ssl/certs/2583012.pem (1708 bytes)
	I1201 18:59:57.086073  284652 ssh_runner.go:362] scp /home/jenkins/minikube-integration/17703-252966/.minikube/files/etc/test/nested/copy/258301/hosts --> /etc/test/nested/copy/258301/hosts (40 bytes)
	I1201 18:59:57.118038  284652 start.go:303] post-start completed in 206.189948ms
	I1201 18:59:57.118126  284652 ssh_runner.go:195] Run: sh -c "df -h /var | awk 'NR==2{print $5}'"
	I1201 18:59:57.118167  284652 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" functional-616785
	I1201 18:59:57.137858  284652 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33098 SSHKeyPath:/home/jenkins/minikube-integration/17703-252966/.minikube/machines/functional-616785/id_rsa Username:docker}
	I1201 18:59:57.238766  284652 ssh_runner.go:195] Run: sh -c "df -BG /var | awk 'NR==2{print $4}'"
	I1201 18:59:57.245250  284652 fix.go:56] fixHost completed within 1.371626283s
	I1201 18:59:57.245266  284652 start.go:83] releasing machines lock for "functional-616785", held for 1.371666038s
	I1201 18:59:57.245347  284652 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" functional-616785
	I1201 18:59:57.263909  284652 ssh_runner.go:195] Run: cat /version.json
	I1201 18:59:57.263954  284652 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" functional-616785
	I1201 18:59:57.264228  284652 ssh_runner.go:195] Run: curl -sS -m 2 https://registry.k8s.io/
	I1201 18:59:57.264281  284652 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" functional-616785
	I1201 18:59:57.282625  284652 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33098 SSHKeyPath:/home/jenkins/minikube-integration/17703-252966/.minikube/machines/functional-616785/id_rsa Username:docker}
	I1201 18:59:57.304543  284652 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33098 SSHKeyPath:/home/jenkins/minikube-integration/17703-252966/.minikube/machines/functional-616785/id_rsa Username:docker}
	I1201 18:59:57.389146  284652 ssh_runner.go:195] Run: systemctl --version
	I1201 18:59:57.527936  284652 ssh_runner.go:195] Run: sh -c "stat /etc/cni/net.d/*loopback.conf*"
	I1201 18:59:57.533677  284652 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f -name *loopback.conf* -not -name *.mk_disabled -exec sh -c "grep -q loopback {} && ( grep -q name {} || sudo sed -i '/"type": "loopback"/i \ \ \ \ "name": "loopback",' {} ) && sudo sed -i 's|"cniVersion": ".*"|"cniVersion": "1.0.0"|g' {}" ;
	I1201 18:59:57.557046  284652 cni.go:230] loopback cni configuration patched: "/etc/cni/net.d/*loopback.conf*" found
	I1201 18:59:57.557116  284652 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f ( ( -name *bridge* -or -name *podman* ) -and -not -name *.mk_disabled ) -printf "%p, " -exec sh -c "sudo mv {} {}.mk_disabled" ;
	I1201 18:59:57.567961  284652 cni.go:259] no active bridge cni configs found in "/etc/cni/net.d" - nothing to disable
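The two find/exec runs above are minikube's CNI cleanup step: any loopback config gets a "name" field injected and its cniVersion pinned to 1.0.0, while bridge/podman configs would be renamed to *.mk_disabled so kindnet can own pod networking (none were present here). A minimal Go sketch of just the loopback edit, assuming a hypothetical file at /etc/cni/net.d/200-loopback.conf and no concurrent writers:

    // patch_loopback.go: sketch of the loopback CNI patch logged above (not minikube's code).
    package main

    import (
    	"encoding/json"
    	"log"
    	"os"
    )

    func main() {
    	const path = "/etc/cni/net.d/200-loopback.conf" // hypothetical path
    	raw, err := os.ReadFile(path)
    	if err != nil {
    		log.Fatal(err)
    	}
    	var conf map[string]interface{}
    	if err := json.Unmarshal(raw, &conf); err != nil {
    		log.Fatal(err)
    	}
    	if _, ok := conf["name"]; !ok {
    		conf["name"] = "loopback" // add the missing "name" field, as the sed above does
    	}
    	conf["cniVersion"] = "1.0.0" // pin the cniVersion
    	out, err := json.MarshalIndent(conf, "", "  ")
    	if err != nil {
    		log.Fatal(err)
    	}
    	if err := os.WriteFile(path, out, 0o644); err != nil {
    		log.Fatal(err)
    	}
    }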
	I1201 18:59:57.567984  284652 start.go:475] detecting cgroup driver to use...
	I1201 18:59:57.568024  284652 detect.go:196] detected "cgroupfs" cgroup driver on host os
	I1201 18:59:57.568072  284652 ssh_runner.go:195] Run: sudo systemctl stop -f crio
	I1201 18:59:57.583616  284652 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service crio
	I1201 18:59:57.597365  284652 docker.go:203] disabling cri-docker service (if available) ...
	I1201 18:59:57.597425  284652 ssh_runner.go:195] Run: sudo systemctl stop -f cri-docker.socket
	I1201 18:59:57.612564  284652 ssh_runner.go:195] Run: sudo systemctl stop -f cri-docker.service
	I1201 18:59:57.627735  284652 ssh_runner.go:195] Run: sudo systemctl disable cri-docker.socket
	I1201 18:59:57.744845  284652 ssh_runner.go:195] Run: sudo systemctl mask cri-docker.service
	I1201 18:59:57.872977  284652 docker.go:219] disabling docker service ...
	I1201 18:59:57.873042  284652 ssh_runner.go:195] Run: sudo systemctl stop -f docker.socket
	I1201 18:59:57.887845  284652 ssh_runner.go:195] Run: sudo systemctl stop -f docker.service
	I1201 18:59:57.901426  284652 ssh_runner.go:195] Run: sudo systemctl disable docker.socket
	I1201 18:59:58.027596  284652 ssh_runner.go:195] Run: sudo systemctl mask docker.service
	I1201 18:59:58.155228  284652 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service docker
	I1201 18:59:58.169700  284652 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///run/containerd/containerd.sock
	" | sudo tee /etc/crictl.yaml"
	I1201 18:59:58.190749  284652 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)sandbox_image = .*$|\1sandbox_image = "registry.k8s.io/pause:3.9"|' /etc/containerd/config.toml"
	I1201 18:59:58.203508  284652 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)restrict_oom_score_adj = .*$|\1restrict_oom_score_adj = false|' /etc/containerd/config.toml"
	I1201 18:59:58.217134  284652 containerd.go:145] configuring containerd to use "cgroupfs" as cgroup driver...
	I1201 18:59:58.217201  284652 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)SystemdCgroup = .*$|\1SystemdCgroup = false|g' /etc/containerd/config.toml"
	I1201 18:59:58.230118  284652 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runtime.v1.linux"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
	I1201 18:59:58.242232  284652 ssh_runner.go:195] Run: sh -c "sudo sed -i '/systemd_cgroup/d' /etc/containerd/config.toml"
	I1201 18:59:58.254343  284652 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runc.v1"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
	I1201 18:59:58.267085  284652 ssh_runner.go:195] Run: sh -c "sudo rm -rf /etc/cni/net.mk"
	I1201 18:59:58.278909  284652 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)conf_dir = .*$|\1conf_dir = "/etc/cni/net.d"|g' /etc/containerd/config.toml"
	I1201 18:59:58.292855  284652 ssh_runner.go:195] Run: sudo sysctl net.bridge.bridge-nf-call-iptables
	I1201 18:59:58.303063  284652 ssh_runner.go:195] Run: sudo sh -c "echo 1 > /proc/sys/net/ipv4/ip_forward"
	I1201 18:59:58.313254  284652 ssh_runner.go:195] Run: sudo systemctl daemon-reload
	I1201 18:59:58.442855  284652 ssh_runner.go:195] Run: sudo systemctl restart containerd
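The sed runs above rewrite /etc/containerd/config.toml in place: the sandbox image is pinned to registry.k8s.io/pause:3.9, restrict_oom_score_adj is disabled, SystemdCgroup is forced to false to match the detected "cgroupfs" driver, the legacy runtime.v1/runc.v1 shims are mapped to runc.v2, and conf_dir is pointed at /etc/cni/net.d; containerd is then restarted through systemd. A rough Go equivalent of the SystemdCgroup edit plus the restart, assuming the file already contains a SystemdCgroup line and that passwordless sudo is available:

    // set_cgroup_driver.go: sketch of the "SystemdCgroup = false" rewrite logged above.
    package main

    import (
    	"log"
    	"os"
    	"os/exec"
    	"regexp"
    )

    func main() {
    	const path = "/etc/containerd/config.toml"
    	raw, err := os.ReadFile(path)
    	if err != nil {
    		log.Fatal(err)
    	}
    	// Same effect as: sed -i -r 's|^( *)SystemdCgroup = .*$|\1SystemdCgroup = false|g'
    	re := regexp.MustCompile(`(?m)^( *)SystemdCgroup = .*$`)
    	patched := re.ReplaceAll(raw, []byte("${1}SystemdCgroup = false"))
    	if err := os.WriteFile(path, patched, 0o644); err != nil {
    		log.Fatal(err)
    	}
    	// Reload and restart containerd, mirroring the systemctl calls in the log.
    	for _, args := range [][]string{
    		{"systemctl", "daemon-reload"},
    		{"systemctl", "restart", "containerd"},
    	} {
    		if out, err := exec.Command("sudo", args...).CombinedOutput(); err != nil {
    			log.Fatalf("%v: %s", err, out)
    		}
    	}
    }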
	I1201 18:59:58.659034  284652 start.go:522] Will wait 60s for socket path /run/containerd/containerd.sock
	I1201 18:59:58.659105  284652 ssh_runner.go:195] Run: stat /run/containerd/containerd.sock
	I1201 18:59:58.664191  284652 start.go:543] Will wait 60s for crictl version
	I1201 18:59:58.664244  284652 ssh_runner.go:195] Run: which crictl
	I1201 18:59:58.668768  284652 ssh_runner.go:195] Run: sudo /usr/bin/crictl version
	I1201 18:59:58.711585  284652 start.go:559] Version:  0.1.0
	RuntimeName:  containerd
	RuntimeVersion:  1.6.25
	RuntimeApiVersion:  v1
	I1201 18:59:58.711654  284652 ssh_runner.go:195] Run: containerd --version
	I1201 18:59:58.743914  284652 ssh_runner.go:195] Run: containerd --version
	I1201 18:59:58.774417  284652 out.go:177] * Preparing Kubernetes v1.28.4 on containerd 1.6.25 ...
	I1201 18:59:58.776243  284652 cli_runner.go:164] Run: docker network inspect functional-616785 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
	I1201 18:59:58.807956  284652 ssh_runner.go:195] Run: grep 192.168.49.1	host.minikube.internal$ /etc/hosts
	I1201 18:59:58.815592  284652 out.go:177]   - apiserver.enable-admission-plugins=NamespaceAutoProvision
	I1201 18:59:58.817338  284652 preload.go:132] Checking if preload exists for k8s version v1.28.4 and runtime containerd
	I1201 18:59:58.817417  284652 ssh_runner.go:195] Run: sudo crictl images --output json
	I1201 18:59:58.862005  284652 containerd.go:604] all images are preloaded for containerd runtime.
	I1201 18:59:58.862017  284652 containerd.go:518] Images already preloaded, skipping extraction
	I1201 18:59:58.862068  284652 ssh_runner.go:195] Run: sudo crictl images --output json
	I1201 18:59:58.904434  284652 containerd.go:604] all images are preloaded for containerd runtime.
	I1201 18:59:58.904450  284652 cache_images.go:84] Images are preloaded, skipping loading
	I1201 18:59:58.904561  284652 ssh_runner.go:195] Run: sudo crictl info
	I1201 18:59:58.945488  284652 extraconfig.go:124] Overwriting default enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota with user provided enable-admission-plugins=NamespaceAutoProvision for component apiserver
	I1201 18:59:58.945515  284652 cni.go:84] Creating CNI manager for ""
	I1201 18:59:58.945526  284652 cni.go:143] "docker" driver + "containerd" runtime found, recommending kindnet
	I1201 18:59:58.945533  284652 kubeadm.go:87] Using pod CIDR: 10.244.0.0/16
	I1201 18:59:58.945549  284652 kubeadm.go:176] kubeadm options: {CertDir:/var/lib/minikube/certs ServiceCIDR:10.96.0.0/12 PodSubnet:10.244.0.0/16 AdvertiseAddress:192.168.49.2 APIServerPort:8441 KubernetesVersion:v1.28.4 EtcdDataDir:/var/lib/minikube/etcd EtcdExtraArgs:map[] ClusterName:functional-616785 NodeName:functional-616785 DNSDomain:cluster.local CRISocket:/run/containerd/containerd.sock ImageRepository: ComponentOptions:[{Component:apiServer ExtraArgs:map[enable-admission-plugins:NamespaceAutoProvision] Pairs:map[certSANs:["127.0.0.1", "localhost", "192.168.49.2"]]} {Component:controllerManager ExtraArgs:map[allocate-node-cidrs:true leader-elect:false] Pairs:map[]} {Component:scheduler ExtraArgs:map[leader-elect:false] Pairs:map[]}] FeatureArgs:map[] NodeIP:192.168.49.2 CgroupDriver:cgroupfs ClientCAFile:/var/lib/minikube/certs/ca.crt StaticPodPath:/etc/kubernetes/manifests ControlPlaneAddress:control-plane.minikube.internal KubeProxyOptions:map[] ResolvConfSearchRegression:false KubeletConfi
gOpts:map[hairpinMode:hairpin-veth runtimeRequestTimeout:15m] PrependCriSocketUnix:true}
	I1201 18:59:58.945665  284652 kubeadm.go:181] kubeadm config:
	apiVersion: kubeadm.k8s.io/v1beta3
	kind: InitConfiguration
	localAPIEndpoint:
	  advertiseAddress: 192.168.49.2
	  bindPort: 8441
	bootstrapTokens:
	  - groups:
	      - system:bootstrappers:kubeadm:default-node-token
	    ttl: 24h0m0s
	    usages:
	      - signing
	      - authentication
	nodeRegistration:
	  criSocket: unix:///run/containerd/containerd.sock
	  name: "functional-616785"
	  kubeletExtraArgs:
	    node-ip: 192.168.49.2
	  taints: []
	---
	apiVersion: kubeadm.k8s.io/v1beta3
	kind: ClusterConfiguration
	apiServer:
	  certSANs: ["127.0.0.1", "localhost", "192.168.49.2"]
	  extraArgs:
	    enable-admission-plugins: "NamespaceAutoProvision"
	controllerManager:
	  extraArgs:
	    allocate-node-cidrs: "true"
	    leader-elect: "false"
	scheduler:
	  extraArgs:
	    leader-elect: "false"
	certificatesDir: /var/lib/minikube/certs
	clusterName: mk
	controlPlaneEndpoint: control-plane.minikube.internal:8441
	etcd:
	  local:
	    dataDir: /var/lib/minikube/etcd
	    extraArgs:
	      proxy-refresh-interval: "70000"
	kubernetesVersion: v1.28.4
	networking:
	  dnsDomain: cluster.local
	  podSubnet: "10.244.0.0/16"
	  serviceSubnet: 10.96.0.0/12
	---
	apiVersion: kubelet.config.k8s.io/v1beta1
	kind: KubeletConfiguration
	authentication:
	  x509:
	    clientCAFile: /var/lib/minikube/certs/ca.crt
	cgroupDriver: cgroupfs
	hairpinMode: hairpin-veth
	runtimeRequestTimeout: 15m
	clusterDomain: "cluster.local"
	# disable disk resource management by default
	imageGCHighThresholdPercent: 100
	evictionHard:
	  nodefs.available: "0%"
	  nodefs.inodesFree: "0%"
	  imagefs.available: "0%"
	failSwapOn: false
	staticPodPath: /etc/kubernetes/manifests
	---
	apiVersion: kubeproxy.config.k8s.io/v1alpha1
	kind: KubeProxyConfiguration
	clusterCIDR: "10.244.0.0/16"
	metricsBindAddress: 0.0.0.0:10249
	conntrack:
	  maxPerCore: 0
	# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_established"
	  tcpEstablishedTimeout: 0s
	# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_close"
	  tcpCloseWaitTimeout: 0s
	
	I1201 18:59:58.945726  284652 kubeadm.go:976] kubelet [Unit]
	Wants=containerd.service
	
	[Service]
	ExecStart=
	ExecStart=/var/lib/minikube/binaries/v1.28.4/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --config=/var/lib/kubelet/config.yaml --container-runtime-endpoint=unix:///run/containerd/containerd.sock --hostname-override=functional-616785 --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.49.2
	
	[Install]
	 config:
	{KubernetesVersion:v1.28.4 ClusterName:functional-616785 Namespace:default APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[{Component:apiserver Key:enable-admission-plugins Value:NamespaceAutoProvision}] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI: NodeIP: NodePort:8441 NodeName:}
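The drop-in above replaces the kubelet ExecStart so it points at the containerd socket, the bootstrap kubeconfig, and the node IP before the unit files are copied over. A small text/template sketch that renders an equivalent ExecStart override from a hypothetical options struct (the field names here are illustrative, not minikube's):

    // render_kubelet_flags.go: sketch of rendering the kubelet ExecStart override shown above.
    package main

    import (
    	"os"
    	"text/template"
    )

    // kubeletOpts is a hypothetical subset of the settings that appear in the drop-in.
    type kubeletOpts struct {
    	Version, Hostname, NodeIP, CRISocket string
    }

    const unit = `[Service]
    ExecStart=
    ExecStart=/var/lib/minikube/binaries/{{.Version}}/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --config=/var/lib/kubelet/config.yaml --container-runtime-endpoint={{.CRISocket}} --hostname-override={{.Hostname}} --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip={{.NodeIP}}
    `

    func main() {
    	opts := kubeletOpts{
    		Version:   "v1.28.4",
    		Hostname:  "functional-616785",
    		NodeIP:    "192.168.49.2",
    		CRISocket: "unix:///run/containerd/containerd.sock",
    	}
    	tmpl := template.Must(template.New("kubelet").Parse(unit))
    	if err := tmpl.Execute(os.Stdout, opts); err != nil {
    		panic(err)
    	}
    }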
	I1201 18:59:58.945786  284652 ssh_runner.go:195] Run: sudo ls /var/lib/minikube/binaries/v1.28.4
	I1201 18:59:58.956311  284652 binaries.go:44] Found k8s binaries, skipping transfer
	I1201 18:59:58.956381  284652 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/kubelet.service.d /lib/systemd/system /var/tmp/minikube
	I1201 18:59:58.966753  284652 ssh_runner.go:362] scp memory --> /etc/systemd/system/kubelet.service.d/10-kubeadm.conf (389 bytes)
	I1201 18:59:58.989937  284652 ssh_runner.go:362] scp memory --> /lib/systemd/system/kubelet.service (352 bytes)
	I1201 18:59:59.014337  284652 ssh_runner.go:362] scp memory --> /var/tmp/minikube/kubeadm.yaml.new (1956 bytes)
	I1201 18:59:59.037366  284652 ssh_runner.go:195] Run: grep 192.168.49.2	control-plane.minikube.internal$ /etc/hosts
	I1201 18:59:59.042025  284652 certs.go:56] Setting up /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/functional-616785 for IP: 192.168.49.2
	I1201 18:59:59.042048  284652 certs.go:190] acquiring lock for shared ca certs: {Name:mk799b1e63d23a413d1b6e34a0169dabbea1b951 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
	I1201 18:59:59.042193  284652 certs.go:199] skipping minikubeCA CA generation: /home/jenkins/minikube-integration/17703-252966/.minikube/ca.key
	I1201 18:59:59.042238  284652 certs.go:199] skipping proxyClientCA CA generation: /home/jenkins/minikube-integration/17703-252966/.minikube/proxy-client-ca.key
	I1201 18:59:59.042310  284652 certs.go:315] skipping minikube-user signed cert generation: /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/functional-616785/client.key
	I1201 18:59:59.042352  284652 certs.go:315] skipping minikube signed cert generation: /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/functional-616785/apiserver.key.dd3b5fb2
	I1201 18:59:59.042402  284652 certs.go:315] skipping aggregator signed cert generation: /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/functional-616785/proxy-client.key
	I1201 18:59:59.042510  284652 certs.go:437] found cert: /home/jenkins/minikube-integration/17703-252966/.minikube/certs/home/jenkins/minikube-integration/17703-252966/.minikube/certs/258301.pem (1338 bytes)
	W1201 18:59:59.042536  284652 certs.go:433] ignoring /home/jenkins/minikube-integration/17703-252966/.minikube/certs/home/jenkins/minikube-integration/17703-252966/.minikube/certs/258301_empty.pem, impossibly tiny 0 bytes
	I1201 18:59:59.042544  284652 certs.go:437] found cert: /home/jenkins/minikube-integration/17703-252966/.minikube/certs/home/jenkins/minikube-integration/17703-252966/.minikube/certs/ca-key.pem (1675 bytes)
	I1201 18:59:59.042568  284652 certs.go:437] found cert: /home/jenkins/minikube-integration/17703-252966/.minikube/certs/home/jenkins/minikube-integration/17703-252966/.minikube/certs/ca.pem (1078 bytes)
	I1201 18:59:59.042592  284652 certs.go:437] found cert: /home/jenkins/minikube-integration/17703-252966/.minikube/certs/home/jenkins/minikube-integration/17703-252966/.minikube/certs/cert.pem (1123 bytes)
	I1201 18:59:59.042617  284652 certs.go:437] found cert: /home/jenkins/minikube-integration/17703-252966/.minikube/certs/home/jenkins/minikube-integration/17703-252966/.minikube/certs/key.pem (1679 bytes)
	I1201 18:59:59.042683  284652 certs.go:437] found cert: /home/jenkins/minikube-integration/17703-252966/.minikube/files/etc/ssl/certs/home/jenkins/minikube-integration/17703-252966/.minikube/files/etc/ssl/certs/2583012.pem (1708 bytes)
	I1201 18:59:59.043457  284652 ssh_runner.go:362] scp /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/functional-616785/apiserver.crt --> /var/lib/minikube/certs/apiserver.crt (1399 bytes)
	I1201 18:59:59.073387  284652 ssh_runner.go:362] scp /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/functional-616785/apiserver.key --> /var/lib/minikube/certs/apiserver.key (1679 bytes)
	I1201 18:59:59.105153  284652 ssh_runner.go:362] scp /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/functional-616785/proxy-client.crt --> /var/lib/minikube/certs/proxy-client.crt (1147 bytes)
	I1201 18:59:59.138360  284652 ssh_runner.go:362] scp /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/functional-616785/proxy-client.key --> /var/lib/minikube/certs/proxy-client.key (1679 bytes)
	I1201 18:59:59.167982  284652 ssh_runner.go:362] scp /home/jenkins/minikube-integration/17703-252966/.minikube/ca.crt --> /var/lib/minikube/certs/ca.crt (1111 bytes)
	I1201 18:59:59.198786  284652 ssh_runner.go:362] scp /home/jenkins/minikube-integration/17703-252966/.minikube/ca.key --> /var/lib/minikube/certs/ca.key (1679 bytes)
	I1201 18:59:59.230510  284652 ssh_runner.go:362] scp /home/jenkins/minikube-integration/17703-252966/.minikube/proxy-client-ca.crt --> /var/lib/minikube/certs/proxy-client-ca.crt (1119 bytes)
	I1201 18:59:59.259864  284652 ssh_runner.go:362] scp /home/jenkins/minikube-integration/17703-252966/.minikube/proxy-client-ca.key --> /var/lib/minikube/certs/proxy-client-ca.key (1675 bytes)
	I1201 18:59:59.290234  284652 ssh_runner.go:362] scp /home/jenkins/minikube-integration/17703-252966/.minikube/ca.crt --> /usr/share/ca-certificates/minikubeCA.pem (1111 bytes)
	I1201 18:59:59.319681  284652 ssh_runner.go:362] scp /home/jenkins/minikube-integration/17703-252966/.minikube/certs/258301.pem --> /usr/share/ca-certificates/258301.pem (1338 bytes)
	I1201 18:59:59.349001  284652 ssh_runner.go:362] scp /home/jenkins/minikube-integration/17703-252966/.minikube/files/etc/ssl/certs/2583012.pem --> /usr/share/ca-certificates/2583012.pem (1708 bytes)
	I1201 18:59:59.379151  284652 ssh_runner.go:362] scp memory --> /var/lib/minikube/kubeconfig (738 bytes)
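Each scp line above pushes a profile cert, a shared CA, or the rendered kubeconfig into the node over the SSH endpoint recorded earlier (127.0.0.1:33098, user docker). A minimal sketch of one such copy using the stock scp client instead of minikube's ssh_runner; it ignores the sudo step the real runner performs on the remote side, and reuses the key path and port from the log:

    // push_cert.go: sketch of copying one cert into the node, assuming the scp CLI is installed
    // and the destination directory is writable by the docker user (it may not be in practice).
    package main

    import (
    	"log"
    	"os/exec"
    )

    func main() {
    	key := "/home/jenkins/minikube-integration/17703-252966/.minikube/machines/functional-616785/id_rsa"
    	src := "/home/jenkins/minikube-integration/17703-252966/.minikube/ca.crt"
    	dst := "docker@127.0.0.1:/var/lib/minikube/certs/ca.crt" // destination used above
    	cmd := exec.Command("scp",
    		"-i", key,
    		"-P", "33098", // SSH port published for the functional-616785 container
    		"-o", "StrictHostKeyChecking=no",
    		src, dst)
    	if out, err := cmd.CombinedOutput(); err != nil {
    		log.Fatalf("scp failed: %v: %s", err, out)
    	}
    }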
	I1201 18:59:59.400764  284652 ssh_runner.go:195] Run: openssl version
	I1201 18:59:59.408388  284652 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/minikubeCA.pem && ln -fs /usr/share/ca-certificates/minikubeCA.pem /etc/ssl/certs/minikubeCA.pem"
	I1201 18:59:59.420594  284652 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/minikubeCA.pem
	I1201 18:59:59.425790  284652 certs.go:480] hashing: -rw-r--r-- 1 root root 1111 Dec  1 18:52 /usr/share/ca-certificates/minikubeCA.pem
	I1201 18:59:59.425848  284652 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem
	I1201 18:59:59.434615  284652 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/b5213941.0 || ln -fs /etc/ssl/certs/minikubeCA.pem /etc/ssl/certs/b5213941.0"
	I1201 18:59:59.445634  284652 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/258301.pem && ln -fs /usr/share/ca-certificates/258301.pem /etc/ssl/certs/258301.pem"
	I1201 18:59:59.457374  284652 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/258301.pem
	I1201 18:59:59.462082  284652 certs.go:480] hashing: -rw-r--r-- 1 root root 1338 Dec  1 18:58 /usr/share/ca-certificates/258301.pem
	I1201 18:59:59.462148  284652 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/258301.pem
	I1201 18:59:59.470831  284652 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/51391683.0 || ln -fs /etc/ssl/certs/258301.pem /etc/ssl/certs/51391683.0"
	I1201 18:59:59.481952  284652 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/2583012.pem && ln -fs /usr/share/ca-certificates/2583012.pem /etc/ssl/certs/2583012.pem"
	I1201 18:59:59.494085  284652 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/2583012.pem
	I1201 18:59:59.499453  284652 certs.go:480] hashing: -rw-r--r-- 1 root root 1708 Dec  1 18:58 /usr/share/ca-certificates/2583012.pem
	I1201 18:59:59.499520  284652 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/2583012.pem
	I1201 18:59:59.508372  284652 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/3ec20f2e.0 || ln -fs /etc/ssl/certs/2583012.pem /etc/ssl/certs/3ec20f2e.0"
	I1201 18:59:59.519920  284652 ssh_runner.go:195] Run: ls /var/lib/minikube/certs/etcd
	I1201 18:59:59.524900  284652 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/apiserver-etcd-client.crt -checkend 86400
	I1201 18:59:59.534001  284652 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/apiserver-kubelet-client.crt -checkend 86400
	I1201 18:59:59.542551  284652 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/etcd/server.crt -checkend 86400
	I1201 18:59:59.551194  284652 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/etcd/healthcheck-client.crt -checkend 86400
	I1201 18:59:59.559847  284652 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/etcd/peer.crt -checkend 86400
	I1201 18:59:59.568456  284652 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/front-proxy-client.crt -checkend 86400
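The openssl x509 -checkend 86400 runs above succeed only if each control-plane certificate remains valid for at least the next 24 hours; a cert about to expire would be regenerated instead of reused. The same check expressed in Go, assuming a PEM-encoded certificate on disk:

    // checkend.go: sketch of the "openssl x509 -checkend 86400" test logged above.
    package main

    import (
    	"crypto/x509"
    	"encoding/pem"
    	"fmt"
    	"log"
    	"os"
    	"time"
    )

    func main() {
    	raw, err := os.ReadFile("/var/lib/minikube/certs/apiserver-etcd-client.crt")
    	if err != nil {
    		log.Fatal(err)
    	}
    	block, _ := pem.Decode(raw)
    	if block == nil {
    		log.Fatal("no PEM block found")
    	}
    	cert, err := x509.ParseCertificate(block.Bytes)
    	if err != nil {
    		log.Fatal(err)
    	}
    	// -checkend 86400: does the certificate survive another 24h from now?
    	if time.Now().Add(24 * time.Hour).After(cert.NotAfter) {
    		fmt.Println("Certificate will expire")
    		os.Exit(1)
    	}
    	fmt.Println("Certificate will not expire")
    }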
	I1201 18:59:59.577088  284652 kubeadm.go:404] StartCluster: {Name:functional-616785 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.42-1701387262-17703@sha256:a5458414df1be5e58eff93b3e67e6ecaad7e51ab23139de15714f7345af15e2f Memory:4000 CPUs:2 DiskSize:20000 VMDriver: Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:0 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.28.4 ClusterName:functional-616785 Namespace:default APIServerName:minikubeCA APIServerNames:
[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[{Component:apiserver Key:enable-admission-plugins Value:NamespaceAutoProvision}] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI: NodeIP: NodePort:8441 NodeName:} Nodes:[{Name: IP:192.168.49.2 Port:8441 KubernetesVersion:v1.28.4 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[default-storageclass:true storage-provisioner:true] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID
:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 AutoPauseInterval:1m0s GPUs:}
	I1201 18:59:59.577165  284652 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:paused Name: Namespaces:[kube-system]}
	I1201 18:59:59.577227  284652 ssh_runner.go:195] Run: sudo -s eval "crictl ps -a --quiet --label io.kubernetes.pod.namespace=kube-system"
	I1201 18:59:59.619843  284652 cri.go:89] found id: "172a910da74d5d4bd72533e43c42af2574c81da2cb8735f3148fb22366fc7b38"
	I1201 18:59:59.619856  284652 cri.go:89] found id: "6f24a29e8dde904ee7c79191a05e4233a4d3c4611dec66af1e784899ffd2f4fa"
	I1201 18:59:59.619861  284652 cri.go:89] found id: "9efdb32584bcd1c4938cb1074c27c1b3d95cdc91deea1013f2b99669c0621313"
	I1201 18:59:59.619865  284652 cri.go:89] found id: "1fb932a179c7041614a4fcc5715379bec1bebfa33c81139f5df677e9fa11945f"
	I1201 18:59:59.619868  284652 cri.go:89] found id: "214de4fd9c7633a02aa05db38f69168fa0962e31a183a34b3a3afb40315ef80e"
	I1201 18:59:59.619872  284652 cri.go:89] found id: "0c2d4da8c8cce66a5ddb65b0ca03546edefdf740f59669699af3f7f312913564"
	I1201 18:59:59.619876  284652 cri.go:89] found id: "2688cc66adf8e4f8e9a5996f8a27f4527b00c7741e59aadea1022fc868f95648"
	I1201 18:59:59.619879  284652 cri.go:89] found id: "6353341ab4be94e3126a82f86e5b7b6c810a95a11c1532a6524b40001c56ec1e"
	I1201 18:59:59.619883  284652 cri.go:89] found id: "a01aebe75ba55e11656030032922a9fe89ca68f6b88e61a6565e71e38c259849"
	I1201 18:59:59.619897  284652 cri.go:89] found id: ""
	I1201 18:59:59.619947  284652 ssh_runner.go:195] Run: sudo runc --root /run/containerd/runc/k8s.io list -f json
	I1201 18:59:59.654556  284652 cri.go:116] JSON = [{"ociVersion":"1.0.2-dev","id":"0c2d4da8c8cce66a5ddb65b0ca03546edefdf740f59669699af3f7f312913564","pid":1315,"status":"running","bundle":"/run/containerd/io.containerd.runtime.v2.task/k8s.io/0c2d4da8c8cce66a5ddb65b0ca03546edefdf740f59669699af3f7f312913564","rootfs":"/run/containerd/io.containerd.runtime.v2.task/k8s.io/0c2d4da8c8cce66a5ddb65b0ca03546edefdf740f59669699af3f7f312913564/rootfs","created":"2023-12-01T18:59:00.62864878Z","annotations":{"io.kubernetes.cri.container-name":"etcd","io.kubernetes.cri.container-type":"container","io.kubernetes.cri.image-name":"registry.k8s.io/etcd:3.5.9-0","io.kubernetes.cri.sandbox-id":"5eee9bb6b890be25088c43c50c60ee0dd94c9533df6a93fd95e571b722f603e8","io.kubernetes.cri.sandbox-name":"etcd-functional-616785","io.kubernetes.cri.sandbox-namespace":"kube-system","io.kubernetes.cri.sandbox-uid":"7d7369424179d18b810696b6cd5e0c34"},"owner":"root"},{"ociVersion":"1.0.2-dev","id":"172a910da74d5d4bd72533e43c42af2574c81da2
cb8735f3148fb22366fc7b38","pid":2893,"status":"running","bundle":"/run/containerd/io.containerd.runtime.v2.task/k8s.io/172a910da74d5d4bd72533e43c42af2574c81da2cb8735f3148fb22366fc7b38","rootfs":"/run/containerd/io.containerd.runtime.v2.task/k8s.io/172a910da74d5d4bd72533e43c42af2574c81da2cb8735f3148fb22366fc7b38/rootfs","created":"2023-12-01T18:59:53.7126854Z","annotations":{"io.kubernetes.cri.container-name":"storage-provisioner","io.kubernetes.cri.container-type":"container","io.kubernetes.cri.image-name":"gcr.io/k8s-minikube/storage-provisioner:v5","io.kubernetes.cri.sandbox-id":"8b55a4531eadf2b7b7ac4b6e98d285594d33022051b789a55aac0bc3c5faebf2","io.kubernetes.cri.sandbox-name":"storage-provisioner","io.kubernetes.cri.sandbox-namespace":"kube-system","io.kubernetes.cri.sandbox-uid":"393607ea-a066-4d39-93eb-75c52a6ab29e"},"owner":"root"},{"ociVersion":"1.0.2-dev","id":"1ade3bcae44a0456b38605717b59a54930e914ca80145f4a6a42f69f2c89bc4c","pid":1804,"status":"running","bundle":"/run/containerd/io.containerd.runtim
e.v2.task/k8s.io/1ade3bcae44a0456b38605717b59a54930e914ca80145f4a6a42f69f2c89bc4c","rootfs":"/run/containerd/io.containerd.runtime.v2.task/k8s.io/1ade3bcae44a0456b38605717b59a54930e914ca80145f4a6a42f69f2c89bc4c/rootfs","created":"2023-12-01T18:59:23.301279068Z","annotations":{"io.kubernetes.cri.container-type":"sandbox","io.kubernetes.cri.sandbox-cpu-period":"100000","io.kubernetes.cri.sandbox-cpu-quota":"10000","io.kubernetes.cri.sandbox-cpu-shares":"102","io.kubernetes.cri.sandbox-id":"1ade3bcae44a0456b38605717b59a54930e914ca80145f4a6a42f69f2c89bc4c","io.kubernetes.cri.sandbox-log-directory":"/var/log/pods/kube-system_kindnet-6zm7x_e02380f9-bcfc-4a99-a5c0-e2372e5f0ad4","io.kubernetes.cri.sandbox-memory":"52428800","io.kubernetes.cri.sandbox-name":"kindnet-6zm7x","io.kubernetes.cri.sandbox-namespace":"kube-system","io.kubernetes.cri.sandbox-uid":"e02380f9-bcfc-4a99-a5c0-e2372e5f0ad4"},"owner":"root"},{"ociVersion":"1.0.2-dev","id":"1fb932a179c7041614a4fcc5715379bec1bebfa33c81139f5df677e9fa11945f","pid":1855,
"status":"running","bundle":"/run/containerd/io.containerd.runtime.v2.task/k8s.io/1fb932a179c7041614a4fcc5715379bec1bebfa33c81139f5df677e9fa11945f","rootfs":"/run/containerd/io.containerd.runtime.v2.task/k8s.io/1fb932a179c7041614a4fcc5715379bec1bebfa33c81139f5df677e9fa11945f/rootfs","created":"2023-12-01T18:59:23.40394988Z","annotations":{"io.kubernetes.cri.container-name":"kube-proxy","io.kubernetes.cri.container-type":"container","io.kubernetes.cri.image-name":"registry.k8s.io/kube-proxy:v1.28.4","io.kubernetes.cri.sandbox-id":"d7b2917b16b33132226cd5898d50cbfcba0d36be4be642e783846162063ff175","io.kubernetes.cri.sandbox-name":"kube-proxy-d8cvf","io.kubernetes.cri.sandbox-namespace":"kube-system","io.kubernetes.cri.sandbox-uid":"f95b7b7d-84db-44db-8038-dcfd7e1ab770"},"owner":"root"},{"ociVersion":"1.0.2-dev","id":"2688cc66adf8e4f8e9a5996f8a27f4527b00c7741e59aadea1022fc868f95648","pid":1326,"status":"running","bundle":"/run/containerd/io.containerd.runtime.v2.task/k8s.io/2688cc66adf8e4f8e9a5996f8a27f4527b00c77
41e59aadea1022fc868f95648","rootfs":"/run/containerd/io.containerd.runtime.v2.task/k8s.io/2688cc66adf8e4f8e9a5996f8a27f4527b00c7741e59aadea1022fc868f95648/rootfs","created":"2023-12-01T18:59:00.648416121Z","annotations":{"io.kubernetes.cri.container-name":"kube-apiserver","io.kubernetes.cri.container-type":"container","io.kubernetes.cri.image-name":"registry.k8s.io/kube-apiserver:v1.28.4","io.kubernetes.cri.sandbox-id":"f39292a0989585111b50d4115a545d232df69b9f78a22cca8cad460e8ee16199","io.kubernetes.cri.sandbox-name":"kube-apiserver-functional-616785","io.kubernetes.cri.sandbox-namespace":"kube-system","io.kubernetes.cri.sandbox-uid":"399196d44eb6c509f7b60d72c5662125"},"owner":"root"},{"ociVersion":"1.0.2-dev","id":"35fdb6cba245d4380dd978af7332e0abf05039d07409269e23de390bfcaa6950","pid":2105,"status":"running","bundle":"/run/containerd/io.containerd.runtime.v2.task/k8s.io/35fdb6cba245d4380dd978af7332e0abf05039d07409269e23de390bfcaa6950","rootfs":"/run/containerd/io.containerd.runtime.v2.task/k8s.io/35fdb6cba2
45d4380dd978af7332e0abf05039d07409269e23de390bfcaa6950/rootfs","created":"2023-12-01T18:59:37.461761511Z","annotations":{"io.kubernetes.cri.container-type":"sandbox","io.kubernetes.cri.sandbox-cpu-period":"100000","io.kubernetes.cri.sandbox-cpu-quota":"0","io.kubernetes.cri.sandbox-cpu-shares":"102","io.kubernetes.cri.sandbox-id":"35fdb6cba245d4380dd978af7332e0abf05039d07409269e23de390bfcaa6950","io.kubernetes.cri.sandbox-log-directory":"/var/log/pods/kube-system_coredns-5dd5756b68-ts5dh_daaa92f6-6743-4e3e-a5e9-0d3bea42c1e0","io.kubernetes.cri.sandbox-memory":"178257920","io.kubernetes.cri.sandbox-name":"coredns-5dd5756b68-ts5dh","io.kubernetes.cri.sandbox-namespace":"kube-system","io.kubernetes.cri.sandbox-uid":"daaa92f6-6743-4e3e-a5e9-0d3bea42c1e0"},"owner":"root"},{"ociVersion":"1.0.2-dev","id":"5eee9bb6b890be25088c43c50c60ee0dd94c9533df6a93fd95e571b722f603e8","pid":1174,"status":"running","bundle":"/run/containerd/io.containerd.runtime.v2.task/k8s.io/5eee9bb6b890be25088c43c50c60ee0dd94c9533df6a93fd95e571b
722f603e8","rootfs":"/run/containerd/io.containerd.runtime.v2.task/k8s.io/5eee9bb6b890be25088c43c50c60ee0dd94c9533df6a93fd95e571b722f603e8/rootfs","created":"2023-12-01T18:59:00.426326168Z","annotations":{"io.kubernetes.cri.container-type":"sandbox","io.kubernetes.cri.sandbox-cpu-period":"100000","io.kubernetes.cri.sandbox-cpu-quota":"0","io.kubernetes.cri.sandbox-cpu-shares":"102","io.kubernetes.cri.sandbox-id":"5eee9bb6b890be25088c43c50c60ee0dd94c9533df6a93fd95e571b722f603e8","io.kubernetes.cri.sandbox-log-directory":"/var/log/pods/kube-system_etcd-functional-616785_7d7369424179d18b810696b6cd5e0c34","io.kubernetes.cri.sandbox-memory":"0","io.kubernetes.cri.sandbox-name":"etcd-functional-616785","io.kubernetes.cri.sandbox-namespace":"kube-system","io.kubernetes.cri.sandbox-uid":"7d7369424179d18b810696b6cd5e0c34"},"owner":"root"},{"ociVersion":"1.0.2-dev","id":"6353341ab4be94e3126a82f86e5b7b6c810a95a11c1532a6524b40001c56ec1e","pid":1327,"status":"running","bundle":"/run/containerd/io.containerd.runtime.v2.tas
k/k8s.io/6353341ab4be94e3126a82f86e5b7b6c810a95a11c1532a6524b40001c56ec1e","rootfs":"/run/containerd/io.containerd.runtime.v2.task/k8s.io/6353341ab4be94e3126a82f86e5b7b6c810a95a11c1532a6524b40001c56ec1e/rootfs","created":"2023-12-01T18:59:00.685840617Z","annotations":{"io.kubernetes.cri.container-name":"kube-scheduler","io.kubernetes.cri.container-type":"container","io.kubernetes.cri.image-name":"registry.k8s.io/kube-scheduler:v1.28.4","io.kubernetes.cri.sandbox-id":"98d1c2867c3ea4c91188d8e481c880102506a3796f45f4a9f7a39603f311a3ec","io.kubernetes.cri.sandbox-name":"kube-scheduler-functional-616785","io.kubernetes.cri.sandbox-namespace":"kube-system","io.kubernetes.cri.sandbox-uid":"b5714ae4c71e32df6a08d33071c9d40f"},"owner":"root"},{"ociVersion":"1.0.2-dev","id":"6f24a29e8dde904ee7c79191a05e4233a4d3c4611dec66af1e784899ffd2f4fa","pid":2137,"status":"running","bundle":"/run/containerd/io.containerd.runtime.v2.task/k8s.io/6f24a29e8dde904ee7c79191a05e4233a4d3c4611dec66af1e784899ffd2f4fa","rootfs":"/run/containerd
/io.containerd.runtime.v2.task/k8s.io/6f24a29e8dde904ee7c79191a05e4233a4d3c4611dec66af1e784899ffd2f4fa/rootfs","created":"2023-12-01T18:59:37.566412994Z","annotations":{"io.kubernetes.cri.container-name":"coredns","io.kubernetes.cri.container-type":"container","io.kubernetes.cri.image-name":"registry.k8s.io/coredns/coredns:v1.10.1","io.kubernetes.cri.sandbox-id":"35fdb6cba245d4380dd978af7332e0abf05039d07409269e23de390bfcaa6950","io.kubernetes.cri.sandbox-name":"coredns-5dd5756b68-ts5dh","io.kubernetes.cri.sandbox-namespace":"kube-system","io.kubernetes.cri.sandbox-uid":"daaa92f6-6743-4e3e-a5e9-0d3bea42c1e0"},"owner":"root"},{"ociVersion":"1.0.2-dev","id":"8b55a4531eadf2b7b7ac4b6e98d285594d33022051b789a55aac0bc3c5faebf2","pid":1704,"status":"running","bundle":"/run/containerd/io.containerd.runtime.v2.task/k8s.io/8b55a4531eadf2b7b7ac4b6e98d285594d33022051b789a55aac0bc3c5faebf2","rootfs":"/run/containerd/io.containerd.runtime.v2.task/k8s.io/8b55a4531eadf2b7b7ac4b6e98d285594d33022051b789a55aac0bc3c5faebf2/rootfs"
,"created":"2023-12-01T18:59:22.992763511Z","annotations":{"io.kubernetes.cri.container-type":"sandbox","io.kubernetes.cri.sandbox-cpu-period":"100000","io.kubernetes.cri.sandbox-cpu-quota":"0","io.kubernetes.cri.sandbox-cpu-shares":"2","io.kubernetes.cri.sandbox-id":"8b55a4531eadf2b7b7ac4b6e98d285594d33022051b789a55aac0bc3c5faebf2","io.kubernetes.cri.sandbox-log-directory":"/var/log/pods/kube-system_storage-provisioner_393607ea-a066-4d39-93eb-75c52a6ab29e","io.kubernetes.cri.sandbox-memory":"0","io.kubernetes.cri.sandbox-name":"storage-provisioner","io.kubernetes.cri.sandbox-namespace":"kube-system","io.kubernetes.cri.sandbox-uid":"393607ea-a066-4d39-93eb-75c52a6ab29e"},"owner":"root"},{"ociVersion":"1.0.2-dev","id":"98d1c2867c3ea4c91188d8e481c880102506a3796f45f4a9f7a39603f311a3ec","pid":1175,"status":"running","bundle":"/run/containerd/io.containerd.runtime.v2.task/k8s.io/98d1c2867c3ea4c91188d8e481c880102506a3796f45f4a9f7a39603f311a3ec","rootfs":"/run/containerd/io.containerd.runtime.v2.task/k8s.io/98d1c286
7c3ea4c91188d8e481c880102506a3796f45f4a9f7a39603f311a3ec/rootfs","created":"2023-12-01T18:59:00.41758843Z","annotations":{"io.kubernetes.cri.container-type":"sandbox","io.kubernetes.cri.sandbox-cpu-period":"100000","io.kubernetes.cri.sandbox-cpu-quota":"0","io.kubernetes.cri.sandbox-cpu-shares":"102","io.kubernetes.cri.sandbox-id":"98d1c2867c3ea4c91188d8e481c880102506a3796f45f4a9f7a39603f311a3ec","io.kubernetes.cri.sandbox-log-directory":"/var/log/pods/kube-system_kube-scheduler-functional-616785_b5714ae4c71e32df6a08d33071c9d40f","io.kubernetes.cri.sandbox-memory":"0","io.kubernetes.cri.sandbox-name":"kube-scheduler-functional-616785","io.kubernetes.cri.sandbox-namespace":"kube-system","io.kubernetes.cri.sandbox-uid":"b5714ae4c71e32df6a08d33071c9d40f"},"owner":"root"},{"ociVersion":"1.0.2-dev","id":"9efdb32584bcd1c4938cb1074c27c1b3d95cdc91deea1013f2b99669c0621313","pid":1871,"status":"running","bundle":"/run/containerd/io.containerd.runtime.v2.task/k8s.io/9efdb32584bcd1c4938cb1074c27c1b3d95cdc91deea1013f2b996
69c0621313","rootfs":"/run/containerd/io.containerd.runtime.v2.task/k8s.io/9efdb32584bcd1c4938cb1074c27c1b3d95cdc91deea1013f2b99669c0621313/rootfs","created":"2023-12-01T18:59:23.51488378Z","annotations":{"io.kubernetes.cri.container-name":"kindnet-cni","io.kubernetes.cri.container-type":"container","io.kubernetes.cri.image-name":"docker.io/kindest/kindnetd:v20230809-80a64d96","io.kubernetes.cri.sandbox-id":"1ade3bcae44a0456b38605717b59a54930e914ca80145f4a6a42f69f2c89bc4c","io.kubernetes.cri.sandbox-name":"kindnet-6zm7x","io.kubernetes.cri.sandbox-namespace":"kube-system","io.kubernetes.cri.sandbox-uid":"e02380f9-bcfc-4a99-a5c0-e2372e5f0ad4"},"owner":"root"},{"ociVersion":"1.0.2-dev","id":"a01aebe75ba55e11656030032922a9fe89ca68f6b88e61a6565e71e38c259849","pid":1250,"status":"running","bundle":"/run/containerd/io.containerd.runtime.v2.task/k8s.io/a01aebe75ba55e11656030032922a9fe89ca68f6b88e61a6565e71e38c259849","rootfs":"/run/containerd/io.containerd.runtime.v2.task/k8s.io/a01aebe75ba55e11656030032922a9fe89ca6
8f6b88e61a6565e71e38c259849/rootfs","created":"2023-12-01T18:59:00.531247634Z","annotations":{"io.kubernetes.cri.container-name":"kube-controller-manager","io.kubernetes.cri.container-type":"container","io.kubernetes.cri.image-name":"registry.k8s.io/kube-controller-manager:v1.28.4","io.kubernetes.cri.sandbox-id":"c8f61359ddffa0351dabf4ab914d2d3c8bbd85a95648cf27a519b32bfd41bd9e","io.kubernetes.cri.sandbox-name":"kube-controller-manager-functional-616785","io.kubernetes.cri.sandbox-namespace":"kube-system","io.kubernetes.cri.sandbox-uid":"012a17049c3f357a0b12f711f68b3301"},"owner":"root"},{"ociVersion":"1.0.2-dev","id":"c8f61359ddffa0351dabf4ab914d2d3c8bbd85a95648cf27a519b32bfd41bd9e","pid":1159,"status":"running","bundle":"/run/containerd/io.containerd.runtime.v2.task/k8s.io/c8f61359ddffa0351dabf4ab914d2d3c8bbd85a95648cf27a519b32bfd41bd9e","rootfs":"/run/containerd/io.containerd.runtime.v2.task/k8s.io/c8f61359ddffa0351dabf4ab914d2d3c8bbd85a95648cf27a519b32bfd41bd9e/rootfs","created":"2023-12-01T18:59:00.393035
366Z","annotations":{"io.kubernetes.cri.container-type":"sandbox","io.kubernetes.cri.sandbox-cpu-period":"100000","io.kubernetes.cri.sandbox-cpu-quota":"0","io.kubernetes.cri.sandbox-cpu-shares":"204","io.kubernetes.cri.sandbox-id":"c8f61359ddffa0351dabf4ab914d2d3c8bbd85a95648cf27a519b32bfd41bd9e","io.kubernetes.cri.sandbox-log-directory":"/var/log/pods/kube-system_kube-controller-manager-functional-616785_012a17049c3f357a0b12f711f68b3301","io.kubernetes.cri.sandbox-memory":"0","io.kubernetes.cri.sandbox-name":"kube-controller-manager-functional-616785","io.kubernetes.cri.sandbox-namespace":"kube-system","io.kubernetes.cri.sandbox-uid":"012a17049c3f357a0b12f711f68b3301"},"owner":"root"},{"ociVersion":"1.0.2-dev","id":"d7b2917b16b33132226cd5898d50cbfcba0d36be4be642e783846162063ff175","pid":1811,"status":"running","bundle":"/run/containerd/io.containerd.runtime.v2.task/k8s.io/d7b2917b16b33132226cd5898d50cbfcba0d36be4be642e783846162063ff175","rootfs":"/run/containerd/io.containerd.runtime.v2.task/k8s.io/d7b2917b
16b33132226cd5898d50cbfcba0d36be4be642e783846162063ff175/rootfs","created":"2023-12-01T18:59:23.291168037Z","annotations":{"io.kubernetes.cri.container-type":"sandbox","io.kubernetes.cri.sandbox-cpu-period":"100000","io.kubernetes.cri.sandbox-cpu-quota":"0","io.kubernetes.cri.sandbox-cpu-shares":"2","io.kubernetes.cri.sandbox-id":"d7b2917b16b33132226cd5898d50cbfcba0d36be4be642e783846162063ff175","io.kubernetes.cri.sandbox-log-directory":"/var/log/pods/kube-system_kube-proxy-d8cvf_f95b7b7d-84db-44db-8038-dcfd7e1ab770","io.kubernetes.cri.sandbox-memory":"0","io.kubernetes.cri.sandbox-name":"kube-proxy-d8cvf","io.kubernetes.cri.sandbox-namespace":"kube-system","io.kubernetes.cri.sandbox-uid":"f95b7b7d-84db-44db-8038-dcfd7e1ab770"},"owner":"root"},{"ociVersion":"1.0.2-dev","id":"f39292a0989585111b50d4115a545d232df69b9f78a22cca8cad460e8ee16199","pid":1177,"status":"running","bundle":"/run/containerd/io.containerd.runtime.v2.task/k8s.io/f39292a0989585111b50d4115a545d232df69b9f78a22cca8cad460e8ee16199","rootfs":"/ru
n/containerd/io.containerd.runtime.v2.task/k8s.io/f39292a0989585111b50d4115a545d232df69b9f78a22cca8cad460e8ee16199/rootfs","created":"2023-12-01T18:59:00.405457838Z","annotations":{"io.kubernetes.cri.container-type":"sandbox","io.kubernetes.cri.sandbox-cpu-period":"100000","io.kubernetes.cri.sandbox-cpu-quota":"0","io.kubernetes.cri.sandbox-cpu-shares":"256","io.kubernetes.cri.sandbox-id":"f39292a0989585111b50d4115a545d232df69b9f78a22cca8cad460e8ee16199","io.kubernetes.cri.sandbox-log-directory":"/var/log/pods/kube-system_kube-apiserver-functional-616785_399196d44eb6c509f7b60d72c5662125","io.kubernetes.cri.sandbox-memory":"0","io.kubernetes.cri.sandbox-name":"kube-apiserver-functional-616785","io.kubernetes.cri.sandbox-namespace":"kube-system","io.kubernetes.cri.sandbox-uid":"399196d44eb6c509f7b60d72c5662125"},"owner":"root"}]
	I1201 18:59:59.654895  284652 cri.go:126] list returned 16 containers
	I1201 18:59:59.654904  284652 cri.go:129] container: {ID:0c2d4da8c8cce66a5ddb65b0ca03546edefdf740f59669699af3f7f312913564 Status:running}
	I1201 18:59:59.654916  284652 cri.go:135] skipping {0c2d4da8c8cce66a5ddb65b0ca03546edefdf740f59669699af3f7f312913564 running}: state = "running", want "paused"
	I1201 18:59:59.654924  284652 cri.go:129] container: {ID:172a910da74d5d4bd72533e43c42af2574c81da2cb8735f3148fb22366fc7b38 Status:running}
	I1201 18:59:59.654930  284652 cri.go:135] skipping {172a910da74d5d4bd72533e43c42af2574c81da2cb8735f3148fb22366fc7b38 running}: state = "running", want "paused"
	I1201 18:59:59.654935  284652 cri.go:129] container: {ID:1ade3bcae44a0456b38605717b59a54930e914ca80145f4a6a42f69f2c89bc4c Status:running}
	I1201 18:59:59.654940  284652 cri.go:131] skipping 1ade3bcae44a0456b38605717b59a54930e914ca80145f4a6a42f69f2c89bc4c - not in ps
	I1201 18:59:59.654945  284652 cri.go:129] container: {ID:1fb932a179c7041614a4fcc5715379bec1bebfa33c81139f5df677e9fa11945f Status:running}
	I1201 18:59:59.654950  284652 cri.go:135] skipping {1fb932a179c7041614a4fcc5715379bec1bebfa33c81139f5df677e9fa11945f running}: state = "running", want "paused"
	I1201 18:59:59.654956  284652 cri.go:129] container: {ID:2688cc66adf8e4f8e9a5996f8a27f4527b00c7741e59aadea1022fc868f95648 Status:running}
	I1201 18:59:59.654961  284652 cri.go:135] skipping {2688cc66adf8e4f8e9a5996f8a27f4527b00c7741e59aadea1022fc868f95648 running}: state = "running", want "paused"
	I1201 18:59:59.654966  284652 cri.go:129] container: {ID:35fdb6cba245d4380dd978af7332e0abf05039d07409269e23de390bfcaa6950 Status:running}
	I1201 18:59:59.654971  284652 cri.go:131] skipping 35fdb6cba245d4380dd978af7332e0abf05039d07409269e23de390bfcaa6950 - not in ps
	I1201 18:59:59.654976  284652 cri.go:129] container: {ID:5eee9bb6b890be25088c43c50c60ee0dd94c9533df6a93fd95e571b722f603e8 Status:running}
	I1201 18:59:59.654981  284652 cri.go:131] skipping 5eee9bb6b890be25088c43c50c60ee0dd94c9533df6a93fd95e571b722f603e8 - not in ps
	I1201 18:59:59.654986  284652 cri.go:129] container: {ID:6353341ab4be94e3126a82f86e5b7b6c810a95a11c1532a6524b40001c56ec1e Status:running}
	I1201 18:59:59.654991  284652 cri.go:135] skipping {6353341ab4be94e3126a82f86e5b7b6c810a95a11c1532a6524b40001c56ec1e running}: state = "running", want "paused"
	I1201 18:59:59.654996  284652 cri.go:129] container: {ID:6f24a29e8dde904ee7c79191a05e4233a4d3c4611dec66af1e784899ffd2f4fa Status:running}
	I1201 18:59:59.655004  284652 cri.go:135] skipping {6f24a29e8dde904ee7c79191a05e4233a4d3c4611dec66af1e784899ffd2f4fa running}: state = "running", want "paused"
	I1201 18:59:59.655009  284652 cri.go:129] container: {ID:8b55a4531eadf2b7b7ac4b6e98d285594d33022051b789a55aac0bc3c5faebf2 Status:running}
	I1201 18:59:59.655015  284652 cri.go:131] skipping 8b55a4531eadf2b7b7ac4b6e98d285594d33022051b789a55aac0bc3c5faebf2 - not in ps
	I1201 18:59:59.655019  284652 cri.go:129] container: {ID:98d1c2867c3ea4c91188d8e481c880102506a3796f45f4a9f7a39603f311a3ec Status:running}
	I1201 18:59:59.655025  284652 cri.go:131] skipping 98d1c2867c3ea4c91188d8e481c880102506a3796f45f4a9f7a39603f311a3ec - not in ps
	I1201 18:59:59.655029  284652 cri.go:129] container: {ID:9efdb32584bcd1c4938cb1074c27c1b3d95cdc91deea1013f2b99669c0621313 Status:running}
	I1201 18:59:59.655034  284652 cri.go:135] skipping {9efdb32584bcd1c4938cb1074c27c1b3d95cdc91deea1013f2b99669c0621313 running}: state = "running", want "paused"
	I1201 18:59:59.655039  284652 cri.go:129] container: {ID:a01aebe75ba55e11656030032922a9fe89ca68f6b88e61a6565e71e38c259849 Status:running}
	I1201 18:59:59.655045  284652 cri.go:135] skipping {a01aebe75ba55e11656030032922a9fe89ca68f6b88e61a6565e71e38c259849 running}: state = "running", want "paused"
	I1201 18:59:59.655050  284652 cri.go:129] container: {ID:c8f61359ddffa0351dabf4ab914d2d3c8bbd85a95648cf27a519b32bfd41bd9e Status:running}
	I1201 18:59:59.655055  284652 cri.go:131] skipping c8f61359ddffa0351dabf4ab914d2d3c8bbd85a95648cf27a519b32bfd41bd9e - not in ps
	I1201 18:59:59.655059  284652 cri.go:129] container: {ID:d7b2917b16b33132226cd5898d50cbfcba0d36be4be642e783846162063ff175 Status:running}
	I1201 18:59:59.655065  284652 cri.go:131] skipping d7b2917b16b33132226cd5898d50cbfcba0d36be4be642e783846162063ff175 - not in ps
	I1201 18:59:59.655069  284652 cri.go:129] container: {ID:f39292a0989585111b50d4115a545d232df69b9f78a22cca8cad460e8ee16199 Status:running}
	I1201 18:59:59.655075  284652 cri.go:131] skipping f39292a0989585111b50d4115a545d232df69b9f78a22cca8cad460e8ee16199 - not in ps
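The filtering walk above cross-checks the IDs reported by crictl ps against runc list -f json: entries runc knows about but crictl did not report (the sandboxes) are skipped as "not in ps", and containers whose runc status is "running" are skipped because this particular listing only wants "paused" ones, leaving an empty result. A trimmed Go sketch of that filter, assuming the runc JSON fields shown above and a hypothetical set of crictl-reported IDs:

    // filter_paused.go: sketch of the container filtering logged above (not minikube's cri.go).
    package main

    import (
    	"encoding/json"
    	"fmt"
    	"log"
    	"os/exec"
    )

    // runcContainer mirrors the two fields of `runc list -f json` used here.
    type runcContainer struct {
    	ID     string `json:"id"`
    	Status string `json:"status"`
    }

    func main() {
    	out, err := exec.Command("sudo", "runc",
    		"--root", "/run/containerd/runc/k8s.io", "list", "-f", "json").Output()
    	if err != nil {
    		log.Fatal(err)
    	}
    	var all []runcContainer
    	if err := json.Unmarshal(out, &all); err != nil {
    		log.Fatal(err)
    	}
    	// IDs that crictl reported for the kube-system namespace (hypothetical input).
    	inPS := map[string]bool{"172a910da74d5d4bd72533e43c42af2574c81da2cb8735f3148fb22366fc7b38": true}
    	var paused []string
    	for _, c := range all {
    		switch {
    		case !inPS[c.ID]:
    			fmt.Printf("skipping %s - not in ps\n", c.ID)
    		case c.Status != "paused":
    			fmt.Printf("skipping {%s %s}: state = %q, want %q\n", c.ID, c.Status, c.Status, "paused")
    		default:
    			paused = append(paused, c.ID)
    		}
    	}
    	fmt.Println("paused containers:", paused)
    }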
	I1201 18:59:59.655127  284652 ssh_runner.go:195] Run: sudo ls /var/lib/kubelet/kubeadm-flags.env /var/lib/kubelet/config.yaml /var/lib/minikube/etcd
	I1201 18:59:59.665845  284652 kubeadm.go:419] found existing configuration files, will attempt cluster restart
	I1201 18:59:59.665862  284652 kubeadm.go:636] restartCluster start
	I1201 18:59:59.665916  284652 ssh_runner.go:195] Run: sudo test -d /data/minikube
	I1201 18:59:59.675964  284652 kubeadm.go:127] /data/minikube skipping compat symlinks: sudo test -d /data/minikube: Process exited with status 1
	stdout:
	
	stderr:
	I1201 18:59:59.676524  284652 kubeconfig.go:92] found "functional-616785" server: "https://192.168.49.2:8441"
	I1201 18:59:59.677957  284652 ssh_runner.go:195] Run: sudo diff -u /var/tmp/minikube/kubeadm.yaml /var/tmp/minikube/kubeadm.yaml.new
	I1201 18:59:59.688997  284652 kubeadm.go:602] needs reconfigure: configs differ:
	-- stdout --
	--- /var/tmp/minikube/kubeadm.yaml	2023-12-01 18:58:51.889300352 +0000
	+++ /var/tmp/minikube/kubeadm.yaml.new	2023-12-01 18:59:59.029296118 +0000
	@@ -22,7 +22,7 @@
	 apiServer:
	   certSANs: ["127.0.0.1", "localhost", "192.168.49.2"]
	   extraArgs:
	-    enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
	+    enable-admission-plugins: "NamespaceAutoProvision"
	 controllerManager:
	   extraArgs:
	     allocate-node-cidrs: "true"
	
	-- /stdout --
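That diff is the whole "needs reconfigure" test: the deployed /var/tmp/minikube/kubeadm.yaml still carries the default admission plugins, while the new rendering carries the user-supplied NamespaceAutoProvision, so the restart path rewrites the config and re-runs the kubeadm phases instead of reusing the cluster as-is. A small local sketch of that decision, assuming both files are readable without the ssh hop:

    // needs_reconfigure.go: sketch of the kubeadm.yaml comparison logged above.
    package main

    import (
    	"bytes"
    	"fmt"
    	"log"
    	"os"
    )

    func main() {
    	oldCfg, err := os.ReadFile("/var/tmp/minikube/kubeadm.yaml")
    	if err != nil {
    		log.Fatal(err)
    	}
    	newCfg, err := os.ReadFile("/var/tmp/minikube/kubeadm.yaml.new")
    	if err != nil {
    		log.Fatal(err)
    	}
    	if bytes.Equal(oldCfg, newCfg) {
    		fmt.Println("configs match: reuse the existing cluster as-is")
    		return
    	}
    	// Configs differ (here: enable-admission-plugins changed), so promote the new
    	// rendering and re-run the relevant kubeadm init phases, as the log does next.
    	if err := os.WriteFile("/var/tmp/minikube/kubeadm.yaml", newCfg, 0o644); err != nil {
    		log.Fatal(err)
    	}
    	fmt.Println("configs differ: cluster needs reconfigure")
    }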
	I1201 18:59:59.689008  284652 kubeadm.go:1135] stopping kube-system containers ...
	I1201 18:59:59.689018  284652 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name: Namespaces:[kube-system]}
	I1201 18:59:59.689071  284652 ssh_runner.go:195] Run: sudo -s eval "crictl ps -a --quiet --label io.kubernetes.pod.namespace=kube-system"
	I1201 18:59:59.732442  284652 cri.go:89] found id: "172a910da74d5d4bd72533e43c42af2574c81da2cb8735f3148fb22366fc7b38"
	I1201 18:59:59.732454  284652 cri.go:89] found id: "6f24a29e8dde904ee7c79191a05e4233a4d3c4611dec66af1e784899ffd2f4fa"
	I1201 18:59:59.732484  284652 cri.go:89] found id: "9efdb32584bcd1c4938cb1074c27c1b3d95cdc91deea1013f2b99669c0621313"
	I1201 18:59:59.732488  284652 cri.go:89] found id: "1fb932a179c7041614a4fcc5715379bec1bebfa33c81139f5df677e9fa11945f"
	I1201 18:59:59.732491  284652 cri.go:89] found id: "214de4fd9c7633a02aa05db38f69168fa0962e31a183a34b3a3afb40315ef80e"
	I1201 18:59:59.732495  284652 cri.go:89] found id: "0c2d4da8c8cce66a5ddb65b0ca03546edefdf740f59669699af3f7f312913564"
	I1201 18:59:59.732498  284652 cri.go:89] found id: "2688cc66adf8e4f8e9a5996f8a27f4527b00c7741e59aadea1022fc868f95648"
	I1201 18:59:59.732501  284652 cri.go:89] found id: "6353341ab4be94e3126a82f86e5b7b6c810a95a11c1532a6524b40001c56ec1e"
	I1201 18:59:59.732504  284652 cri.go:89] found id: "a01aebe75ba55e11656030032922a9fe89ca68f6b88e61a6565e71e38c259849"
	I1201 18:59:59.732510  284652 cri.go:89] found id: ""
	I1201 18:59:59.732515  284652 cri.go:234] Stopping containers: [172a910da74d5d4bd72533e43c42af2574c81da2cb8735f3148fb22366fc7b38 6f24a29e8dde904ee7c79191a05e4233a4d3c4611dec66af1e784899ffd2f4fa 9efdb32584bcd1c4938cb1074c27c1b3d95cdc91deea1013f2b99669c0621313 1fb932a179c7041614a4fcc5715379bec1bebfa33c81139f5df677e9fa11945f 214de4fd9c7633a02aa05db38f69168fa0962e31a183a34b3a3afb40315ef80e 0c2d4da8c8cce66a5ddb65b0ca03546edefdf740f59669699af3f7f312913564 2688cc66adf8e4f8e9a5996f8a27f4527b00c7741e59aadea1022fc868f95648 6353341ab4be94e3126a82f86e5b7b6c810a95a11c1532a6524b40001c56ec1e a01aebe75ba55e11656030032922a9fe89ca68f6b88e61a6565e71e38c259849]
	I1201 18:59:59.732566  284652 ssh_runner.go:195] Run: which crictl
	I1201 18:59:59.737048  284652 ssh_runner.go:195] Run: sudo /usr/bin/crictl stop --timeout=10 172a910da74d5d4bd72533e43c42af2574c81da2cb8735f3148fb22366fc7b38 6f24a29e8dde904ee7c79191a05e4233a4d3c4611dec66af1e784899ffd2f4fa 9efdb32584bcd1c4938cb1074c27c1b3d95cdc91deea1013f2b99669c0621313 1fb932a179c7041614a4fcc5715379bec1bebfa33c81139f5df677e9fa11945f 214de4fd9c7633a02aa05db38f69168fa0962e31a183a34b3a3afb40315ef80e 0c2d4da8c8cce66a5ddb65b0ca03546edefdf740f59669699af3f7f312913564 2688cc66adf8e4f8e9a5996f8a27f4527b00c7741e59aadea1022fc868f95648 6353341ab4be94e3126a82f86e5b7b6c810a95a11c1532a6524b40001c56ec1e a01aebe75ba55e11656030032922a9fe89ca68f6b88e61a6565e71e38c259849
	I1201 19:00:04.993512  284652 ssh_runner.go:235] Completed: sudo /usr/bin/crictl stop --timeout=10 172a910da74d5d4bd72533e43c42af2574c81da2cb8735f3148fb22366fc7b38 6f24a29e8dde904ee7c79191a05e4233a4d3c4611dec66af1e784899ffd2f4fa 9efdb32584bcd1c4938cb1074c27c1b3d95cdc91deea1013f2b99669c0621313 1fb932a179c7041614a4fcc5715379bec1bebfa33c81139f5df677e9fa11945f 214de4fd9c7633a02aa05db38f69168fa0962e31a183a34b3a3afb40315ef80e 0c2d4da8c8cce66a5ddb65b0ca03546edefdf740f59669699af3f7f312913564 2688cc66adf8e4f8e9a5996f8a27f4527b00c7741e59aadea1022fc868f95648 6353341ab4be94e3126a82f86e5b7b6c810a95a11c1532a6524b40001c56ec1e a01aebe75ba55e11656030032922a9fe89ca68f6b88e61a6565e71e38c259849: (5.256425175s)
	W1201 19:00:04.993567  284652 kubeadm.go:689] Failed to stop kube-system containers: port conflicts may arise: stop: crictl: sudo /usr/bin/crictl stop --timeout=10 172a910da74d5d4bd72533e43c42af2574c81da2cb8735f3148fb22366fc7b38 6f24a29e8dde904ee7c79191a05e4233a4d3c4611dec66af1e784899ffd2f4fa 9efdb32584bcd1c4938cb1074c27c1b3d95cdc91deea1013f2b99669c0621313 1fb932a179c7041614a4fcc5715379bec1bebfa33c81139f5df677e9fa11945f 214de4fd9c7633a02aa05db38f69168fa0962e31a183a34b3a3afb40315ef80e 0c2d4da8c8cce66a5ddb65b0ca03546edefdf740f59669699af3f7f312913564 2688cc66adf8e4f8e9a5996f8a27f4527b00c7741e59aadea1022fc868f95648 6353341ab4be94e3126a82f86e5b7b6c810a95a11c1532a6524b40001c56ec1e a01aebe75ba55e11656030032922a9fe89ca68f6b88e61a6565e71e38c259849: Process exited with status 1
	stdout:
	172a910da74d5d4bd72533e43c42af2574c81da2cb8735f3148fb22366fc7b38
	6f24a29e8dde904ee7c79191a05e4233a4d3c4611dec66af1e784899ffd2f4fa
	9efdb32584bcd1c4938cb1074c27c1b3d95cdc91deea1013f2b99669c0621313
	1fb932a179c7041614a4fcc5715379bec1bebfa33c81139f5df677e9fa11945f
	
	stderr:
	E1201 19:00:04.990149    3390 remote_runtime.go:505] "StopContainer from runtime service failed" err="rpc error: code = NotFound desc = an error occurred when try to find container \"214de4fd9c7633a02aa05db38f69168fa0962e31a183a34b3a3afb40315ef80e\": not found" containerID="214de4fd9c7633a02aa05db38f69168fa0962e31a183a34b3a3afb40315ef80e"
	time="2023-12-01T19:00:04Z" level=fatal msg="stopping the container \"214de4fd9c7633a02aa05db38f69168fa0962e31a183a34b3a3afb40315ef80e\": rpc error: code = NotFound desc = an error occurred when try to find container \"214de4fd9c7633a02aa05db38f69168fa0962e31a183a34b3a3afb40315ef80e\": not found"
	I1201 19:00:04.993641  284652 ssh_runner.go:195] Run: sudo systemctl stop kubelet
	I1201 19:00:05.065032  284652 ssh_runner.go:195] Run: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf
	I1201 19:00:05.077689  284652 kubeadm.go:155] found existing configuration files:
	-rw------- 1 root root 5639 Dec  1 18:58 /etc/kubernetes/admin.conf
	-rw------- 1 root root 5652 Dec  1 18:58 /etc/kubernetes/controller-manager.conf
	-rw------- 1 root root 2007 Dec  1 18:59 /etc/kubernetes/kubelet.conf
	-rw------- 1 root root 5604 Dec  1 18:58 /etc/kubernetes/scheduler.conf
	
	I1201 19:00:05.077752  284652 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8441 /etc/kubernetes/admin.conf
	I1201 19:00:05.090161  284652 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8441 /etc/kubernetes/kubelet.conf
	I1201 19:00:05.103373  284652 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8441 /etc/kubernetes/controller-manager.conf
	I1201 19:00:05.117210  284652 kubeadm.go:166] "https://control-plane.minikube.internal:8441" may not be in /etc/kubernetes/controller-manager.conf - will remove: sudo grep https://control-plane.minikube.internal:8441 /etc/kubernetes/controller-manager.conf: Process exited with status 1
	stdout:
	
	stderr:
	I1201 19:00:05.117283  284652 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/controller-manager.conf
	I1201 19:00:05.128614  284652 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8441 /etc/kubernetes/scheduler.conf
	I1201 19:00:05.140034  284652 kubeadm.go:166] "https://control-plane.minikube.internal:8441" may not be in /etc/kubernetes/scheduler.conf - will remove: sudo grep https://control-plane.minikube.internal:8441 /etc/kubernetes/scheduler.conf: Process exited with status 1
	stdout:
	
	stderr:
	I1201 19:00:05.140099  284652 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/scheduler.conf
	I1201 19:00:05.151011  284652 ssh_runner.go:195] Run: sudo cp /var/tmp/minikube/kubeadm.yaml.new /var/tmp/minikube/kubeadm.yaml
	I1201 19:00:05.162778  284652 kubeadm.go:713] reconfiguring cluster from /var/tmp/minikube/kubeadm.yaml
	I1201 19:00:05.162793  284652 ssh_runner.go:195] Run: /bin/bash -c "sudo env PATH="/var/lib/minikube/binaries/v1.28.4:$PATH" kubeadm init phase certs all --config /var/tmp/minikube/kubeadm.yaml"
	I1201 19:00:05.233115  284652 ssh_runner.go:195] Run: /bin/bash -c "sudo env PATH="/var/lib/minikube/binaries/v1.28.4:$PATH" kubeadm init phase kubeconfig all --config /var/tmp/minikube/kubeadm.yaml"
	I1201 19:00:06.825034  284652 ssh_runner.go:235] Completed: /bin/bash -c "sudo env PATH="/var/lib/minikube/binaries/v1.28.4:$PATH" kubeadm init phase kubeconfig all --config /var/tmp/minikube/kubeadm.yaml": (1.59189502s)
	I1201 19:00:06.825055  284652 ssh_runner.go:195] Run: /bin/bash -c "sudo env PATH="/var/lib/minikube/binaries/v1.28.4:$PATH" kubeadm init phase kubelet-start --config /var/tmp/minikube/kubeadm.yaml"
	I1201 19:00:07.038006  284652 ssh_runner.go:195] Run: /bin/bash -c "sudo env PATH="/var/lib/minikube/binaries/v1.28.4:$PATH" kubeadm init phase control-plane all --config /var/tmp/minikube/kubeadm.yaml"
	I1201 19:00:07.122641  284652 ssh_runner.go:195] Run: /bin/bash -c "sudo env PATH="/var/lib/minikube/binaries/v1.28.4:$PATH" kubeadm init phase etcd local --config /var/tmp/minikube/kubeadm.yaml"
	I1201 19:00:07.253505  284652 api_server.go:52] waiting for apiserver process to appear ...
	I1201 19:00:07.253578  284652 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
	I1201 19:00:07.275979  284652 api_server.go:72] duration metric: took 22.472124ms to wait for apiserver process to appear ...
	I1201 19:00:07.275993  284652 api_server.go:88] waiting for apiserver healthz status ...
	I1201 19:00:07.276008  284652 api_server.go:253] Checking apiserver healthz at https://192.168.49.2:8441/healthz ...
	I1201 19:00:07.285934  284652 api_server.go:279] https://192.168.49.2:8441/healthz returned 200:
	ok
	I1201 19:00:07.301513  284652 api_server.go:141] control plane version: v1.28.4
	I1201 19:00:07.301534  284652 api_server.go:131] duration metric: took 25.535303ms to wait for apiserver health ...
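After the kubeadm init phases, readiness is gated on the apiserver's /healthz endpoint returning 200 with body "ok", as seen above. A minimal polling sketch against the same URL; it skips TLS verification because this standalone client does not load the cluster CA, which the real check would use:

    // wait_healthz.go: sketch of the apiserver healthz wait logged above.
    package main

    import (
    	"crypto/tls"
    	"fmt"
    	"io"
    	"log"
    	"net/http"
    	"time"
    )

    func main() {
    	client := &http.Client{
    		Timeout: 2 * time.Second,
    		// Sketch only: skip CA verification instead of pinning the cluster CA.
    		Transport: &http.Transport{TLSClientConfig: &tls.Config{InsecureSkipVerify: true}},
    	}
    	deadline := time.Now().Add(60 * time.Second)
    	for time.Now().Before(deadline) {
    		resp, err := client.Get("https://192.168.49.2:8441/healthz")
    		if err == nil {
    			body, _ := io.ReadAll(resp.Body)
    			resp.Body.Close()
    			if resp.StatusCode == http.StatusOK {
    				fmt.Printf("healthz returned %d: %s\n", resp.StatusCode, body)
    				return
    			}
    		}
    		time.Sleep(500 * time.Millisecond)
    	}
    	log.Fatal("apiserver did not become healthy within 60s")
    }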
	I1201 19:00:07.301543  284652 cni.go:84] Creating CNI manager for ""
	I1201 19:00:07.301549  284652 cni.go:143] "docker" driver + "containerd" runtime found, recommending kindnet
	I1201 19:00:07.304402  284652 out.go:177] * Configuring CNI (Container Networking Interface) ...
	I1201 19:00:07.306765  284652 ssh_runner.go:195] Run: stat /opt/cni/bin/portmap
	I1201 19:00:07.312170  284652 cni.go:182] applying CNI manifest using /var/lib/minikube/binaries/v1.28.4/kubectl ...
	I1201 19:00:07.312181  284652 ssh_runner.go:362] scp memory --> /var/tmp/minikube/cni.yaml (2438 bytes)
	I1201 19:00:07.355767  284652 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.4/kubectl apply --kubeconfig=/var/lib/minikube/kubeconfig -f /var/tmp/minikube/cni.yaml
	I1201 19:00:07.758953  284652 system_pods.go:43] waiting for kube-system pods to appear ...
	I1201 19:00:07.767241  284652 system_pods.go:59] 8 kube-system pods found
	I1201 19:00:07.767257  284652 system_pods.go:61] "coredns-5dd5756b68-ts5dh" [daaa92f6-6743-4e3e-a5e9-0d3bea42c1e0] Running
	I1201 19:00:07.767262  284652 system_pods.go:61] "etcd-functional-616785" [2cbc9b55-312b-419c-ae0c-a91f984dba54] Running
	I1201 19:00:07.767266  284652 system_pods.go:61] "kindnet-6zm7x" [e02380f9-bcfc-4a99-a5c0-e2372e5f0ad4] Running
	I1201 19:00:07.767270  284652 system_pods.go:61] "kube-apiserver-functional-616785" [7edc0134-41d4-48f6-bd02-45f6b11b6156] Running
	I1201 19:00:07.767276  284652 system_pods.go:61] "kube-controller-manager-functional-616785" [5a86626c-86fd-4a54-a1ad-647ab01f2623] Running
	I1201 19:00:07.767280  284652 system_pods.go:61] "kube-proxy-d8cvf" [f95b7b7d-84db-44db-8038-dcfd7e1ab770] Running
	I1201 19:00:07.767284  284652 system_pods.go:61] "kube-scheduler-functional-616785" [0b89a865-9935-4050-a38f-54d940cd0bd0] Running
	I1201 19:00:07.767291  284652 system_pods.go:61] "storage-provisioner" [393607ea-a066-4d39-93eb-75c52a6ab29e] Running / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
	I1201 19:00:07.767297  284652 system_pods.go:74] duration metric: took 8.333367ms to wait for pod list to return data ...
	I1201 19:00:07.767304  284652 node_conditions.go:102] verifying NodePressure condition ...
	I1201 19:00:07.770831  284652 node_conditions.go:122] node storage ephemeral capacity is 203034800Ki
	I1201 19:00:07.770851  284652 node_conditions.go:123] node cpu capacity is 2
	I1201 19:00:07.770860  284652 node_conditions.go:105] duration metric: took 3.551243ms to run NodePressure ...
	I1201 19:00:07.770885  284652 ssh_runner.go:195] Run: /bin/bash -c "sudo env PATH="/var/lib/minikube/binaries/v1.28.4:$PATH" kubeadm init phase addon all --config /var/tmp/minikube/kubeadm.yaml"
	I1201 19:00:08.002951  284652 kubeadm.go:772] waiting for restarted kubelet to initialise ...
	I1201 19:00:08.009183  284652 retry.go:31] will retry after 310.10446ms: kubelet not initialised
	I1201 19:00:08.328265  284652 kubeadm.go:787] kubelet initialised
	I1201 19:00:08.328276  284652 kubeadm.go:788] duration metric: took 325.312258ms waiting for restarted kubelet to initialise ...
	I1201 19:00:08.328285  284652 pod_ready.go:35] extra waiting up to 4m0s for all system-critical pods including labels [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] to be "Ready" ...
	I1201 19:00:08.345770  284652 pod_ready.go:78] waiting up to 4m0s for pod "coredns-5dd5756b68-ts5dh" in "kube-system" namespace to be "Ready" ...
	I1201 19:00:08.373459  284652 pod_ready.go:97] node "functional-616785" hosting pod "coredns-5dd5756b68-ts5dh" in "kube-system" namespace is currently not "Ready" (skipping!): node "functional-616785" has status "Ready":"False"
	I1201 19:00:08.373474  284652 pod_ready.go:81] duration metric: took 27.689694ms waiting for pod "coredns-5dd5756b68-ts5dh" in "kube-system" namespace to be "Ready" ...
	E1201 19:00:08.373484  284652 pod_ready.go:66] WaitExtra: waitPodCondition: node "functional-616785" hosting pod "coredns-5dd5756b68-ts5dh" in "kube-system" namespace is currently not "Ready" (skipping!): node "functional-616785" has status "Ready":"False"
	I1201 19:00:08.373554  284652 pod_ready.go:78] waiting up to 4m0s for pod "etcd-functional-616785" in "kube-system" namespace to be "Ready" ...
	I1201 19:00:08.390780  284652 pod_ready.go:97] node "functional-616785" hosting pod "etcd-functional-616785" in "kube-system" namespace is currently not "Ready" (skipping!): node "functional-616785" has status "Ready":"False"
	I1201 19:00:08.390798  284652 pod_ready.go:81] duration metric: took 17.233949ms waiting for pod "etcd-functional-616785" in "kube-system" namespace to be "Ready" ...
	E1201 19:00:08.390808  284652 pod_ready.go:66] WaitExtra: waitPodCondition: node "functional-616785" hosting pod "etcd-functional-616785" in "kube-system" namespace is currently not "Ready" (skipping!): node "functional-616785" has status "Ready":"False"
	I1201 19:00:08.390887  284652 pod_ready.go:78] waiting up to 4m0s for pod "kube-apiserver-functional-616785" in "kube-system" namespace to be "Ready" ...
	I1201 19:00:08.403055  284652 pod_ready.go:97] node "functional-616785" hosting pod "kube-apiserver-functional-616785" in "kube-system" namespace is currently not "Ready" (skipping!): node "functional-616785" has status "Ready":"False"
	I1201 19:00:08.403071  284652 pod_ready.go:81] duration metric: took 12.177704ms waiting for pod "kube-apiserver-functional-616785" in "kube-system" namespace to be "Ready" ...
	E1201 19:00:08.403088  284652 pod_ready.go:66] WaitExtra: waitPodCondition: node "functional-616785" hosting pod "kube-apiserver-functional-616785" in "kube-system" namespace is currently not "Ready" (skipping!): node "functional-616785" has status "Ready":"False"
	I1201 19:00:08.409677  284652 pod_ready.go:78] waiting up to 4m0s for pod "kube-controller-manager-functional-616785" in "kube-system" namespace to be "Ready" ...
	I1201 19:00:08.421630  284652 pod_ready.go:97] node "functional-616785" hosting pod "kube-controller-manager-functional-616785" in "kube-system" namespace is currently not "Ready" (skipping!): node "functional-616785" has status "Ready":"False"
	I1201 19:00:08.421648  284652 pod_ready.go:81] duration metric: took 11.955662ms waiting for pod "kube-controller-manager-functional-616785" in "kube-system" namespace to be "Ready" ...
	E1201 19:00:08.421661  284652 pod_ready.go:66] WaitExtra: waitPodCondition: node "functional-616785" hosting pod "kube-controller-manager-functional-616785" in "kube-system" namespace is currently not "Ready" (skipping!): node "functional-616785" has status "Ready":"False"
	I1201 19:00:08.421725  284652 pod_ready.go:78] waiting up to 4m0s for pod "kube-proxy-d8cvf" in "kube-system" namespace to be "Ready" ...
	I1201 19:00:08.765278  284652 pod_ready.go:97] node "functional-616785" hosting pod "kube-proxy-d8cvf" in "kube-system" namespace is currently not "Ready" (skipping!): node "functional-616785" has status "Ready":"False"
	I1201 19:00:08.765292  284652 pod_ready.go:81] duration metric: took 343.558835ms waiting for pod "kube-proxy-d8cvf" in "kube-system" namespace to be "Ready" ...
	E1201 19:00:08.765301  284652 pod_ready.go:66] WaitExtra: waitPodCondition: node "functional-616785" hosting pod "kube-proxy-d8cvf" in "kube-system" namespace is currently not "Ready" (skipping!): node "functional-616785" has status "Ready":"False"
	I1201 19:00:08.765331  284652 pod_ready.go:78] waiting up to 4m0s for pod "kube-scheduler-functional-616785" in "kube-system" namespace to be "Ready" ...
	I1201 19:00:09.162488  284652 pod_ready.go:97] node "functional-616785" hosting pod "kube-scheduler-functional-616785" in "kube-system" namespace is currently not "Ready" (skipping!): node "functional-616785" has status "Ready":"False"
	I1201 19:00:09.162502  284652 pod_ready.go:81] duration metric: took 397.164454ms waiting for pod "kube-scheduler-functional-616785" in "kube-system" namespace to be "Ready" ...
	E1201 19:00:09.162511  284652 pod_ready.go:66] WaitExtra: waitPodCondition: node "functional-616785" hosting pod "kube-scheduler-functional-616785" in "kube-system" namespace is currently not "Ready" (skipping!): node "functional-616785" has status "Ready":"False"
	I1201 19:00:09.162622  284652 pod_ready.go:38] duration metric: took 834.326101ms for extra waiting for all system-critical and pods with labels [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] to be "Ready" ...
	I1201 19:00:09.162642  284652 ssh_runner.go:195] Run: /bin/bash -c "cat /proc/$(pgrep kube-apiserver)/oom_adj"
	I1201 19:00:09.173311  284652 ops.go:34] apiserver oom_adj: -16
	I1201 19:00:09.173323  284652 kubeadm.go:640] restartCluster took 9.50745573s
	I1201 19:00:09.173331  284652 kubeadm.go:406] StartCluster complete in 9.596250455s
	I1201 19:00:09.173346  284652 settings.go:142] acquiring lock: {Name:mk509c4de5b63e24c154062001ac3a5a349afe54 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
	I1201 19:00:09.173430  284652 settings.go:150] Updating kubeconfig:  /home/jenkins/minikube-integration/17703-252966/kubeconfig
	I1201 19:00:09.174146  284652 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/17703-252966/kubeconfig: {Name:mk1b3fc1b8f9b6d7245434b6dbdc3c3d1a4130cc Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
	I1201 19:00:09.175278  284652 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.28.4/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml"
	I1201 19:00:09.175516  284652 config.go:182] Loaded profile config "functional-616785": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.28.4
	I1201 19:00:09.175566  284652 addons.go:499] enable addons start: toEnable=map[ambassador:false auto-pause:false cloud-spanner:false csi-hostpath-driver:false dashboard:false default-storageclass:true efk:false freshpod:false gcp-auth:false gvisor:false headlamp:false helm-tiller:false inaccel:false ingress:false ingress-dns:false inspektor-gadget:false istio:false istio-provisioner:false kong:false kubeflow:false kubevirt:false logviewer:false metallb:false metrics-server:false nvidia-device-plugin:false nvidia-driver-installer:false nvidia-gpu-device-plugin:false olm:false pod-security-policy:false portainer:false registry:false registry-aliases:false registry-creds:false storage-provisioner:true storage-provisioner-gluster:false storage-provisioner-rancher:false volumesnapshots:false]
	I1201 19:00:09.175629  284652 addons.go:69] Setting storage-provisioner=true in profile "functional-616785"
	I1201 19:00:09.175646  284652 addons.go:231] Setting addon storage-provisioner=true in "functional-616785"
	W1201 19:00:09.175651  284652 addons.go:240] addon storage-provisioner should already be in state true
	I1201 19:00:09.175683  284652 host.go:66] Checking if "functional-616785" exists ...
	I1201 19:00:09.176096  284652 cli_runner.go:164] Run: docker container inspect functional-616785 --format={{.State.Status}}
	I1201 19:00:09.176321  284652 addons.go:69] Setting default-storageclass=true in profile "functional-616785"
	I1201 19:00:09.176336  284652 addons_storage_classes.go:33] enableOrDisableStorageClasses default-storageclass=true on "functional-616785"
	I1201 19:00:09.176724  284652 cli_runner.go:164] Run: docker container inspect functional-616785 --format={{.State.Status}}
	I1201 19:00:09.187627  284652 kapi.go:248] "coredns" deployment in "kube-system" namespace and "functional-616785" context rescaled to 1 replicas
	I1201 19:00:09.187661  284652 start.go:223] Will wait 6m0s for node &{Name: IP:192.168.49.2 Port:8441 KubernetesVersion:v1.28.4 ContainerRuntime:containerd ControlPlane:true Worker:true}
	I1201 19:00:09.190782  284652 out.go:177] * Verifying Kubernetes components...
	I1201 19:00:09.196710  284652 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet
	I1201 19:00:09.228900  284652 out.go:177]   - Using image gcr.io/k8s-minikube/storage-provisioner:v5
	I1201 19:00:09.230893  284652 addons.go:423] installing /etc/kubernetes/addons/storage-provisioner.yaml
	I1201 19:00:09.230903  284652 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/storage-provisioner.yaml (2676 bytes)
	I1201 19:00:09.230967  284652 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" functional-616785
	I1201 19:00:09.228236  284652 addons.go:231] Setting addon default-storageclass=true in "functional-616785"
	W1201 19:00:09.231213  284652 addons.go:240] addon default-storageclass should already be in state true
	I1201 19:00:09.231239  284652 host.go:66] Checking if "functional-616785" exists ...
	I1201 19:00:09.231708  284652 cli_runner.go:164] Run: docker container inspect functional-616785 --format={{.State.Status}}
	I1201 19:00:09.268174  284652 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33098 SSHKeyPath:/home/jenkins/minikube-integration/17703-252966/.minikube/machines/functional-616785/id_rsa Username:docker}
	I1201 19:00:09.281791  284652 addons.go:423] installing /etc/kubernetes/addons/storageclass.yaml
	I1201 19:00:09.281803  284652 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/storageclass.yaml (271 bytes)
	I1201 19:00:09.281863  284652 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" functional-616785
	I1201 19:00:09.311110  284652 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33098 SSHKeyPath:/home/jenkins/minikube-integration/17703-252966/.minikube/machines/functional-616785/id_rsa Username:docker}
	E1201 19:00:09.475849  284652 start.go:894] failed to get current CoreDNS ConfigMap: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.28.4/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml": Process exited with status 1
	stdout:
	
	stderr:
	The connection to the server localhost:8441 was refused - did you specify the right host or port?
	W1201 19:00:09.475869  284652 start.go:294] Unable to inject {"host.minikube.internal": 192.168.49.1} record into CoreDNS: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.28.4/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml": Process exited with status 1
	stdout:
	
	stderr:
	The connection to the server localhost:8441 was refused - did you specify the right host or port?
	W1201 19:00:09.475884  284652 out.go:239] Failed to inject host.minikube.internal into CoreDNS, this will limit the pods access to the host IP
	I1201 19:00:09.476107  284652 node_ready.go:35] waiting up to 6m0s for node "functional-616785" to be "Ready" ...
	I1201 19:00:09.476569  284652 node_ready.go:53] error getting node "functional-616785": Get "https://192.168.49.2:8441/api/v1/nodes/functional-616785": dial tcp 192.168.49.2:8441: connect: connection refused
	I1201 19:00:09.476579  284652 node_ready.go:38] duration metric: took 459.025µs waiting for node "functional-616785" to be "Ready" ...
	I1201 19:00:09.479815  284652 out.go:177] 
	W1201 19:00:09.482258  284652 out.go:239] X Exiting due to GUEST_START: failed to start node: wait 6m0s for node: waiting for node to be ready: waitNodeCondition: error getting node "functional-616785": Get "https://192.168.49.2:8441/api/v1/nodes/functional-616785": dial tcp 192.168.49.2:8441: connect: connection refused
	W1201 19:00:09.482426  284652 out.go:239] * 
	W1201 19:00:09.483533  284652 out.go:239] ╭─────────────────────────────────────────────────────────────────────────────────────────────╮
	│                                                                                             │
	│    * If the above advice does not help, please let us know:                                 │
	│      https://github.com/kubernetes/minikube/issues/new/choose                               │
	│                                                                                             │
	│    * Please run `minikube logs --file=logs.txt` and attach logs.txt to the GitHub issue.    │
	│                                                                                             │
	╰─────────────────────────────────────────────────────────────────────────────────────────────╯
	I1201 19:00:09.486982  284652 out.go:177] 
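The GUEST_START exit above is the end of the start log: minikube gives up waiting for the node because the restarted kube-apiserver never comes back on 8441 (the bind failure is visible in the kube-apiserver section further down). A minimal, purely illustrative way to re-run the same checks by hand, reusing the healthz URL and kubeconfig path that appear in this log (not part of the recorded test run):

	# hypothetical manual checks mirroring the waits logged above
	curl -sk https://192.168.49.2:8441/healthz      # the healthz probe minikube polls at api_server.go:253
	kubectl --kubeconfig /home/jenkins/minikube-integration/17703-252966/kubeconfig \
	  get node functional-616785                    # the node GET that fails here with "connection refused"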
	
	* 
	* ==> container status <==
	* CONTAINER           IMAGE               CREATED              STATE               NAME                      ATTEMPT             POD ID              POD
	9f4183dd56963       04b4eaa3d3db8       2 seconds ago        Running             kindnet-cni               1                   1ade3bcae44a0       kindnet-6zm7x
	477d114576c31       ba04bb24b9575       2 seconds ago        Running             storage-provisioner       2                   8b55a4531eadf       storage-provisioner
	99140cde269c3       3ca3ca488cf13       2 seconds ago        Running             kube-proxy                1                   d7b2917b16b33       kube-proxy-d8cvf
	a46f9a2d3de02       97e04611ad434       2 seconds ago        Running             coredns                   1                   35fdb6cba245d       coredns-5dd5756b68-ts5dh
	7c7908b790bf5       04b4c447bb9d4       2 seconds ago        Exited              kube-apiserver            1                   c0f80812a4a06       kube-apiserver-functional-616785
	172a910da74d5       ba04bb24b9575       17 seconds ago       Exited              storage-provisioner       1                   8b55a4531eadf       storage-provisioner
	6f24a29e8dde9       97e04611ad434       33 seconds ago       Exited              coredns                   0                   35fdb6cba245d       coredns-5dd5756b68-ts5dh
	9efdb32584bcd       04b4eaa3d3db8       47 seconds ago       Exited              kindnet-cni               0                   1ade3bcae44a0       kindnet-6zm7x
	1fb932a179c70       3ca3ca488cf13       47 seconds ago       Exited              kube-proxy                0                   d7b2917b16b33       kube-proxy-d8cvf
	0c2d4da8c8cce       9cdd6470f48c8       About a minute ago   Running             etcd                      0                   5eee9bb6b890b       etcd-functional-616785
	6353341ab4be9       05c284c929889       About a minute ago   Running             kube-scheduler            0                   98d1c2867c3ea       kube-scheduler-functional-616785
	a01aebe75ba55       9961cbceaf234       About a minute ago   Running             kube-controller-manager   0                   c8f61359ddffa       kube-controller-manager-functional-616785
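The table reflects CRI state at capture time: the restarted kube-apiserver (attempt 1) has already exited after 2 seconds, while etcd, kube-scheduler and kube-controller-manager from the original start are still running. A sketch of how the same listing could be reproduced on the node, assuming the default containerd socket path (illustrative only, not taken from the run):

	sudo crictl --runtime-endpoint unix:///run/containerd/containerd.sock ps -a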
	
	* 
	* ==> containerd <==
	* Dec 01 19:00:08 functional-616785 containerd[3198]: time="2023-12-01T19:00:08.861207244Z" level=info msg="StartContainer for \"9f4183dd56963e73258dd3b04d0b72d0311cd083e5a23a4f9463bca0138848ba\" returns successfully"
	Dec 01 19:00:08 functional-616785 containerd[3198]: time="2023-12-01T19:00:08.865621717Z" level=info msg="shim disconnected" id=7c7908b790bf54073f1315a8c1586f4746b82b87e895963cb52f53f06a6ddfd3
	Dec 01 19:00:08 functional-616785 containerd[3198]: time="2023-12-01T19:00:08.865906699Z" level=warning msg="cleaning up after shim disconnected" id=7c7908b790bf54073f1315a8c1586f4746b82b87e895963cb52f53f06a6ddfd3 namespace=k8s.io
	Dec 01 19:00:08 functional-616785 containerd[3198]: time="2023-12-01T19:00:08.866029297Z" level=info msg="cleaning up dead shim"
	Dec 01 19:00:08 functional-616785 containerd[3198]: time="2023-12-01T19:00:08.881174091Z" level=warning msg="cleanup warnings time=\"2023-12-01T19:00:08Z\" level=info msg=\"starting signal loop\" namespace=k8s.io pid=3933 runtime=io.containerd.runc.v2\n"
	Dec 01 19:00:09 functional-616785 containerd[3198]: time="2023-12-01T19:00:09.323354246Z" level=info msg="StopContainer for \"2688cc66adf8e4f8e9a5996f8a27f4527b00c7741e59aadea1022fc868f95648\" with timeout 2 (s)"
	Dec 01 19:00:09 functional-616785 containerd[3198]: time="2023-12-01T19:00:09.323774896Z" level=info msg="Stop container \"2688cc66adf8e4f8e9a5996f8a27f4527b00c7741e59aadea1022fc868f95648\" with signal terminated"
	Dec 01 19:00:09 functional-616785 containerd[3198]: time="2023-12-01T19:00:09.427791415Z" level=info msg="shim disconnected" id=f39292a0989585111b50d4115a545d232df69b9f78a22cca8cad460e8ee16199
	Dec 01 19:00:09 functional-616785 containerd[3198]: time="2023-12-01T19:00:09.427846552Z" level=warning msg="cleaning up after shim disconnected" id=f39292a0989585111b50d4115a545d232df69b9f78a22cca8cad460e8ee16199 namespace=k8s.io
	Dec 01 19:00:09 functional-616785 containerd[3198]: time="2023-12-01T19:00:09.428025223Z" level=info msg="cleaning up dead shim"
	Dec 01 19:00:09 functional-616785 containerd[3198]: time="2023-12-01T19:00:09.453212997Z" level=info msg="RemoveContainer for \"ded0a01255c344ae2871870352d750cd87a8083139cfbabb4ebf6e130736185b\""
	Dec 01 19:00:09 functional-616785 containerd[3198]: time="2023-12-01T19:00:09.467644070Z" level=warning msg="cleanup warnings time=\"2023-12-01T19:00:09Z\" level=info msg=\"starting signal loop\" namespace=k8s.io pid=4110 runtime=io.containerd.runc.v2\n"
	Dec 01 19:00:09 functional-616785 containerd[3198]: time="2023-12-01T19:00:09.472148961Z" level=info msg="RemoveContainer for \"ded0a01255c344ae2871870352d750cd87a8083139cfbabb4ebf6e130736185b\" returns successfully"
	Dec 01 19:00:09 functional-616785 containerd[3198]: time="2023-12-01T19:00:09.482076636Z" level=info msg="shim disconnected" id=2688cc66adf8e4f8e9a5996f8a27f4527b00c7741e59aadea1022fc868f95648
	Dec 01 19:00:09 functional-616785 containerd[3198]: time="2023-12-01T19:00:09.482128409Z" level=warning msg="cleaning up after shim disconnected" id=2688cc66adf8e4f8e9a5996f8a27f4527b00c7741e59aadea1022fc868f95648 namespace=k8s.io
	Dec 01 19:00:09 functional-616785 containerd[3198]: time="2023-12-01T19:00:09.482139363Z" level=info msg="cleaning up dead shim"
	Dec 01 19:00:09 functional-616785 containerd[3198]: time="2023-12-01T19:00:09.506842800Z" level=warning msg="cleanup warnings time=\"2023-12-01T19:00:09Z\" level=info msg=\"starting signal loop\" namespace=k8s.io pid=4133 runtime=io.containerd.runc.v2\n"
	Dec 01 19:00:09 functional-616785 containerd[3198]: time="2023-12-01T19:00:09.515807373Z" level=info msg="StopContainer for \"2688cc66adf8e4f8e9a5996f8a27f4527b00c7741e59aadea1022fc868f95648\" returns successfully"
	Dec 01 19:00:09 functional-616785 containerd[3198]: time="2023-12-01T19:00:09.519174470Z" level=info msg="StopPodSandbox for \"f39292a0989585111b50d4115a545d232df69b9f78a22cca8cad460e8ee16199\""
	Dec 01 19:00:09 functional-616785 containerd[3198]: time="2023-12-01T19:00:09.519254501Z" level=info msg="Container to stop \"2688cc66adf8e4f8e9a5996f8a27f4527b00c7741e59aadea1022fc868f95648\" must be in running or unknown state, current state \"CONTAINER_EXITED\""
	Dec 01 19:00:09 functional-616785 containerd[3198]: time="2023-12-01T19:00:09.520802451Z" level=info msg="TearDown network for sandbox \"f39292a0989585111b50d4115a545d232df69b9f78a22cca8cad460e8ee16199\" successfully"
	Dec 01 19:00:09 functional-616785 containerd[3198]: time="2023-12-01T19:00:09.520843238Z" level=info msg="StopPodSandbox for \"f39292a0989585111b50d4115a545d232df69b9f78a22cca8cad460e8ee16199\" returns successfully"
	Dec 01 19:00:10 functional-616785 containerd[3198]: time="2023-12-01T19:00:10.471069539Z" level=info msg="RemoveContainer for \"2688cc66adf8e4f8e9a5996f8a27f4527b00c7741e59aadea1022fc868f95648\""
	Dec 01 19:00:10 functional-616785 containerd[3198]: time="2023-12-01T19:00:10.477687659Z" level=info msg="RemoveContainer for \"2688cc66adf8e4f8e9a5996f8a27f4527b00c7741e59aadea1022fc868f95648\" returns successfully"
	Dec 01 19:00:10 functional-616785 containerd[3198]: time="2023-12-01T19:00:10.478396991Z" level=error msg="ContainerStatus for \"2688cc66adf8e4f8e9a5996f8a27f4527b00c7741e59aadea1022fc868f95648\" failed" error="rpc error: code = NotFound desc = an error occurred when try to find container \"2688cc66adf8e4f8e9a5996f8a27f4527b00c7741e59aadea1022fc868f95648\": not found"
	
	* 
	* ==> coredns [6f24a29e8dde904ee7c79191a05e4233a4d3c4611dec66af1e784899ffd2f4fa] <==
	* .:53
	[INFO] plugin/reload: Running configuration SHA512 = 05e3eaddc414b2d71a69b2e2bc6f2681fc1f4d04bcdd3acc1a41457bb7db518208b95ddfc4c9fffedc59c25a8faf458be1af4915a4a3c0d6777cb7a346bc5d86
	CoreDNS-1.10.1
	linux/arm64, go1.20, 055b2c3
	[INFO] 127.0.0.1:45795 - 10878 "HINFO IN 4947552381348439214.1364120457825711732. udp 57 false 512" NXDOMAIN qr,rd,ra 57 0.024084799s
	[INFO] SIGTERM: Shutting down servers then terminating
	[INFO] plugin/health: Going into lameduck mode for 5s
	
	* 
	* ==> coredns [a46f9a2d3de02376724a6dd19261947d89ea2731cc6be9f11a7fc5f18e8c69b3] <==
	* .:53
	[INFO] plugin/reload: Running configuration SHA512 = 05e3eaddc414b2d71a69b2e2bc6f2681fc1f4d04bcdd3acc1a41457bb7db518208b95ddfc4c9fffedc59c25a8faf458be1af4915a4a3c0d6777cb7a346bc5d86
	CoreDNS-1.10.1
	linux/arm64, go1.20, 055b2c3
	[INFO] 127.0.0.1:37930 - 20236 "HINFO IN 5203344202659884113.4534884378912464259. udp 57 false 512" NXDOMAIN qr,rd,ra 57 0.014683259s
	[INFO] plugin/kubernetes: pkg/mod/k8s.io/client-go@v0.26.1/tools/cache/reflector.go:169: watch of *v1.EndpointSlice ended with: very short watch: pkg/mod/k8s.io/client-go@v0.26.1/tools/cache/reflector.go:169: Unexpected watch close - watch lasted less than a second and no items received
	[INFO] plugin/kubernetes: pkg/mod/k8s.io/client-go@v0.26.1/tools/cache/reflector.go:169: watch of *v1.Namespace ended with: very short watch: pkg/mod/k8s.io/client-go@v0.26.1/tools/cache/reflector.go:169: Unexpected watch close - watch lasted less than a second and no items received
	[INFO] plugin/kubernetes: pkg/mod/k8s.io/client-go@v0.26.1/tools/cache/reflector.go:169: watch of *v1.Service ended with: very short watch: pkg/mod/k8s.io/client-go@v0.26.1/tools/cache/reflector.go:169: Unexpected watch close - watch lasted less than a second and no items received
	[INFO] plugin/kubernetes: pkg/mod/k8s.io/client-go@v0.26.1/tools/cache/reflector.go:169: failed to list *v1.EndpointSlice: Get "https://10.96.0.1:443/apis/discovery.k8s.io/v1/endpointslices?resourceVersion=494": dial tcp 10.96.0.1:443: connect: connection refused
	[ERROR] plugin/kubernetes: pkg/mod/k8s.io/client-go@v0.26.1/tools/cache/reflector.go:169: Failed to watch *v1.EndpointSlice: failed to list *v1.EndpointSlice: Get "https://10.96.0.1:443/apis/discovery.k8s.io/v1/endpointslices?resourceVersion=494": dial tcp 10.96.0.1:443: connect: connection refused
	[INFO] plugin/kubernetes: pkg/mod/k8s.io/client-go@v0.26.1/tools/cache/reflector.go:169: failed to list *v1.Namespace: Get "https://10.96.0.1:443/api/v1/namespaces?resourceVersion=466": dial tcp 10.96.0.1:443: connect: connection refused
	[ERROR] plugin/kubernetes: pkg/mod/k8s.io/client-go@v0.26.1/tools/cache/reflector.go:169: Failed to watch *v1.Namespace: failed to list *v1.Namespace: Get "https://10.96.0.1:443/api/v1/namespaces?resourceVersion=466": dial tcp 10.96.0.1:443: connect: connection refused
	[INFO] plugin/kubernetes: pkg/mod/k8s.io/client-go@v0.26.1/tools/cache/reflector.go:169: failed to list *v1.Service: Get "https://10.96.0.1:443/api/v1/services?resourceVersion=483": dial tcp 10.96.0.1:443: connect: connection refused
	[ERROR] plugin/kubernetes: pkg/mod/k8s.io/client-go@v0.26.1/tools/cache/reflector.go:169: Failed to watch *v1.Service: failed to list *v1.Service: Get "https://10.96.0.1:443/api/v1/services?resourceVersion=483": dial tcp 10.96.0.1:443: connect: connection refused
	
	* 
	* ==> describe nodes <==
	* 
	* ==> dmesg <==
	* [  +0.000754] FS-Cache: N-cookie c=0000000c [p=00000003 fl=2 nc=0 na=1]
	[  +0.001026] FS-Cache: N-cookie d=00000000c0e2b83e{9p.inode} n=00000000b55389ab
	[  +0.001117] FS-Cache: N-key=[8] 'a0385c0100000000'
	[  +0.002877] FS-Cache: Duplicate cookie detected
	[  +0.000724] FS-Cache: O-cookie c=00000006 [p=00000003 fl=226 nc=0 na=1]
	[  +0.001014] FS-Cache: O-cookie d=00000000c0e2b83e{9p.inode} n=0000000009a500bd
	[  +0.001116] FS-Cache: O-key=[8] 'a0385c0100000000'
	[  +0.000790] FS-Cache: N-cookie c=0000000d [p=00000003 fl=2 nc=0 na=1]
	[  +0.000985] FS-Cache: N-cookie d=00000000c0e2b83e{9p.inode} n=000000006d601d2f
	[  +0.001101] FS-Cache: N-key=[8] 'a0385c0100000000'
	[  +2.615492] FS-Cache: Duplicate cookie detected
	[  +0.000772] FS-Cache: O-cookie c=00000004 [p=00000003 fl=226 nc=0 na=1]
	[  +0.001004] FS-Cache: O-cookie d=00000000c0e2b83e{9p.inode} n=0000000010bf0fcb
	[  +0.001152] FS-Cache: O-key=[8] '9f385c0100000000'
	[  +0.000813] FS-Cache: N-cookie c=0000000f [p=00000003 fl=2 nc=0 na=1]
	[  +0.000990] FS-Cache: N-cookie d=00000000c0e2b83e{9p.inode} n=00000000b55389ab
	[  +0.001159] FS-Cache: N-key=[8] '9f385c0100000000'
	[  +0.329811] FS-Cache: Duplicate cookie detected
	[  +0.000747] FS-Cache: O-cookie c=00000009 [p=00000003 fl=226 nc=0 na=1]
	[  +0.001030] FS-Cache: O-cookie d=00000000c0e2b83e{9p.inode} n=00000000e473eb7d
	[  +0.001123] FS-Cache: O-key=[8] 'a7385c0100000000'
	[  +0.000733] FS-Cache: N-cookie c=00000010 [p=00000003 fl=2 nc=0 na=1]
	[  +0.000973] FS-Cache: N-cookie d=00000000c0e2b83e{9p.inode} n=000000003d8bf441
	[  +0.001090] FS-Cache: N-key=[8] 'a7385c0100000000'
	[Dec 1 17:49] kmem.limit_in_bytes is deprecated and will be removed. Please report your usecase to linux-mm@kvack.org if you depend on this functionality.
	
	* 
	* ==> etcd [0c2d4da8c8cce66a5ddb65b0ca03546edefdf740f59669699af3f7f312913564] <==
	* {"level":"info","ts":"2023-12-01T18:59:00.979045Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"aec36adc501070cc switched to configuration voters=(12593026477526642892)"}
	{"level":"info","ts":"2023-12-01T18:59:00.979238Z","caller":"membership/cluster.go:421","msg":"added member","cluster-id":"fa54960ea34d58be","local-member-id":"aec36adc501070cc","added-peer-id":"aec36adc501070cc","added-peer-peer-urls":["https://192.168.49.2:2380"]}
	{"level":"info","ts":"2023-12-01T18:59:00.980881Z","caller":"embed/etcd.go:726","msg":"starting with client TLS","tls-info":"cert = /var/lib/minikube/certs/etcd/server.crt, key = /var/lib/minikube/certs/etcd/server.key, client-cert=, client-key=, trusted-ca = /var/lib/minikube/certs/etcd/ca.crt, client-cert-auth = true, crl-file = ","cipher-suites":[]}
	{"level":"info","ts":"2023-12-01T18:59:00.981091Z","caller":"embed/etcd.go:597","msg":"serving peer traffic","address":"192.168.49.2:2380"}
	{"level":"info","ts":"2023-12-01T18:59:00.984488Z","caller":"embed/etcd.go:569","msg":"cmux::serve","address":"192.168.49.2:2380"}
	{"level":"info","ts":"2023-12-01T18:59:00.985258Z","caller":"embed/etcd.go:278","msg":"now serving peer/client/metrics","local-member-id":"aec36adc501070cc","initial-advertise-peer-urls":["https://192.168.49.2:2380"],"listen-peer-urls":["https://192.168.49.2:2380"],"advertise-client-urls":["https://192.168.49.2:2379"],"listen-client-urls":["https://127.0.0.1:2379","https://192.168.49.2:2379"],"listen-metrics-urls":["http://127.0.0.1:2381"]}
	{"level":"info","ts":"2023-12-01T18:59:00.985378Z","caller":"embed/etcd.go:855","msg":"serving metrics","address":"http://127.0.0.1:2381"}
	{"level":"info","ts":"2023-12-01T18:59:01.014113Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"aec36adc501070cc is starting a new election at term 1"}
	{"level":"info","ts":"2023-12-01T18:59:01.014356Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"aec36adc501070cc became pre-candidate at term 1"}
	{"level":"info","ts":"2023-12-01T18:59:01.014442Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"aec36adc501070cc received MsgPreVoteResp from aec36adc501070cc at term 1"}
	{"level":"info","ts":"2023-12-01T18:59:01.014567Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"aec36adc501070cc became candidate at term 2"}
	{"level":"info","ts":"2023-12-01T18:59:01.014651Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"aec36adc501070cc received MsgVoteResp from aec36adc501070cc at term 2"}
	{"level":"info","ts":"2023-12-01T18:59:01.014744Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"aec36adc501070cc became leader at term 2"}
	{"level":"info","ts":"2023-12-01T18:59:01.014819Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"raft.node: aec36adc501070cc elected leader aec36adc501070cc at term 2"}
	{"level":"info","ts":"2023-12-01T18:59:01.018905Z","caller":"etcdserver/server.go:2571","msg":"setting up initial cluster version using v2 API","cluster-version":"3.5"}
	{"level":"info","ts":"2023-12-01T18:59:01.021746Z","caller":"etcdserver/server.go:2062","msg":"published local member to cluster through raft","local-member-id":"aec36adc501070cc","local-member-attributes":"{Name:functional-616785 ClientURLs:[https://192.168.49.2:2379]}","request-path":"/0/members/aec36adc501070cc/attributes","cluster-id":"fa54960ea34d58be","publish-timeout":"7s"}
	{"level":"info","ts":"2023-12-01T18:59:01.021924Z","caller":"embed/serve.go:103","msg":"ready to serve client requests"}
	{"level":"info","ts":"2023-12-01T18:59:01.023071Z","caller":"embed/serve.go:250","msg":"serving client traffic securely","traffic":"grpc+http","address":"127.0.0.1:2379"}
	{"level":"info","ts":"2023-12-01T18:59:01.023696Z","caller":"embed/serve.go:103","msg":"ready to serve client requests"}
	{"level":"info","ts":"2023-12-01T18:59:01.024787Z","caller":"embed/serve.go:250","msg":"serving client traffic securely","traffic":"grpc+http","address":"192.168.49.2:2379"}
	{"level":"info","ts":"2023-12-01T18:59:01.02872Z","caller":"etcdmain/main.go:44","msg":"notifying init daemon"}
	{"level":"info","ts":"2023-12-01T18:59:01.030127Z","caller":"etcdmain/main.go:50","msg":"successfully notified init daemon"}
	{"level":"info","ts":"2023-12-01T18:59:01.030283Z","caller":"membership/cluster.go:584","msg":"set initial cluster version","cluster-id":"fa54960ea34d58be","local-member-id":"aec36adc501070cc","cluster-version":"3.5"}
	{"level":"info","ts":"2023-12-01T18:59:01.030438Z","caller":"api/capability.go:75","msg":"enabled capabilities for version","cluster-version":"3.5"}
	{"level":"info","ts":"2023-12-01T18:59:01.030541Z","caller":"etcdserver/server.go:2595","msg":"cluster version is updated","cluster-version":"3.5"}
	
	* 
	* ==> kernel <==
	*  19:00:11 up  1:42,  0 users,  load average: 1.12, 1.59, 1.73
	Linux functional-616785 5.15.0-1050-aws #55~20.04.1-Ubuntu SMP Mon Nov 6 12:18:16 UTC 2023 aarch64 aarch64 aarch64 GNU/Linux
	PRETTY_NAME="Ubuntu 22.04.3 LTS"
	
	* 
	* ==> kindnet [9efdb32584bcd1c4938cb1074c27c1b3d95cdc91deea1013f2b99669c0621313] <==
	* I1201 18:59:23.609639       1 main.go:102] connected to apiserver: https://10.96.0.1:443
	I1201 18:59:23.609709       1 main.go:107] hostIP = 192.168.49.2
	podIP = 192.168.49.2
	I1201 18:59:23.609870       1 main.go:116] setting mtu 1500 for CNI 
	I1201 18:59:23.609891       1 main.go:146] kindnetd IP family: "ipv4"
	I1201 18:59:23.609903       1 main.go:150] noMask IPv4 subnets: [10.244.0.0/16]
	I1201 18:59:24.107651       1 main.go:223] Handling node with IPs: map[192.168.49.2:{}]
	I1201 18:59:24.107686       1 main.go:227] handling current node
	I1201 18:59:34.206410       1 main.go:223] Handling node with IPs: map[192.168.49.2:{}]
	I1201 18:59:34.206445       1 main.go:227] handling current node
	I1201 18:59:44.220239       1 main.go:223] Handling node with IPs: map[192.168.49.2:{}]
	I1201 18:59:44.220267       1 main.go:227] handling current node
	I1201 18:59:54.224204       1 main.go:223] Handling node with IPs: map[192.168.49.2:{}]
	I1201 18:59:54.224235       1 main.go:227] handling current node
	I1201 19:00:04.234747       1 main.go:223] Handling node with IPs: map[192.168.49.2:{}]
	I1201 19:00:04.234778       1 main.go:227] handling current node
	
	* 
	* ==> kindnet [9f4183dd56963e73258dd3b04d0b72d0311cd083e5a23a4f9463bca0138848ba] <==
	* I1201 19:00:08.908137       1 main.go:102] connected to apiserver: https://10.96.0.1:443
	I1201 19:00:08.908218       1 main.go:107] hostIP = 192.168.49.2
	podIP = 192.168.49.2
	I1201 19:00:08.908403       1 main.go:116] setting mtu 1500 for CNI 
	I1201 19:00:08.908769       1 main.go:146] kindnetd IP family: "ipv4"
	I1201 19:00:08.908799       1 main.go:150] noMask IPv4 subnets: [10.244.0.0/16]
	I1201 19:00:09.306000       1 main.go:223] Handling node with IPs: map[192.168.49.2:{}]
	I1201 19:00:09.306035       1 main.go:227] handling current node
	
	* 
	* ==> kube-apiserver [2688cc66adf8e4f8e9a5996f8a27f4527b00c7741e59aadea1022fc868f95648] <==
	* 
	* ==> kube-apiserver [7c7908b790bf54073f1315a8c1586f4746b82b87e895963cb52f53f06a6ddfd3] <==
	* I1201 19:00:08.781067       1 options.go:220] external host was not specified, using 192.168.49.2
	I1201 19:00:08.782421       1 server.go:148] Version: v1.28.4
	I1201 19:00:08.784548       1 server.go:150] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
	E1201 19:00:08.784953       1 run.go:74] "command failed" err="failed to create listener: failed to listen on 0.0.0.0:8441: listen tcp 0.0.0.0:8441: bind: address already in use"
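The replacement apiserver (7c7908…) exits immediately because the previous instance (2688cc…, only stopped at 19:00:09 according to the containerd log above) still holds 0.0.0.0:8441 when kubeadm starts the new one. A hedged sketch of how the overlap could be observed on the node, assuming crictl and ss are available in the kicbase image (hypothetical commands, not part of the recorded run):

	sudo crictl ps -a --name kube-apiserver     # both apiserver containers: one Exited, one crash-looping
	sudo ss -ltnp 'sport = :8441'               # the listener that still owns the apiserver port
	sudo crictl logs 7c7908b790bf5              # repeats the "address already in use" error above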
	
	* 
	* ==> kube-controller-manager [a01aebe75ba55e11656030032922a9fe89ca68f6b88e61a6565e71e38c259849] <==
	* I1201 18:59:20.730183       1 shared_informer.go:318] Caches are synced for bootstrap_signer
	I1201 18:59:20.735719       1 shared_informer.go:318] Caches are synced for resource quota
	I1201 18:59:21.077631       1 shared_informer.go:318] Caches are synced for garbage collector
	I1201 18:59:21.132680       1 shared_informer.go:318] Caches are synced for garbage collector
	I1201 18:59:21.132715       1 garbagecollector.go:166] "All resource monitors have synced. Proceeding to collect garbage"
	I1201 18:59:21.188755       1 event.go:307] "Event occurred" object="kube-system/coredns" fieldPath="" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled up replica set coredns-5dd5756b68 to 2"
	I1201 18:59:21.358886       1 event.go:307] "Event occurred" object="kube-system/kindnet" fieldPath="" kind="DaemonSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: kindnet-6zm7x"
	I1201 18:59:21.372368       1 event.go:307] "Event occurred" object="kube-system/kube-proxy" fieldPath="" kind="DaemonSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: kube-proxy-d8cvf"
	I1201 18:59:21.549862       1 event.go:307] "Event occurred" object="kube-system/coredns" fieldPath="" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled down replica set coredns-5dd5756b68 to 1 from 2"
	I1201 18:59:21.573066       1 event.go:307] "Event occurred" object="kube-system/coredns-5dd5756b68" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: coredns-5dd5756b68-sg6kz"
	I1201 18:59:21.604974       1 event.go:307] "Event occurred" object="kube-system/coredns-5dd5756b68" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: coredns-5dd5756b68-ts5dh"
	I1201 18:59:21.685556       1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="497.616941ms"
	I1201 18:59:21.706789       1 event.go:307] "Event occurred" object="kube-system/coredns-5dd5756b68" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulDelete" message="Deleted pod: coredns-5dd5756b68-sg6kz"
	I1201 18:59:21.736168       1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="50.554121ms"
	I1201 18:59:21.766371       1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="30.149126ms"
	I1201 18:59:21.766506       1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="96.681µs"
	I1201 18:59:23.538754       1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="93.252µs"
	I1201 18:59:23.545800       1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="389.842µs"
	I1201 18:59:23.548835       1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="93.038µs"
	I1201 18:59:38.544286       1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="93.99µs"
	I1201 18:59:38.574653       1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="11.931528ms"
	I1201 18:59:38.575231       1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="74.01µs"
	I1201 19:00:08.370173       1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="23.885364ms"
	I1201 19:00:08.370264       1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="53.907µs"
	I1201 19:00:10.640613       1 node_lifecycle_controller.go:1029] "Controller detected that all Nodes are not-Ready. Entering master disruption mode"
	
	* 
	* ==> kube-proxy [1fb932a179c7041614a4fcc5715379bec1bebfa33c81139f5df677e9fa11945f] <==
	* I1201 18:59:23.498353       1 server_others.go:69] "Using iptables proxy"
	I1201 18:59:23.520600       1 node.go:141] Successfully retrieved node IP: 192.168.49.2
	I1201 18:59:23.563559       1 server.go:632] "kube-proxy running in dual-stack mode" primary ipFamily="IPv4"
	I1201 18:59:23.569567       1 server_others.go:152] "Using iptables Proxier"
	I1201 18:59:23.569604       1 server_others.go:421] "Detect-local-mode set to ClusterCIDR, but no cluster CIDR for family" ipFamily="IPv6"
	I1201 18:59:23.569613       1 server_others.go:438] "Defaulting to no-op detect-local"
	I1201 18:59:23.569762       1 proxier.go:251] "Setting route_localnet=1 to allow node-ports on localhost; to change this either disable iptables.localhostNodePorts (--iptables-localhost-nodeports) or set nodePortAddresses (--nodeport-addresses) to filter loopback addresses"
	I1201 18:59:23.570520       1 server.go:846] "Version info" version="v1.28.4"
	I1201 18:59:23.570537       1 server.go:848] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
	I1201 18:59:23.571624       1 config.go:188] "Starting service config controller"
	I1201 18:59:23.571771       1 shared_informer.go:311] Waiting for caches to sync for service config
	I1201 18:59:23.571801       1 config.go:97] "Starting endpoint slice config controller"
	I1201 18:59:23.571806       1 shared_informer.go:311] Waiting for caches to sync for endpoint slice config
	I1201 18:59:23.572697       1 config.go:315] "Starting node config controller"
	I1201 18:59:23.572767       1 shared_informer.go:311] Waiting for caches to sync for node config
	I1201 18:59:23.672107       1 shared_informer.go:318] Caches are synced for endpoint slice config
	I1201 18:59:23.672299       1 shared_informer.go:318] Caches are synced for service config
	I1201 18:59:23.672837       1 shared_informer.go:318] Caches are synced for node config
	
	* 
	* ==> kube-proxy [99140cde269c3bb7410bf1be86e8a909ba785666d6e95cc06fefd7c5cf8c76e3] <==
	* I1201 19:00:08.965043       1 server.go:632] "kube-proxy running in dual-stack mode" primary ipFamily="IPv4"
	I1201 19:00:08.967337       1 server_others.go:152] "Using iptables Proxier"
	I1201 19:00:08.967428       1 server_others.go:421] "Detect-local-mode set to ClusterCIDR, but no cluster CIDR for family" ipFamily="IPv6"
	I1201 19:00:08.967456       1 server_others.go:438] "Defaulting to no-op detect-local"
	I1201 19:00:08.967556       1 proxier.go:251] "Setting route_localnet=1 to allow node-ports on localhost; to change this either disable iptables.localhostNodePorts (--iptables-localhost-nodeports) or set nodePortAddresses (--nodeport-addresses) to filter loopback addresses"
	I1201 19:00:08.967825       1 server.go:846] "Version info" version="v1.28.4"
	I1201 19:00:08.968039       1 server.go:848] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
	I1201 19:00:08.969104       1 config.go:188] "Starting service config controller"
	I1201 19:00:08.969469       1 shared_informer.go:311] Waiting for caches to sync for service config
	I1201 19:00:08.969647       1 config.go:97] "Starting endpoint slice config controller"
	I1201 19:00:08.969728       1 shared_informer.go:311] Waiting for caches to sync for endpoint slice config
	I1201 19:00:08.972773       1 config.go:315] "Starting node config controller"
	I1201 19:00:08.972913       1 shared_informer.go:311] Waiting for caches to sync for node config
	I1201 19:00:09.070117       1 shared_informer.go:318] Caches are synced for endpoint slice config
	I1201 19:00:09.070124       1 shared_informer.go:318] Caches are synced for service config
	I1201 19:00:09.073390       1 shared_informer.go:318] Caches are synced for node config
	W1201 19:00:09.365278       1 reflector.go:458] vendor/k8s.io/client-go/informers/factory.go:150: watch of *v1.Node ended with: very short watch: vendor/k8s.io/client-go/informers/factory.go:150: Unexpected watch close - watch lasted less than a second and no items received
	W1201 19:00:09.365342       1 reflector.go:458] vendor/k8s.io/client-go/informers/factory.go:150: watch of *v1.Service ended with: very short watch: vendor/k8s.io/client-go/informers/factory.go:150: Unexpected watch close - watch lasted less than a second and no items received
	W1201 19:00:09.365367       1 reflector.go:458] vendor/k8s.io/client-go/informers/factory.go:150: watch of *v1.EndpointSlice ended with: very short watch: vendor/k8s.io/client-go/informers/factory.go:150: Unexpected watch close - watch lasted less than a second and no items received
	W1201 19:00:10.432966       1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.Node: Get "https://control-plane.minikube.internal:8441/api/v1/nodes?fieldSelector=metadata.name%!D(MISSING)functional-616785&resourceVersion=476": dial tcp 192.168.49.2:8441: connect: connection refused
	E1201 19:00:10.433018       1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.Node: failed to list *v1.Node: Get "https://control-plane.minikube.internal:8441/api/v1/nodes?fieldSelector=metadata.name%!D(MISSING)functional-616785&resourceVersion=476": dial tcp 192.168.49.2:8441: connect: connection refused
	W1201 19:00:10.557764       1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.EndpointSlice: Get "https://control-plane.minikube.internal:8441/apis/discovery.k8s.io/v1/endpointslices?labelSelector=%!s(MISSING)ervice.kubernetes.io%!F(MISSING)headless%!C(MISSING)%!s(MISSING)ervice.kubernetes.io%!F(MISSING)service-proxy-name&resourceVersion=494": dial tcp 192.168.49.2:8441: connect: connection refused
	E1201 19:00:10.557826       1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.EndpointSlice: failed to list *v1.EndpointSlice: Get "https://control-plane.minikube.internal:8441/apis/discovery.k8s.io/v1/endpointslices?labelSelector=%!s(MISSING)ervice.kubernetes.io%!F(MISSING)headless%!C(MISSING)%!s(MISSING)ervice.kubernetes.io%!F(MISSING)service-proxy-name&resourceVersion=494": dial tcp 192.168.49.2:8441: connect: connection refused
	W1201 19:00:10.878895       1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.Service: Get "https://control-plane.minikube.internal:8441/api/v1/services?labelSelector=%!s(MISSING)ervice.kubernetes.io%!F(MISSING)headless%!C(MISSING)%!s(MISSING)ervice.kubernetes.io%!F(MISSING)service-proxy-name&resourceVersion=483": dial tcp 192.168.49.2:8441: connect: connection refused
	E1201 19:00:10.878942       1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.Service: failed to list *v1.Service: Get "https://control-plane.minikube.internal:8441/api/v1/services?labelSelector=%!s(MISSING)ervice.kubernetes.io%!F(MISSING)headless%!C(MISSING)%!s(MISSING)ervice.kubernetes.io%!F(MISSING)service-proxy-name&resourceVersion=483": dial tcp 192.168.49.2:8441: connect: connection refused
	
	* 
	* ==> kube-scheduler [6353341ab4be94e3126a82f86e5b7b6c810a95a11c1532a6524b40001c56ec1e] <==
	* W1201 18:59:04.862831       1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User "system:kube-scheduler" cannot list resource "poddisruptionbudgets" in API group "policy" at the cluster scope
	E1201 18:59:04.863943       1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.PodDisruptionBudget: failed to list *v1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User "system:kube-scheduler" cannot list resource "poddisruptionbudgets" in API group "policy" at the cluster scope
	W1201 18:59:05.669584       1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.ReplicaSet: replicasets.apps is forbidden: User "system:kube-scheduler" cannot list resource "replicasets" in API group "apps" at the cluster scope
	E1201 18:59:05.669624       1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.ReplicaSet: failed to list *v1.ReplicaSet: replicasets.apps is forbidden: User "system:kube-scheduler" cannot list resource "replicasets" in API group "apps" at the cluster scope
	W1201 18:59:05.672101       1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumes" in API group "" at the cluster scope
	E1201 18:59:05.672139       1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.PersistentVolume: failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumes" in API group "" at the cluster scope
	W1201 18:59:05.677069       1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.ReplicationController: replicationcontrollers is forbidden: User "system:kube-scheduler" cannot list resource "replicationcontrollers" in API group "" at the cluster scope
	E1201 18:59:05.677112       1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.ReplicationController: failed to list *v1.ReplicationController: replicationcontrollers is forbidden: User "system:kube-scheduler" cannot list resource "replicationcontrollers" in API group "" at the cluster scope
	W1201 18:59:05.746199       1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.Node: nodes is forbidden: User "system:kube-scheduler" cannot list resource "nodes" in API group "" at the cluster scope
	E1201 18:59:05.746534       1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.Node: failed to list *v1.Node: nodes is forbidden: User "system:kube-scheduler" cannot list resource "nodes" in API group "" at the cluster scope
	W1201 18:59:05.828450       1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumeclaims" in API group "" at the cluster scope
	E1201 18:59:05.828519       1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.PersistentVolumeClaim: failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumeclaims" in API group "" at the cluster scope
	W1201 18:59:05.912441       1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.Namespace: namespaces is forbidden: User "system:kube-scheduler" cannot list resource "namespaces" in API group "" at the cluster scope
	E1201 18:59:05.912680       1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.Namespace: failed to list *v1.Namespace: namespaces is forbidden: User "system:kube-scheduler" cannot list resource "namespaces" in API group "" at the cluster scope
	W1201 18:59:05.976223       1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.CSIStorageCapacity: csistoragecapacities.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csistoragecapacities" in API group "storage.k8s.io" at the cluster scope
	E1201 18:59:05.976399       1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.CSIStorageCapacity: failed to list *v1.CSIStorageCapacity: csistoragecapacities.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csistoragecapacities" in API group "storage.k8s.io" at the cluster scope
	W1201 18:59:05.979103       1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User "system:kube-scheduler" cannot list resource "poddisruptionbudgets" in API group "policy" at the cluster scope
	E1201 18:59:05.979232       1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.PodDisruptionBudget: failed to list *v1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User "system:kube-scheduler" cannot list resource "poddisruptionbudgets" in API group "policy" at the cluster scope
	W1201 18:59:06.021165       1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope
	E1201 18:59:06.021218       1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.StorageClass: failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope
	W1201 18:59:06.082719       1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.CSIDriver: csidrivers.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csidrivers" in API group "storage.k8s.io" at the cluster scope
	E1201 18:59:06.082973       1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: csidrivers.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csidrivers" in API group "storage.k8s.io" at the cluster scope
	W1201 18:59:06.148578       1 reflector.go:535] pkg/server/dynamiccertificates/configmap_cafile_content.go:206: failed to list *v1.ConfigMap: configmaps "extension-apiserver-authentication" is forbidden: User "system:kube-scheduler" cannot list resource "configmaps" in API group "" in the namespace "kube-system"
	E1201 18:59:06.148877       1 reflector.go:147] pkg/server/dynamiccertificates/configmap_cafile_content.go:206: Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps "extension-apiserver-authentication" is forbidden: User "system:kube-scheduler" cannot list resource "configmaps" in API group "" in the namespace "kube-system"
	I1201 18:59:08.317482       1 shared_informer.go:318] Caches are synced for client-ca::kube-system::extension-apiserver-authentication::client-ca-file
	
	* 
	* ==> kubelet <==
	* Dec 01 19:00:09 functional-616785 kubelet[3579]: I1201 19:00:09.460002    3579 status_manager.go:853] "Failed to get status for pod" podUID="e02380f9-bcfc-4a99-a5c0-e2372e5f0ad4" pod="kube-system/kindnet-6zm7x" err="Get \"https://control-plane.minikube.internal:8441/api/v1/namespaces/kube-system/pods/kindnet-6zm7x\": dial tcp 192.168.49.2:8441: connect: connection refused"
	Dec 01 19:00:09 functional-616785 kubelet[3579]: I1201 19:00:09.460171    3579 status_manager.go:853] "Failed to get status for pod" podUID="f95b7b7d-84db-44db-8038-dcfd7e1ab770" pod="kube-system/kube-proxy-d8cvf" err="Get \"https://control-plane.minikube.internal:8441/api/v1/namespaces/kube-system/pods/kube-proxy-d8cvf\": dial tcp 192.168.49.2:8441: connect: connection refused"
	Dec 01 19:00:09 functional-616785 kubelet[3579]: I1201 19:00:09.463121    3579 status_manager.go:853] "Failed to get status for pod" podUID="f95b7b7d-84db-44db-8038-dcfd7e1ab770" pod="kube-system/kube-proxy-d8cvf" err="Get \"https://control-plane.minikube.internal:8441/api/v1/namespaces/kube-system/pods/kube-proxy-d8cvf\": dial tcp 192.168.49.2:8441: connect: connection refused"
	Dec 01 19:00:09 functional-616785 kubelet[3579]: I1201 19:00:09.463422    3579 status_manager.go:853] "Failed to get status for pod" podUID="064115bd7120268299a4b217a1add347" pod="kube-system/kube-apiserver-functional-616785" err="Get \"https://control-plane.minikube.internal:8441/api/v1/namespaces/kube-system/pods/kube-apiserver-functional-616785\": dial tcp 192.168.49.2:8441: connect: connection refused"
	Dec 01 19:00:09 functional-616785 kubelet[3579]: I1201 19:00:09.463758    3579 status_manager.go:853] "Failed to get status for pod" podUID="393607ea-a066-4d39-93eb-75c52a6ab29e" pod="kube-system/storage-provisioner" err="Get \"https://control-plane.minikube.internal:8441/api/v1/namespaces/kube-system/pods/storage-provisioner\": dial tcp 192.168.49.2:8441: connect: connection refused"
	Dec 01 19:00:09 functional-616785 kubelet[3579]: I1201 19:00:09.463987    3579 status_manager.go:853] "Failed to get status for pod" podUID="e02380f9-bcfc-4a99-a5c0-e2372e5f0ad4" pod="kube-system/kindnet-6zm7x" err="Get \"https://control-plane.minikube.internal:8441/api/v1/namespaces/kube-system/pods/kindnet-6zm7x\": dial tcp 192.168.49.2:8441: connect: connection refused"
	Dec 01 19:00:09 functional-616785 kubelet[3579]: I1201 19:00:09.464223    3579 status_manager.go:853] "Failed to get status for pod" podUID="daaa92f6-6743-4e3e-a5e9-0d3bea42c1e0" pod="kube-system/coredns-5dd5756b68-ts5dh" err="Get \"https://control-plane.minikube.internal:8441/api/v1/namespaces/kube-system/pods/coredns-5dd5756b68-ts5dh\": dial tcp 192.168.49.2:8441: connect: connection refused"
	Dec 01 19:00:10 functional-616785 kubelet[3579]: I1201 19:00:10.466587    3579 scope.go:117] "RemoveContainer" containerID="7c7908b790bf54073f1315a8c1586f4746b82b87e895963cb52f53f06a6ddfd3"
	Dec 01 19:00:10 functional-616785 kubelet[3579]: E1201 19:00:10.467160    3579 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver pod=kube-apiserver-functional-616785_kube-system(064115bd7120268299a4b217a1add347)\"" pod="kube-system/kube-apiserver-functional-616785" podUID="064115bd7120268299a4b217a1add347"
	Dec 01 19:00:10 functional-616785 kubelet[3579]: I1201 19:00:10.467566    3579 status_manager.go:853] "Failed to get status for pod" podUID="064115bd7120268299a4b217a1add347" pod="kube-system/kube-apiserver-functional-616785" err="Get \"https://control-plane.minikube.internal:8441/api/v1/namespaces/kube-system/pods/kube-apiserver-functional-616785\": dial tcp 192.168.49.2:8441: connect: connection refused"
	Dec 01 19:00:10 functional-616785 kubelet[3579]: I1201 19:00:10.467758    3579 status_manager.go:853] "Failed to get status for pod" podUID="393607ea-a066-4d39-93eb-75c52a6ab29e" pod="kube-system/storage-provisioner" err="Get \"https://control-plane.minikube.internal:8441/api/v1/namespaces/kube-system/pods/storage-provisioner\": dial tcp 192.168.49.2:8441: connect: connection refused"
	Dec 01 19:00:10 functional-616785 kubelet[3579]: I1201 19:00:10.467915    3579 status_manager.go:853] "Failed to get status for pod" podUID="e02380f9-bcfc-4a99-a5c0-e2372e5f0ad4" pod="kube-system/kindnet-6zm7x" err="Get \"https://control-plane.minikube.internal:8441/api/v1/namespaces/kube-system/pods/kindnet-6zm7x\": dial tcp 192.168.49.2:8441: connect: connection refused"
	Dec 01 19:00:10 functional-616785 kubelet[3579]: I1201 19:00:10.468260    3579 status_manager.go:853] "Failed to get status for pod" podUID="daaa92f6-6743-4e3e-a5e9-0d3bea42c1e0" pod="kube-system/coredns-5dd5756b68-ts5dh" err="Get \"https://control-plane.minikube.internal:8441/api/v1/namespaces/kube-system/pods/coredns-5dd5756b68-ts5dh\": dial tcp 192.168.49.2:8441: connect: connection refused"
	Dec 01 19:00:10 functional-616785 kubelet[3579]: I1201 19:00:10.468455    3579 status_manager.go:853] "Failed to get status for pod" podUID="f95b7b7d-84db-44db-8038-dcfd7e1ab770" pod="kube-system/kube-proxy-d8cvf" err="Get \"https://control-plane.minikube.internal:8441/api/v1/namespaces/kube-system/pods/kube-proxy-d8cvf\": dial tcp 192.168.49.2:8441: connect: connection refused"
	Dec 01 19:00:10 functional-616785 kubelet[3579]: I1201 19:00:10.469814    3579 scope.go:117] "RemoveContainer" containerID="2688cc66adf8e4f8e9a5996f8a27f4527b00c7741e59aadea1022fc868f95648"
	Dec 01 19:00:10 functional-616785 kubelet[3579]: I1201 19:00:10.478022    3579 scope.go:117] "RemoveContainer" containerID="2688cc66adf8e4f8e9a5996f8a27f4527b00c7741e59aadea1022fc868f95648"
	Dec 01 19:00:10 functional-616785 kubelet[3579]: E1201 19:00:10.478673    3579 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = an error occurred when try to find container \"2688cc66adf8e4f8e9a5996f8a27f4527b00c7741e59aadea1022fc868f95648\": not found" containerID="2688cc66adf8e4f8e9a5996f8a27f4527b00c7741e59aadea1022fc868f95648"
	Dec 01 19:00:10 functional-616785 kubelet[3579]: I1201 19:00:10.478773    3579 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"containerd","ID":"2688cc66adf8e4f8e9a5996f8a27f4527b00c7741e59aadea1022fc868f95648"} err="failed to get container status \"2688cc66adf8e4f8e9a5996f8a27f4527b00c7741e59aadea1022fc868f95648\": rpc error: code = NotFound desc = an error occurred when try to find container \"2688cc66adf8e4f8e9a5996f8a27f4527b00c7741e59aadea1022fc868f95648\": not found"
	Dec 01 19:00:10 functional-616785 kubelet[3579]: I1201 19:00:10.897697    3579 status_manager.go:853] "Failed to get status for pod" podUID="064115bd7120268299a4b217a1add347" pod="kube-system/kube-apiserver-functional-616785" err="Get \"https://control-plane.minikube.internal:8441/api/v1/namespaces/kube-system/pods/kube-apiserver-functional-616785\": dial tcp 192.168.49.2:8441: connect: connection refused"
	Dec 01 19:00:10 functional-616785 kubelet[3579]: I1201 19:00:10.897915    3579 status_manager.go:853] "Failed to get status for pod" podUID="393607ea-a066-4d39-93eb-75c52a6ab29e" pod="kube-system/storage-provisioner" err="Get \"https://control-plane.minikube.internal:8441/api/v1/namespaces/kube-system/pods/storage-provisioner\": dial tcp 192.168.49.2:8441: connect: connection refused"
	Dec 01 19:00:10 functional-616785 kubelet[3579]: I1201 19:00:10.898092    3579 status_manager.go:853] "Failed to get status for pod" podUID="e02380f9-bcfc-4a99-a5c0-e2372e5f0ad4" pod="kube-system/kindnet-6zm7x" err="Get \"https://control-plane.minikube.internal:8441/api/v1/namespaces/kube-system/pods/kindnet-6zm7x\": dial tcp 192.168.49.2:8441: connect: connection refused"
	Dec 01 19:00:10 functional-616785 kubelet[3579]: I1201 19:00:10.898260    3579 status_manager.go:853] "Failed to get status for pod" podUID="daaa92f6-6743-4e3e-a5e9-0d3bea42c1e0" pod="kube-system/coredns-5dd5756b68-ts5dh" err="Get \"https://control-plane.minikube.internal:8441/api/v1/namespaces/kube-system/pods/coredns-5dd5756b68-ts5dh\": dial tcp 192.168.49.2:8441: connect: connection refused"
	Dec 01 19:00:10 functional-616785 kubelet[3579]: I1201 19:00:10.898431    3579 status_manager.go:853] "Failed to get status for pod" podUID="f95b7b7d-84db-44db-8038-dcfd7e1ab770" pod="kube-system/kube-proxy-d8cvf" err="Get \"https://control-plane.minikube.internal:8441/api/v1/namespaces/kube-system/pods/kube-proxy-d8cvf\": dial tcp 192.168.49.2:8441: connect: connection refused"
	Dec 01 19:00:10 functional-616785 kubelet[3579]: I1201 19:00:10.898600    3579 status_manager.go:853] "Failed to get status for pod" podUID="b5714ae4c71e32df6a08d33071c9d40f" pod="kube-system/kube-scheduler-functional-616785" err="Get \"https://control-plane.minikube.internal:8441/api/v1/namespaces/kube-system/pods/kube-scheduler-functional-616785\": dial tcp 192.168.49.2:8441: connect: connection refused"
	Dec 01 19:00:11 functional-616785 kubelet[3579]: I1201 19:00:11.307744    3579 kubelet_volumes.go:161] "Cleaned up orphaned pod volumes dir" podUID="399196d44eb6c509f7b60d72c5662125" path="/var/lib/kubelet/pods/399196d44eb6c509f7b60d72c5662125/volumes"
	
	* 
	* ==> storage-provisioner [172a910da74d5d4bd72533e43c42af2574c81da2cb8735f3148fb22366fc7b38] <==
	* I1201 18:59:53.741614       1 storage_provisioner.go:116] Initializing the minikube storage provisioner...
	I1201 18:59:53.754609       1 storage_provisioner.go:141] Storage provisioner initialized, now starting service!
	I1201 18:59:53.754704       1 leaderelection.go:243] attempting to acquire leader lease kube-system/k8s.io-minikube-hostpath...
	I1201 18:59:53.763812       1 leaderelection.go:253] successfully acquired lease kube-system/k8s.io-minikube-hostpath
	I1201 18:59:53.766136       1 controller.go:835] Starting provisioner controller k8s.io/minikube-hostpath_functional-616785_31b3a833-b9d7-486c-b75b-c46b58186b32!
	I1201 18:59:53.766892       1 event.go:282] Event(v1.ObjectReference{Kind:"Endpoints", Namespace:"kube-system", Name:"k8s.io-minikube-hostpath", UID:"dc41cc85-2a39-4b80-b12c-8ed2ca2b6e8f", APIVersion:"v1", ResourceVersion:"456", FieldPath:""}): type: 'Normal' reason: 'LeaderElection' functional-616785_31b3a833-b9d7-486c-b75b-c46b58186b32 became leader
	I1201 18:59:53.866332       1 controller.go:884] Started provisioner controller k8s.io/minikube-hostpath_functional-616785_31b3a833-b9d7-486c-b75b-c46b58186b32!
	
	* 
	* ==> storage-provisioner [477d114576c3196c1f8fc46abb75a2b06e00afd9cf1d61a1aac989f769c7e723] <==
	* I1201 19:00:08.787745       1 storage_provisioner.go:116] Initializing the minikube storage provisioner...
	I1201 19:00:08.820066       1 storage_provisioner.go:141] Storage provisioner initialized, now starting service!
	I1201 19:00:08.820690       1 leaderelection.go:243] attempting to acquire leader lease kube-system/k8s.io-minikube-hostpath...
	
	

                                                
                                                
-- /stdout --
** stderr ** 
	E1201 19:00:10.917394  286048 logs.go:195] command /bin/bash -c "sudo /var/lib/minikube/binaries/v1.28.4/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig" failed with error: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.28.4/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig": Process exited with status 1
	stdout:
	
	stderr:
	The connection to the server localhost:8441 was refused - did you specify the right host or port?
	 output: "\n** stderr ** \nThe connection to the server localhost:8441 was refused - did you specify the right host or port?\n\n** /stderr **"
	E1201 19:00:11.168079  286048 logs.go:195] command /bin/bash -c "sudo /usr/bin/crictl logs --tail 25 2688cc66adf8e4f8e9a5996f8a27f4527b00c7741e59aadea1022fc868f95648" failed with error: /bin/bash -c "sudo /usr/bin/crictl logs --tail 25 2688cc66adf8e4f8e9a5996f8a27f4527b00c7741e59aadea1022fc868f95648": Process exited with status 1
	stdout:
	
	stderr:
	E1201 19:00:11.164546    4283 remote_runtime.go:625] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = an error occurred when try to find container \"2688cc66adf8e4f8e9a5996f8a27f4527b00c7741e59aadea1022fc868f95648\": not found" containerID="2688cc66adf8e4f8e9a5996f8a27f4527b00c7741e59aadea1022fc868f95648"
	time="2023-12-01T19:00:11Z" level=fatal msg="rpc error: code = NotFound desc = an error occurred when try to find container \"2688cc66adf8e4f8e9a5996f8a27f4527b00c7741e59aadea1022fc868f95648\": not found"
	 output: "\n** stderr ** \nE1201 19:00:11.164546    4283 remote_runtime.go:625] \"ContainerStatus from runtime service failed\" err=\"rpc error: code = NotFound desc = an error occurred when try to find container \\\"2688cc66adf8e4f8e9a5996f8a27f4527b00c7741e59aadea1022fc868f95648\\\": not found\" containerID=\"2688cc66adf8e4f8e9a5996f8a27f4527b00c7741e59aadea1022fc868f95648\"\ntime=\"2023-12-01T19:00:11Z\" level=fatal msg=\"rpc error: code = NotFound desc = an error occurred when try to find container \\\"2688cc66adf8e4f8e9a5996f8a27f4527b00c7741e59aadea1022fc868f95648\\\": not found\"\n\n** /stderr **"
	! unable to fetch logs for: describe nodes, kube-apiserver [2688cc66adf8e4f8e9a5996f8a27f4527b00c7741e59aadea1022fc868f95648]

                                                
                                                
** /stderr **
helpers_test.go:254: (dbg) Run:  out/minikube-linux-arm64 status --format={{.APIServer}} -p functional-616785 -n functional-616785
helpers_test.go:254: (dbg) Non-zero exit: out/minikube-linux-arm64 status --format={{.APIServer}} -p functional-616785 -n functional-616785: exit status 2 (399.601675ms)

                                                
                                                
-- stdout --
	Stopped

                                                
                                                
-- /stdout --
helpers_test.go:254: status error: exit status 2 (may be ok)
helpers_test.go:256: "functional-616785" apiserver is not running, skipping kubectl commands (state="Stopped")
--- FAIL: TestFunctional/serial/ExtraConfig (16.46s)

                                                
                                    
TestFunctional/serial/ComponentHealth (2.43s)

                                                
                                                
=== RUN   TestFunctional/serial/ComponentHealth
functional_test.go:806: (dbg) Run:  kubectl --context functional-616785 get po -l tier=control-plane -n kube-system -o=json
functional_test.go:806: (dbg) Non-zero exit: kubectl --context functional-616785 get po -l tier=control-plane -n kube-system -o=json: exit status 1 (72.080324ms)

                                                
                                                
-- stdout --
	{
	    "apiVersion": "v1",
	    "items": [],
	    "kind": "List",
	    "metadata": {
	        "resourceVersion": ""
	    }
	}

                                                
                                                
-- /stdout --
** stderr ** 
	The connection to the server 192.168.49.2:8441 was refused - did you specify the right host or port?

                                                
                                                
** /stderr **
functional_test.go:808: failed to get components. args "kubectl --context functional-616785 get po -l tier=control-plane -n kube-system -o=json": exit status 1
helpers_test.go:222: -----------------------post-mortem--------------------------------
helpers_test.go:230: ======>  post-mortem[TestFunctional/serial/ComponentHealth]: docker inspect <======
helpers_test.go:231: (dbg) Run:  docker inspect functional-616785
helpers_test.go:235: (dbg) docker inspect functional-616785:

                                                
                                                
-- stdout --
	[
	    {
	        "Id": "8fb32b74d7c5c78662dd1fc46356f2a9d1acf6222d4c0e4ac405b58b46a669fd",
	        "Created": "2023-12-01T18:58:44.056091264Z",
	        "Path": "/usr/local/bin/entrypoint",
	        "Args": [
	            "/sbin/init"
	        ],
	        "State": {
	            "Status": "running",
	            "Running": true,
	            "Paused": false,
	            "Restarting": false,
	            "OOMKilled": false,
	            "Dead": false,
	            "Pid": 280935,
	            "ExitCode": 0,
	            "Error": "",
	            "StartedAt": "2023-12-01T18:58:44.391941652Z",
	            "FinishedAt": "0001-01-01T00:00:00Z"
	        },
	        "Image": "sha256:e4e0f3cc6f04c458835e9edb05d52f031520d40521bc3568d81cbb7c06a79ef2",
	        "ResolvConfPath": "/var/lib/docker/containers/8fb32b74d7c5c78662dd1fc46356f2a9d1acf6222d4c0e4ac405b58b46a669fd/resolv.conf",
	        "HostnamePath": "/var/lib/docker/containers/8fb32b74d7c5c78662dd1fc46356f2a9d1acf6222d4c0e4ac405b58b46a669fd/hostname",
	        "HostsPath": "/var/lib/docker/containers/8fb32b74d7c5c78662dd1fc46356f2a9d1acf6222d4c0e4ac405b58b46a669fd/hosts",
	        "LogPath": "/var/lib/docker/containers/8fb32b74d7c5c78662dd1fc46356f2a9d1acf6222d4c0e4ac405b58b46a669fd/8fb32b74d7c5c78662dd1fc46356f2a9d1acf6222d4c0e4ac405b58b46a669fd-json.log",
	        "Name": "/functional-616785",
	        "RestartCount": 0,
	        "Driver": "overlay2",
	        "Platform": "linux",
	        "MountLabel": "",
	        "ProcessLabel": "",
	        "AppArmorProfile": "unconfined",
	        "ExecIDs": null,
	        "HostConfig": {
	            "Binds": [
	                "functional-616785:/var",
	                "/lib/modules:/lib/modules:ro"
	            ],
	            "ContainerIDFile": "",
	            "LogConfig": {
	                "Type": "json-file",
	                "Config": {}
	            },
	            "NetworkMode": "functional-616785",
	            "PortBindings": {
	                "22/tcp": [
	                    {
	                        "HostIp": "127.0.0.1",
	                        "HostPort": ""
	                    }
	                ],
	                "2376/tcp": [
	                    {
	                        "HostIp": "127.0.0.1",
	                        "HostPort": ""
	                    }
	                ],
	                "32443/tcp": [
	                    {
	                        "HostIp": "127.0.0.1",
	                        "HostPort": ""
	                    }
	                ],
	                "5000/tcp": [
	                    {
	                        "HostIp": "127.0.0.1",
	                        "HostPort": ""
	                    }
	                ],
	                "8441/tcp": [
	                    {
	                        "HostIp": "127.0.0.1",
	                        "HostPort": ""
	                    }
	                ]
	            },
	            "RestartPolicy": {
	                "Name": "no",
	                "MaximumRetryCount": 0
	            },
	            "AutoRemove": false,
	            "VolumeDriver": "",
	            "VolumesFrom": null,
	            "ConsoleSize": [
	                0,
	                0
	            ],
	            "CapAdd": null,
	            "CapDrop": null,
	            "CgroupnsMode": "host",
	            "Dns": [],
	            "DnsOptions": [],
	            "DnsSearch": [],
	            "ExtraHosts": null,
	            "GroupAdd": null,
	            "IpcMode": "private",
	            "Cgroup": "",
	            "Links": null,
	            "OomScoreAdj": 0,
	            "PidMode": "",
	            "Privileged": true,
	            "PublishAllPorts": false,
	            "ReadonlyRootfs": false,
	            "SecurityOpt": [
	                "seccomp=unconfined",
	                "apparmor=unconfined",
	                "label=disable"
	            ],
	            "Tmpfs": {
	                "/run": "",
	                "/tmp": ""
	            },
	            "UTSMode": "",
	            "UsernsMode": "",
	            "ShmSize": 67108864,
	            "Runtime": "runc",
	            "Isolation": "",
	            "CpuShares": 0,
	            "Memory": 4194304000,
	            "NanoCpus": 2000000000,
	            "CgroupParent": "",
	            "BlkioWeight": 0,
	            "BlkioWeightDevice": [],
	            "BlkioDeviceReadBps": [],
	            "BlkioDeviceWriteBps": [],
	            "BlkioDeviceReadIOps": [],
	            "BlkioDeviceWriteIOps": [],
	            "CpuPeriod": 0,
	            "CpuQuota": 0,
	            "CpuRealtimePeriod": 0,
	            "CpuRealtimeRuntime": 0,
	            "CpusetCpus": "",
	            "CpusetMems": "",
	            "Devices": [],
	            "DeviceCgroupRules": null,
	            "DeviceRequests": null,
	            "MemoryReservation": 0,
	            "MemorySwap": 8388608000,
	            "MemorySwappiness": null,
	            "OomKillDisable": false,
	            "PidsLimit": null,
	            "Ulimits": null,
	            "CpuCount": 0,
	            "CpuPercent": 0,
	            "IOMaximumIOps": 0,
	            "IOMaximumBandwidth": 0,
	            "MaskedPaths": null,
	            "ReadonlyPaths": null
	        },
	        "GraphDriver": {
	            "Data": {
	                "LowerDir": "/var/lib/docker/overlay2/90a1eebdd2de22d9f2af0c999b2d595a52358bdd67ea923fc6f73d4dddfbf487-init/diff:/var/lib/docker/overlay2/049ae54891020b74263d4d0f668244f51ae19df0871773fd59686314976f2fd9/diff",
	                "MergedDir": "/var/lib/docker/overlay2/90a1eebdd2de22d9f2af0c999b2d595a52358bdd67ea923fc6f73d4dddfbf487/merged",
	                "UpperDir": "/var/lib/docker/overlay2/90a1eebdd2de22d9f2af0c999b2d595a52358bdd67ea923fc6f73d4dddfbf487/diff",
	                "WorkDir": "/var/lib/docker/overlay2/90a1eebdd2de22d9f2af0c999b2d595a52358bdd67ea923fc6f73d4dddfbf487/work"
	            },
	            "Name": "overlay2"
	        },
	        "Mounts": [
	            {
	                "Type": "volume",
	                "Name": "functional-616785",
	                "Source": "/var/lib/docker/volumes/functional-616785/_data",
	                "Destination": "/var",
	                "Driver": "local",
	                "Mode": "z",
	                "RW": true,
	                "Propagation": ""
	            },
	            {
	                "Type": "bind",
	                "Source": "/lib/modules",
	                "Destination": "/lib/modules",
	                "Mode": "ro",
	                "RW": false,
	                "Propagation": "rprivate"
	            }
	        ],
	        "Config": {
	            "Hostname": "functional-616785",
	            "Domainname": "",
	            "User": "",
	            "AttachStdin": false,
	            "AttachStdout": false,
	            "AttachStderr": false,
	            "ExposedPorts": {
	                "22/tcp": {},
	                "2376/tcp": {},
	                "32443/tcp": {},
	                "5000/tcp": {},
	                "8441/tcp": {}
	            },
	            "Tty": true,
	            "OpenStdin": false,
	            "StdinOnce": false,
	            "Env": [
	                "container=docker",
	                "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
	            ],
	            "Cmd": null,
	            "Image": "gcr.io/k8s-minikube/kicbase-builds:v0.0.42-1701387262-17703@sha256:a5458414df1be5e58eff93b3e67e6ecaad7e51ab23139de15714f7345af15e2f",
	            "Volumes": null,
	            "WorkingDir": "/",
	            "Entrypoint": [
	                "/usr/local/bin/entrypoint",
	                "/sbin/init"
	            ],
	            "OnBuild": null,
	            "Labels": {
	                "created_by.minikube.sigs.k8s.io": "true",
	                "mode.minikube.sigs.k8s.io": "functional-616785",
	                "name.minikube.sigs.k8s.io": "functional-616785",
	                "role.minikube.sigs.k8s.io": ""
	            },
	            "StopSignal": "SIGRTMIN+3"
	        },
	        "NetworkSettings": {
	            "Bridge": "",
	            "SandboxID": "11283c9328ce6444489fa187690a1d377758621c63df408c957a63839729f49c",
	            "HairpinMode": false,
	            "LinkLocalIPv6Address": "",
	            "LinkLocalIPv6PrefixLen": 0,
	            "Ports": {
	                "22/tcp": [
	                    {
	                        "HostIp": "127.0.0.1",
	                        "HostPort": "33098"
	                    }
	                ],
	                "2376/tcp": [
	                    {
	                        "HostIp": "127.0.0.1",
	                        "HostPort": "33097"
	                    }
	                ],
	                "32443/tcp": [
	                    {
	                        "HostIp": "127.0.0.1",
	                        "HostPort": "33094"
	                    }
	                ],
	                "5000/tcp": [
	                    {
	                        "HostIp": "127.0.0.1",
	                        "HostPort": "33096"
	                    }
	                ],
	                "8441/tcp": [
	                    {
	                        "HostIp": "127.0.0.1",
	                        "HostPort": "33095"
	                    }
	                ]
	            },
	            "SandboxKey": "/var/run/docker/netns/11283c9328ce",
	            "SecondaryIPAddresses": null,
	            "SecondaryIPv6Addresses": null,
	            "EndpointID": "",
	            "Gateway": "",
	            "GlobalIPv6Address": "",
	            "GlobalIPv6PrefixLen": 0,
	            "IPAddress": "",
	            "IPPrefixLen": 0,
	            "IPv6Gateway": "",
	            "MacAddress": "",
	            "Networks": {
	                "functional-616785": {
	                    "IPAMConfig": {
	                        "IPv4Address": "192.168.49.2"
	                    },
	                    "Links": null,
	                    "Aliases": [
	                        "8fb32b74d7c5",
	                        "functional-616785"
	                    ],
	                    "NetworkID": "74ca7b05d66aad62d2aa010c91353279be0cd04ba9f5adfee2b7f19ac02a8b0b",
	                    "EndpointID": "b3b040ba0b1e93d2839ba89f4badacd90b65ae2a20ac975e184e5c30500b6587",
	                    "Gateway": "192.168.49.1",
	                    "IPAddress": "192.168.49.2",
	                    "IPPrefixLen": 24,
	                    "IPv6Gateway": "",
	                    "GlobalIPv6Address": "",
	                    "GlobalIPv6PrefixLen": 0,
	                    "MacAddress": "02:42:c0:a8:31:02",
	                    "DriverOpts": null
	                }
	            }
	        }
	    }
	]

                                                
                                                
-- /stdout --
helpers_test.go:239: (dbg) Run:  out/minikube-linux-arm64 status --format={{.Host}} -p functional-616785 -n functional-616785
helpers_test.go:239: (dbg) Non-zero exit: out/minikube-linux-arm64 status --format={{.Host}} -p functional-616785 -n functional-616785: exit status 2 (367.381208ms)

                                                
                                                
-- stdout --
	Running

                                                
                                                
-- /stdout --
helpers_test.go:239: status error: exit status 2 (may be ok)
helpers_test.go:244: <<< TestFunctional/serial/ComponentHealth FAILED: start of post-mortem logs <<<
helpers_test.go:245: ======>  post-mortem[TestFunctional/serial/ComponentHealth]: minikube logs <======
helpers_test.go:247: (dbg) Run:  out/minikube-linux-arm64 -p functional-616785 logs -n 25
helpers_test.go:247: (dbg) Done: out/minikube-linux-arm64 -p functional-616785 logs -n 25: (1.574922032s)
helpers_test.go:252: TestFunctional/serial/ComponentHealth logs: 
-- stdout --
	* 
	* ==> Audit <==
	* |---------|--------------------------------------------------------------------------|-------------------|---------|---------|---------------------|---------------------|
	| Command |                                   Args                                   |      Profile      |  User   | Version |     Start Time      |      End Time       |
	|---------|--------------------------------------------------------------------------|-------------------|---------|---------|---------------------|---------------------|
	| unpause | nospam-163628 --log_dir                                                  | nospam-163628     | jenkins | v1.32.0 | 01 Dec 23 18:58 UTC | 01 Dec 23 18:58 UTC |
	|         | /tmp/nospam-163628 unpause                                               |                   |         |         |                     |                     |
	| unpause | nospam-163628 --log_dir                                                  | nospam-163628     | jenkins | v1.32.0 | 01 Dec 23 18:58 UTC | 01 Dec 23 18:58 UTC |
	|         | /tmp/nospam-163628 unpause                                               |                   |         |         |                     |                     |
	| unpause | nospam-163628 --log_dir                                                  | nospam-163628     | jenkins | v1.32.0 | 01 Dec 23 18:58 UTC | 01 Dec 23 18:58 UTC |
	|         | /tmp/nospam-163628 unpause                                               |                   |         |         |                     |                     |
	| stop    | nospam-163628 --log_dir                                                  | nospam-163628     | jenkins | v1.32.0 | 01 Dec 23 18:58 UTC | 01 Dec 23 18:58 UTC |
	|         | /tmp/nospam-163628 stop                                                  |                   |         |         |                     |                     |
	| stop    | nospam-163628 --log_dir                                                  | nospam-163628     | jenkins | v1.32.0 | 01 Dec 23 18:58 UTC | 01 Dec 23 18:58 UTC |
	|         | /tmp/nospam-163628 stop                                                  |                   |         |         |                     |                     |
	| stop    | nospam-163628 --log_dir                                                  | nospam-163628     | jenkins | v1.32.0 | 01 Dec 23 18:58 UTC | 01 Dec 23 18:58 UTC |
	|         | /tmp/nospam-163628 stop                                                  |                   |         |         |                     |                     |
	| delete  | -p nospam-163628                                                         | nospam-163628     | jenkins | v1.32.0 | 01 Dec 23 18:58 UTC | 01 Dec 23 18:58 UTC |
	| start   | -p functional-616785                                                     | functional-616785 | jenkins | v1.32.0 | 01 Dec 23 18:58 UTC | 01 Dec 23 18:59 UTC |
	|         | --memory=4000                                                            |                   |         |         |                     |                     |
	|         | --apiserver-port=8441                                                    |                   |         |         |                     |                     |
	|         | --wait=all --driver=docker                                               |                   |         |         |                     |                     |
	|         | --container-runtime=containerd                                           |                   |         |         |                     |                     |
	| start   | -p functional-616785                                                     | functional-616785 | jenkins | v1.32.0 | 01 Dec 23 18:59 UTC | 01 Dec 23 18:59 UTC |
	|         | --alsologtostderr -v=8                                                   |                   |         |         |                     |                     |
	| cache   | functional-616785 cache add                                              | functional-616785 | jenkins | v1.32.0 | 01 Dec 23 18:59 UTC | 01 Dec 23 18:59 UTC |
	|         | registry.k8s.io/pause:3.1                                                |                   |         |         |                     |                     |
	| cache   | functional-616785 cache add                                              | functional-616785 | jenkins | v1.32.0 | 01 Dec 23 18:59 UTC | 01 Dec 23 18:59 UTC |
	|         | registry.k8s.io/pause:3.3                                                |                   |         |         |                     |                     |
	| cache   | functional-616785 cache add                                              | functional-616785 | jenkins | v1.32.0 | 01 Dec 23 18:59 UTC | 01 Dec 23 18:59 UTC |
	|         | registry.k8s.io/pause:latest                                             |                   |         |         |                     |                     |
	| cache   | functional-616785 cache add                                              | functional-616785 | jenkins | v1.32.0 | 01 Dec 23 18:59 UTC | 01 Dec 23 18:59 UTC |
	|         | minikube-local-cache-test:functional-616785                              |                   |         |         |                     |                     |
	| cache   | functional-616785 cache delete                                           | functional-616785 | jenkins | v1.32.0 | 01 Dec 23 18:59 UTC | 01 Dec 23 18:59 UTC |
	|         | minikube-local-cache-test:functional-616785                              |                   |         |         |                     |                     |
	| cache   | delete                                                                   | minikube          | jenkins | v1.32.0 | 01 Dec 23 18:59 UTC | 01 Dec 23 18:59 UTC |
	|         | registry.k8s.io/pause:3.3                                                |                   |         |         |                     |                     |
	| cache   | list                                                                     | minikube          | jenkins | v1.32.0 | 01 Dec 23 18:59 UTC | 01 Dec 23 18:59 UTC |
	| ssh     | functional-616785 ssh sudo                                               | functional-616785 | jenkins | v1.32.0 | 01 Dec 23 18:59 UTC | 01 Dec 23 18:59 UTC |
	|         | crictl images                                                            |                   |         |         |                     |                     |
	| ssh     | functional-616785                                                        | functional-616785 | jenkins | v1.32.0 | 01 Dec 23 18:59 UTC | 01 Dec 23 18:59 UTC |
	|         | ssh sudo crictl rmi                                                      |                   |         |         |                     |                     |
	|         | registry.k8s.io/pause:latest                                             |                   |         |         |                     |                     |
	| ssh     | functional-616785 ssh                                                    | functional-616785 | jenkins | v1.32.0 | 01 Dec 23 18:59 UTC |                     |
	|         | sudo crictl inspecti                                                     |                   |         |         |                     |                     |
	|         | registry.k8s.io/pause:latest                                             |                   |         |         |                     |                     |
	| cache   | functional-616785 cache reload                                           | functional-616785 | jenkins | v1.32.0 | 01 Dec 23 18:59 UTC | 01 Dec 23 18:59 UTC |
	| ssh     | functional-616785 ssh                                                    | functional-616785 | jenkins | v1.32.0 | 01 Dec 23 18:59 UTC | 01 Dec 23 18:59 UTC |
	|         | sudo crictl inspecti                                                     |                   |         |         |                     |                     |
	|         | registry.k8s.io/pause:latest                                             |                   |         |         |                     |                     |
	| cache   | delete                                                                   | minikube          | jenkins | v1.32.0 | 01 Dec 23 18:59 UTC | 01 Dec 23 18:59 UTC |
	|         | registry.k8s.io/pause:3.1                                                |                   |         |         |                     |                     |
	| cache   | delete                                                                   | minikube          | jenkins | v1.32.0 | 01 Dec 23 18:59 UTC | 01 Dec 23 18:59 UTC |
	|         | registry.k8s.io/pause:latest                                             |                   |         |         |                     |                     |
	| kubectl | functional-616785 kubectl --                                             | functional-616785 | jenkins | v1.32.0 | 01 Dec 23 18:59 UTC | 01 Dec 23 18:59 UTC |
	|         | --context functional-616785                                              |                   |         |         |                     |                     |
	|         | get pods                                                                 |                   |         |         |                     |                     |
	| start   | -p functional-616785                                                     | functional-616785 | jenkins | v1.32.0 | 01 Dec 23 18:59 UTC |                     |
	|         | --extra-config=apiserver.enable-admission-plugins=NamespaceAutoProvision |                   |         |         |                     |                     |
	|         | --wait=all                                                               |                   |         |         |                     |                     |
	|---------|--------------------------------------------------------------------------|-------------------|---------|---------|---------------------|---------------------|
	
	* 
	* ==> Last Start <==
	* Log file created at: 2023/12/01 18:59:55
	Running on machine: ip-172-31-31-251
	Binary: Built with gc go1.21.4 for linux/arm64
	Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg
	I1201 18:59:55.615978  284652 out.go:296] Setting OutFile to fd 1 ...
	I1201 18:59:55.616150  284652 out.go:343] TERM=,COLORTERM=, which probably does not support color
	I1201 18:59:55.616154  284652 out.go:309] Setting ErrFile to fd 2...
	I1201 18:59:55.616158  284652 out.go:343] TERM=,COLORTERM=, which probably does not support color
	I1201 18:59:55.616415  284652 root.go:338] Updating PATH: /home/jenkins/minikube-integration/17703-252966/.minikube/bin
	I1201 18:59:55.617256  284652 out.go:303] Setting JSON to false
	I1201 18:59:55.618314  284652 start.go:128] hostinfo: {"hostname":"ip-172-31-31-251","uptime":6142,"bootTime":1701451054,"procs":240,"os":"linux","platform":"ubuntu","platformFamily":"debian","platformVersion":"20.04","kernelVersion":"5.15.0-1050-aws","kernelArch":"aarch64","virtualizationSystem":"","virtualizationRole":"","hostId":"982e3628-3742-4b3e-bb63-ac1b07660ec7"}
	I1201 18:59:55.618381  284652 start.go:138] virtualization:  
	I1201 18:59:55.620456  284652 out.go:177] * [functional-616785] minikube v1.32.0 on Ubuntu 20.04 (arm64)
	I1201 18:59:55.622614  284652 out.go:177]   - MINIKUBE_LOCATION=17703
	I1201 18:59:55.622808  284652 notify.go:220] Checking for updates...
	I1201 18:59:55.624517  284652 out.go:177]   - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
	I1201 18:59:55.626290  284652 out.go:177]   - KUBECONFIG=/home/jenkins/minikube-integration/17703-252966/kubeconfig
	I1201 18:59:55.628084  284652 out.go:177]   - MINIKUBE_HOME=/home/jenkins/minikube-integration/17703-252966/.minikube
	I1201 18:59:55.629702  284652 out.go:177]   - MINIKUBE_BIN=out/minikube-linux-arm64
	I1201 18:59:55.631323  284652 out.go:177]   - MINIKUBE_FORCE_SYSTEMD=
	I1201 18:59:55.633537  284652 config.go:182] Loaded profile config "functional-616785": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.28.4
	I1201 18:59:55.633635  284652 driver.go:392] Setting default libvirt URI to qemu:///system
	I1201 18:59:55.658918  284652 docker.go:122] docker version: linux-24.0.7:Docker Engine - Community
	I1201 18:59:55.659044  284652 cli_runner.go:164] Run: docker system info --format "{{json .}}"
	I1201 18:59:55.756934  284652 info.go:266] docker info: {ID:EOU5:DNGX:XN6V:L2FZ:UXRM:5TWK:EVUR:KC2F:GT7Z:Y4O4:GB77:5PD3 Containers:1 ContainersRunning:1 ContainersPaused:0 ContainersStopped:0 Images:2 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local logentries splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:true BridgeNfIP6Tables:true Debug:false NFd:35 OomKillDisable:true NGoroutines:54 SystemTime:2023-12-01 18:59:55.745393931 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1050-aws OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Architecture:aarch64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:2 MemTotal:8215040000 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ip-172-31-31-251 Labels:[] ExperimentalBuild:false ServerVersion:24.0.7 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:d8f198a4ed8892c764191ef7b3b06d8a2eeb5c7f Expected:d8f198a4ed8892c764191ef7b3b06d8a2eeb5c7f} RuncCommit:{ID:v1.1.10-0-g18a0cb0 Expected:v1.1.10-0-g18a0cb0} InitCommit:{ID:de40ad0 Expected:de40ad0} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.11.2] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.21.0]] Warnings:<nil>}}
	I1201 18:59:55.757022  284652 docker.go:295] overlay module found
	I1201 18:59:55.759278  284652 out.go:177] * Using the docker driver based on existing profile
	I1201 18:59:55.761446  284652 start.go:298] selected driver: docker
	I1201 18:59:55.761457  284652 start.go:902] validating driver "docker" against &{Name:functional-616785 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.42-1701387262-17703@sha256:a5458414df1be5e58eff93b3e67e6ecaad7e51ab23139de15714f7345af15e2f Memory:4000 CPUs:2 DiskSize:20000 VMDriver: Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:0 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.28.4 ClusterName:functional-616785 Namespace:default APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI: NodeIP: NodePort:8441 NodeName:} Nodes:[{Name: IP:192.168.49.2 Port:8441 KubernetesVersion:v1.28.4 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[default-storageclass:true storage-provisioner:true] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 AutoPauseInterval:1m0s GPUs:}
	I1201 18:59:55.761545  284652 start.go:913] status for docker: {Installed:true Healthy:true Running:false NeedsImprovement:false Error:<nil> Reason: Fix: Doc: Version:}
	I1201 18:59:55.761652  284652 cli_runner.go:164] Run: docker system info --format "{{json .}}"
	I1201 18:59:55.847452  284652 info.go:266] docker info: {ID:EOU5:DNGX:XN6V:L2FZ:UXRM:5TWK:EVUR:KC2F:GT7Z:Y4O4:GB77:5PD3 Containers:1 ContainersRunning:1 ContainersPaused:0 ContainersStopped:0 Images:2 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local logentries splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:true BridgeNfIP6Tables:true Debug:false NFd:35 OomKillDisable:true NGoroutines:54 SystemTime:2023-12-01 18:59:55.829174796 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1050-aws OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Architecture:aarch64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:2 MemTotal:8215040000 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ip-172-31-31-251 Labels:[] ExperimentalBuild:false ServerVersion:24.0.7 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:d8f198a4ed8892c764191ef7b3b06d8a2eeb5c7f Expected:d8f198a4ed8892c764191ef7b3b06d8a2eeb5c7f} RuncCommit:{ID:v1.1.10-0-g18a0cb0 Expected:v1.1.10-0-g18a0cb0} InitCommit:{ID:de40ad0 Expected:de40ad0} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.11.2] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.21.0]] Warnings:<nil>}}
	I1201 18:59:55.847893  284652 start_flags.go:931] Waiting for all components: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
	I1201 18:59:55.847955  284652 cni.go:84] Creating CNI manager for ""
	I1201 18:59:55.847963  284652 cni.go:143] "docker" driver + "containerd" runtime found, recommending kindnet
	I1201 18:59:55.847971  284652 start_flags.go:323] config:
	{Name:functional-616785 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.42-1701387262-17703@sha256:a5458414df1be5e58eff93b3e67e6ecaad7e51ab23139de15714f7345af15e2f Memory:4000 CPUs:2 DiskSize:20000 VMDriver: Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:0 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.28.4 ClusterName:functional-616785 Namespace:default APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[{Component:apiserver Key:enable-admission-plugins Value:NamespaceAutoProvision}] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI: NodeIP: NodePort:8441 NodeName:} Nodes:[{Name: IP:192.168.49.2 Port:8441 KubernetesVersion:v1.28.4 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[default-storageclass:true storage-provisioner:true] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 AutoPauseInterval:1m0s GPUs:}
	I1201 18:59:55.849912  284652 out.go:177] * Starting control plane node functional-616785 in cluster functional-616785
	I1201 18:59:55.851651  284652 cache.go:121] Beginning downloading kic base image for docker with containerd
	I1201 18:59:55.853530  284652 out.go:177] * Pulling base image ...
	I1201 18:59:55.855193  284652 preload.go:132] Checking if preload exists for k8s version v1.28.4 and runtime containerd
	I1201 18:59:55.855237  284652 preload.go:148] Found local preload: /home/jenkins/minikube-integration/17703-252966/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.28.4-containerd-overlay2-arm64.tar.lz4
	I1201 18:59:55.855244  284652 cache.go:56] Caching tarball of preloaded images
	I1201 18:59:55.855280  284652 image.go:79] Checking for gcr.io/k8s-minikube/kicbase-builds:v0.0.42-1701387262-17703@sha256:a5458414df1be5e58eff93b3e67e6ecaad7e51ab23139de15714f7345af15e2f in local docker daemon
	I1201 18:59:55.855329  284652 preload.go:174] Found /home/jenkins/minikube-integration/17703-252966/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.28.4-containerd-overlay2-arm64.tar.lz4 in cache, skipping download
	I1201 18:59:55.855338  284652 cache.go:59] Finished verifying existence of preloaded tar for  v1.28.4 on containerd
	I1201 18:59:55.855451  284652 profile.go:148] Saving config to /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/functional-616785/config.json ...
	I1201 18:59:55.873456  284652 image.go:83] Found gcr.io/k8s-minikube/kicbase-builds:v0.0.42-1701387262-17703@sha256:a5458414df1be5e58eff93b3e67e6ecaad7e51ab23139de15714f7345af15e2f in local docker daemon, skipping pull
	I1201 18:59:55.873470  284652 cache.go:144] gcr.io/k8s-minikube/kicbase-builds:v0.0.42-1701387262-17703@sha256:a5458414df1be5e58eff93b3e67e6ecaad7e51ab23139de15714f7345af15e2f exists in daemon, skipping load
	I1201 18:59:55.873483  284652 cache.go:194] Successfully downloaded all kic artifacts
	I1201 18:59:55.873531  284652 start.go:365] acquiring machines lock for functional-616785: {Name:mk7ba1999eae2f42d2c57a82c994f0f066e74e62 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
	I1201 18:59:55.873593  284652 start.go:369] acquired machines lock for "functional-616785" in 43.75µs
	I1201 18:59:55.873611  284652 start.go:96] Skipping create...Using existing machine configuration
	I1201 18:59:55.873615  284652 fix.go:54] fixHost starting: 
	I1201 18:59:55.873899  284652 cli_runner.go:164] Run: docker container inspect functional-616785 --format={{.State.Status}}
	I1201 18:59:55.893018  284652 fix.go:102] recreateIfNeeded on functional-616785: state=Running err=<nil>
	W1201 18:59:55.893046  284652 fix.go:128] unexpected machine state, will restart: <nil>
	I1201 18:59:55.895272  284652 out.go:177] * Updating the running docker "functional-616785" container ...
	I1201 18:59:55.896970  284652 machine.go:88] provisioning docker machine ...
	I1201 18:59:55.897001  284652 ubuntu.go:169] provisioning hostname "functional-616785"
	I1201 18:59:55.897079  284652 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" functional-616785
	I1201 18:59:55.918341  284652 main.go:141] libmachine: Using SSH client type: native
	I1201 18:59:55.918764  284652 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x3be600] 0x3c0d70 <nil>  [] 0s} 127.0.0.1 33098 <nil> <nil>}
	I1201 18:59:55.918775  284652 main.go:141] libmachine: About to run SSH command:
	sudo hostname functional-616785 && echo "functional-616785" | sudo tee /etc/hostname
	I1201 18:59:56.088809  284652 main.go:141] libmachine: SSH cmd err, output: <nil>: functional-616785
	
	I1201 18:59:56.088884  284652 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" functional-616785
	I1201 18:59:56.109943  284652 main.go:141] libmachine: Using SSH client type: native
	I1201 18:59:56.110358  284652 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x3be600] 0x3c0d70 <nil>  [] 0s} 127.0.0.1 33098 <nil> <nil>}
	I1201 18:59:56.110375  284652 main.go:141] libmachine: About to run SSH command:
	
			if ! grep -xq '.*\sfunctional-616785' /etc/hosts; then
				if grep -xq '127.0.1.1\s.*' /etc/hosts; then
					sudo sed -i 's/^127.0.1.1\s.*/127.0.1.1 functional-616785/g' /etc/hosts;
				else 
					echo '127.0.1.1 functional-616785' | sudo tee -a /etc/hosts; 
				fi
			fi
	I1201 18:59:56.262166  284652 main.go:141] libmachine: SSH cmd err, output: <nil>: 
	I1201 18:59:56.262184  284652 ubuntu.go:175] set auth options {CertDir:/home/jenkins/minikube-integration/17703-252966/.minikube CaCertPath:/home/jenkins/minikube-integration/17703-252966/.minikube/certs/ca.pem CaPrivateKeyPath:/home/jenkins/minikube-integration/17703-252966/.minikube/certs/ca-key.pem CaCertRemotePath:/etc/docker/ca.pem ServerCertPath:/home/jenkins/minikube-integration/17703-252966/.minikube/machines/server.pem ServerKeyPath:/home/jenkins/minikube-integration/17703-252966/.minikube/machines/server-key.pem ClientKeyPath:/home/jenkins/minikube-integration/17703-252966/.minikube/certs/key.pem ServerCertRemotePath:/etc/docker/server.pem ServerKeyRemotePath:/etc/docker/server-key.pem ClientCertPath:/home/jenkins/minikube-integration/17703-252966/.minikube/certs/cert.pem ServerCertSANs:[] StorePath:/home/jenkins/minikube-integration/17703-252966/.minikube}
	I1201 18:59:56.262208  284652 ubuntu.go:177] setting up certificates
	I1201 18:59:56.262218  284652 provision.go:83] configureAuth start
	I1201 18:59:56.262288  284652 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" functional-616785
	I1201 18:59:56.284151  284652 provision.go:138] copyHostCerts
	I1201 18:59:56.284209  284652 exec_runner.go:144] found /home/jenkins/minikube-integration/17703-252966/.minikube/ca.pem, removing ...
	I1201 18:59:56.284217  284652 exec_runner.go:203] rm: /home/jenkins/minikube-integration/17703-252966/.minikube/ca.pem
	I1201 18:59:56.284294  284652 exec_runner.go:151] cp: /home/jenkins/minikube-integration/17703-252966/.minikube/certs/ca.pem --> /home/jenkins/minikube-integration/17703-252966/.minikube/ca.pem (1078 bytes)
	I1201 18:59:56.284398  284652 exec_runner.go:144] found /home/jenkins/minikube-integration/17703-252966/.minikube/cert.pem, removing ...
	I1201 18:59:56.284401  284652 exec_runner.go:203] rm: /home/jenkins/minikube-integration/17703-252966/.minikube/cert.pem
	I1201 18:59:56.284427  284652 exec_runner.go:151] cp: /home/jenkins/minikube-integration/17703-252966/.minikube/certs/cert.pem --> /home/jenkins/minikube-integration/17703-252966/.minikube/cert.pem (1123 bytes)
	I1201 18:59:56.284516  284652 exec_runner.go:144] found /home/jenkins/minikube-integration/17703-252966/.minikube/key.pem, removing ...
	I1201 18:59:56.284521  284652 exec_runner.go:203] rm: /home/jenkins/minikube-integration/17703-252966/.minikube/key.pem
	I1201 18:59:56.284547  284652 exec_runner.go:151] cp: /home/jenkins/minikube-integration/17703-252966/.minikube/certs/key.pem --> /home/jenkins/minikube-integration/17703-252966/.minikube/key.pem (1679 bytes)
	I1201 18:59:56.284595  284652 provision.go:112] generating server cert: /home/jenkins/minikube-integration/17703-252966/.minikube/machines/server.pem ca-key=/home/jenkins/minikube-integration/17703-252966/.minikube/certs/ca.pem private-key=/home/jenkins/minikube-integration/17703-252966/.minikube/certs/ca-key.pem org=jenkins.functional-616785 san=[192.168.49.2 127.0.0.1 localhost 127.0.0.1 minikube functional-616785]
	I1201 18:59:56.682978  284652 provision.go:172] copyRemoteCerts
	I1201 18:59:56.683037  284652 ssh_runner.go:195] Run: sudo mkdir -p /etc/docker /etc/docker /etc/docker
	I1201 18:59:56.683077  284652 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" functional-616785
	I1201 18:59:56.708095  284652 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33098 SSHKeyPath:/home/jenkins/minikube-integration/17703-252966/.minikube/machines/functional-616785/id_rsa Username:docker}
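Note: the two lines above show the access pattern used throughout this log: docker container inspect resolves the host port published for the container's 22/tcp (33098 here), and an SSH client is opened against 127.0.0.1 on that port with the profile's id_rsa key. A minimal sketch of that dial, assuming golang.org/x/crypto/ssh (host-key verification is skipped, which is only acceptable for a throwaway test node):

	// ssh_dial_sketch.go - a hedged sketch, not minikube's sshutil implementation.
	package main

	import (
		"fmt"
		"os"

		"golang.org/x/crypto/ssh"
	)

	func dialNode(addr, keyPath, user string) (*ssh.Client, error) {
		key, err := os.ReadFile(keyPath)
		if err != nil {
			return nil, err
		}
		signer, err := ssh.ParsePrivateKey(key)
		if err != nil {
			return nil, err
		}
		cfg := &ssh.ClientConfig{
			User:            user,
			Auth:            []ssh.AuthMethod{ssh.PublicKeys(signer)},
			HostKeyCallback: ssh.InsecureIgnoreHostKey(), // test environment only
		}
		return ssh.Dial("tcp", addr, cfg)
	}

	func main() {
		client, err := dialNode("127.0.0.1:33098",
			"/home/jenkins/minikube-integration/17703-252966/.minikube/machines/functional-616785/id_rsa",
			"docker")
		if err != nil {
			fmt.Fprintln(os.Stderr, err)
			return
		}
		defer client.Close()
		fmt.Println("ssh session established")
	}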
	I1201 18:59:56.815437  284652 ssh_runner.go:362] scp /home/jenkins/minikube-integration/17703-252966/.minikube/certs/ca.pem --> /etc/docker/ca.pem (1078 bytes)
	I1201 18:59:56.850398  284652 ssh_runner.go:362] scp /home/jenkins/minikube-integration/17703-252966/.minikube/machines/server.pem --> /etc/docker/server.pem (1229 bytes)
	I1201 18:59:56.881389  284652 ssh_runner.go:362] scp /home/jenkins/minikube-integration/17703-252966/.minikube/machines/server-key.pem --> /etc/docker/server-key.pem (1679 bytes)
	I1201 18:59:56.911608  284652 provision.go:86] duration metric: configureAuth took 649.363698ms
	I1201 18:59:56.911625  284652 ubuntu.go:193] setting minikube options for container-runtime
	I1201 18:59:56.911822  284652 config.go:182] Loaded profile config "functional-616785": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.28.4
	I1201 18:59:56.911828  284652 machine.go:91] provisioned docker machine in 1.014842252s
	I1201 18:59:56.911833  284652 start.go:300] post-start starting for "functional-616785" (driver="docker")
	I1201 18:59:56.911844  284652 start.go:329] creating required directories: [/etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs]
	I1201 18:59:56.911894  284652 ssh_runner.go:195] Run: sudo mkdir -p /etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs
	I1201 18:59:56.911930  284652 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" functional-616785
	I1201 18:59:56.930511  284652 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33098 SSHKeyPath:/home/jenkins/minikube-integration/17703-252966/.minikube/machines/functional-616785/id_rsa Username:docker}
	I1201 18:59:57.036977  284652 ssh_runner.go:195] Run: cat /etc/os-release
	I1201 18:59:57.041577  284652 main.go:141] libmachine: Couldn't set key VERSION_CODENAME, no corresponding struct field found
	I1201 18:59:57.041603  284652 main.go:141] libmachine: Couldn't set key PRIVACY_POLICY_URL, no corresponding struct field found
	I1201 18:59:57.041612  284652 main.go:141] libmachine: Couldn't set key UBUNTU_CODENAME, no corresponding struct field found
	I1201 18:59:57.041619  284652 info.go:137] Remote host: Ubuntu 22.04.3 LTS
	I1201 18:59:57.041628  284652 filesync.go:126] Scanning /home/jenkins/minikube-integration/17703-252966/.minikube/addons for local assets ...
	I1201 18:59:57.041688  284652 filesync.go:126] Scanning /home/jenkins/minikube-integration/17703-252966/.minikube/files for local assets ...
	I1201 18:59:57.041764  284652 filesync.go:149] local asset: /home/jenkins/minikube-integration/17703-252966/.minikube/files/etc/ssl/certs/2583012.pem -> 2583012.pem in /etc/ssl/certs
	I1201 18:59:57.041841  284652 filesync.go:149] local asset: /home/jenkins/minikube-integration/17703-252966/.minikube/files/etc/test/nested/copy/258301/hosts -> hosts in /etc/test/nested/copy/258301
	I1201 18:59:57.041896  284652 ssh_runner.go:195] Run: sudo mkdir -p /etc/ssl/certs /etc/test/nested/copy/258301
	I1201 18:59:57.054186  284652 ssh_runner.go:362] scp /home/jenkins/minikube-integration/17703-252966/.minikube/files/etc/ssl/certs/2583012.pem --> /etc/ssl/certs/2583012.pem (1708 bytes)
	I1201 18:59:57.086073  284652 ssh_runner.go:362] scp /home/jenkins/minikube-integration/17703-252966/.minikube/files/etc/test/nested/copy/258301/hosts --> /etc/test/nested/copy/258301/hosts (40 bytes)
	I1201 18:59:57.118038  284652 start.go:303] post-start completed in 206.189948ms
	I1201 18:59:57.118126  284652 ssh_runner.go:195] Run: sh -c "df -h /var | awk 'NR==2{print $5}'"
	I1201 18:59:57.118167  284652 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" functional-616785
	I1201 18:59:57.137858  284652 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33098 SSHKeyPath:/home/jenkins/minikube-integration/17703-252966/.minikube/machines/functional-616785/id_rsa Username:docker}
	I1201 18:59:57.238766  284652 ssh_runner.go:195] Run: sh -c "df -BG /var | awk 'NR==2{print $4}'"
	I1201 18:59:57.245250  284652 fix.go:56] fixHost completed within 1.371626283s
	I1201 18:59:57.245266  284652 start.go:83] releasing machines lock for "functional-616785", held for 1.371666038s
	I1201 18:59:57.245347  284652 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" functional-616785
	I1201 18:59:57.263909  284652 ssh_runner.go:195] Run: cat /version.json
	I1201 18:59:57.263954  284652 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" functional-616785
	I1201 18:59:57.264228  284652 ssh_runner.go:195] Run: curl -sS -m 2 https://registry.k8s.io/
	I1201 18:59:57.264281  284652 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" functional-616785
	I1201 18:59:57.282625  284652 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33098 SSHKeyPath:/home/jenkins/minikube-integration/17703-252966/.minikube/machines/functional-616785/id_rsa Username:docker}
	I1201 18:59:57.304543  284652 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33098 SSHKeyPath:/home/jenkins/minikube-integration/17703-252966/.minikube/machines/functional-616785/id_rsa Username:docker}
	I1201 18:59:57.389146  284652 ssh_runner.go:195] Run: systemctl --version
	I1201 18:59:57.527936  284652 ssh_runner.go:195] Run: sh -c "stat /etc/cni/net.d/*loopback.conf*"
	I1201 18:59:57.533677  284652 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f -name *loopback.conf* -not -name *.mk_disabled -exec sh -c "grep -q loopback {} && ( grep -q name {} || sudo sed -i '/"type": "loopback"/i \ \ \ \ "name": "loopback",' {} ) && sudo sed -i 's|"cniVersion": ".*"|"cniVersion": "1.0.0"|g' {}" ;
	I1201 18:59:57.557046  284652 cni.go:230] loopback cni configuration patched: "/etc/cni/net.d/*loopback.conf*" found
	I1201 18:59:57.557116  284652 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f ( ( -name *bridge* -or -name *podman* ) -and -not -name *.mk_disabled ) -printf "%p, " -exec sh -c "sudo mv {} {}.mk_disabled" ;
	I1201 18:59:57.567961  284652 cni.go:259] no active bridge cni configs found in "/etc/cni/net.d" - nothing to disable
	I1201 18:59:57.567984  284652 start.go:475] detecting cgroup driver to use...
	I1201 18:59:57.568024  284652 detect.go:196] detected "cgroupfs" cgroup driver on host os
	I1201 18:59:57.568072  284652 ssh_runner.go:195] Run: sudo systemctl stop -f crio
	I1201 18:59:57.583616  284652 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service crio
	I1201 18:59:57.597365  284652 docker.go:203] disabling cri-docker service (if available) ...
	I1201 18:59:57.597425  284652 ssh_runner.go:195] Run: sudo systemctl stop -f cri-docker.socket
	I1201 18:59:57.612564  284652 ssh_runner.go:195] Run: sudo systemctl stop -f cri-docker.service
	I1201 18:59:57.627735  284652 ssh_runner.go:195] Run: sudo systemctl disable cri-docker.socket
	I1201 18:59:57.744845  284652 ssh_runner.go:195] Run: sudo systemctl mask cri-docker.service
	I1201 18:59:57.872977  284652 docker.go:219] disabling docker service ...
	I1201 18:59:57.873042  284652 ssh_runner.go:195] Run: sudo systemctl stop -f docker.socket
	I1201 18:59:57.887845  284652 ssh_runner.go:195] Run: sudo systemctl stop -f docker.service
	I1201 18:59:57.901426  284652 ssh_runner.go:195] Run: sudo systemctl disable docker.socket
	I1201 18:59:58.027596  284652 ssh_runner.go:195] Run: sudo systemctl mask docker.service
	I1201 18:59:58.155228  284652 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service docker
	I1201 18:59:58.169700  284652 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///run/containerd/containerd.sock
	" | sudo tee /etc/crictl.yaml"
	I1201 18:59:58.190749  284652 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)sandbox_image = .*$|\1sandbox_image = "registry.k8s.io/pause:3.9"|' /etc/containerd/config.toml"
	I1201 18:59:58.203508  284652 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)restrict_oom_score_adj = .*$|\1restrict_oom_score_adj = false|' /etc/containerd/config.toml"
	I1201 18:59:58.217134  284652 containerd.go:145] configuring containerd to use "cgroupfs" as cgroup driver...
	I1201 18:59:58.217201  284652 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)SystemdCgroup = .*$|\1SystemdCgroup = false|g' /etc/containerd/config.toml"
	I1201 18:59:58.230118  284652 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runtime.v1.linux"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
	I1201 18:59:58.242232  284652 ssh_runner.go:195] Run: sh -c "sudo sed -i '/systemd_cgroup/d' /etc/containerd/config.toml"
	I1201 18:59:58.254343  284652 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runc.v1"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
	I1201 18:59:58.267085  284652 ssh_runner.go:195] Run: sh -c "sudo rm -rf /etc/cni/net.mk"
	I1201 18:59:58.278909  284652 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)conf_dir = .*$|\1conf_dir = "/etc/cni/net.d"|g' /etc/containerd/config.toml"
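Note: the sed calls above patch /etc/containerd/config.toml in place so containerd matches the "cgroupfs" driver detected on the host (SystemdCgroup = false), uses the runc v2 shim, pins the pause image, and points conf_dir at /etc/cni/net.d. A hedged Go sketch of one such edit, equivalent to the SystemdCgroup sed a few lines up (illustrative only, not minikube's code):

	// containerd_patch_sketch.go - same effect as:
	//   sed -i -r 's|^( *)SystemdCgroup = .*$|\1SystemdCgroup = false|g' /etc/containerd/config.toml
	package main

	import (
		"fmt"
		"os"
		"regexp"
	)

	func main() {
		path := "/etc/containerd/config.toml"
		data, err := os.ReadFile(path)
		if err != nil {
			fmt.Fprintln(os.Stderr, err)
			return
		}
		// (?m) makes ^ and $ match per line; ${1} preserves the original indentation.
		re := regexp.MustCompile(`(?m)^( *)SystemdCgroup = .*$`)
		patched := re.ReplaceAll(data, []byte("${1}SystemdCgroup = false"))
		if err := os.WriteFile(path, patched, 0644); err != nil {
			fmt.Fprintln(os.Stderr, err)
		}
	}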
	I1201 18:59:58.292855  284652 ssh_runner.go:195] Run: sudo sysctl net.bridge.bridge-nf-call-iptables
	I1201 18:59:58.303063  284652 ssh_runner.go:195] Run: sudo sh -c "echo 1 > /proc/sys/net/ipv4/ip_forward"
	I1201 18:59:58.313254  284652 ssh_runner.go:195] Run: sudo systemctl daemon-reload
	I1201 18:59:58.442855  284652 ssh_runner.go:195] Run: sudo systemctl restart containerd
	I1201 18:59:58.659034  284652 start.go:522] Will wait 60s for socket path /run/containerd/containerd.sock
	I1201 18:59:58.659105  284652 ssh_runner.go:195] Run: stat /run/containerd/containerd.sock
	I1201 18:59:58.664191  284652 start.go:543] Will wait 60s for crictl version
	I1201 18:59:58.664244  284652 ssh_runner.go:195] Run: which crictl
	I1201 18:59:58.668768  284652 ssh_runner.go:195] Run: sudo /usr/bin/crictl version
	I1201 18:59:58.711585  284652 start.go:559] Version:  0.1.0
	RuntimeName:  containerd
	RuntimeVersion:  1.6.25
	RuntimeApiVersion:  v1
	I1201 18:59:58.711654  284652 ssh_runner.go:195] Run: containerd --version
	I1201 18:59:58.743914  284652 ssh_runner.go:195] Run: containerd --version
	I1201 18:59:58.774417  284652 out.go:177] * Preparing Kubernetes v1.28.4 on containerd 1.6.25 ...
	I1201 18:59:58.776243  284652 cli_runner.go:164] Run: docker network inspect functional-616785 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
	I1201 18:59:58.807956  284652 ssh_runner.go:195] Run: grep 192.168.49.1	host.minikube.internal$ /etc/hosts
	I1201 18:59:58.815592  284652 out.go:177]   - apiserver.enable-admission-plugins=NamespaceAutoProvision
	I1201 18:59:58.817338  284652 preload.go:132] Checking if preload exists for k8s version v1.28.4 and runtime containerd
	I1201 18:59:58.817417  284652 ssh_runner.go:195] Run: sudo crictl images --output json
	I1201 18:59:58.862005  284652 containerd.go:604] all images are preloaded for containerd runtime.
	I1201 18:59:58.862017  284652 containerd.go:518] Images already preloaded, skipping extraction
	I1201 18:59:58.862068  284652 ssh_runner.go:195] Run: sudo crictl images --output json
	I1201 18:59:58.904434  284652 containerd.go:604] all images are preloaded for containerd runtime.
	I1201 18:59:58.904450  284652 cache_images.go:84] Images are preloaded, skipping loading
	I1201 18:59:58.904561  284652 ssh_runner.go:195] Run: sudo crictl info
	I1201 18:59:58.945488  284652 extraconfig.go:124] Overwriting default enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota with user provided enable-admission-plugins=NamespaceAutoProvision for component apiserver
	I1201 18:59:58.945515  284652 cni.go:84] Creating CNI manager for ""
	I1201 18:59:58.945526  284652 cni.go:143] "docker" driver + "containerd" runtime found, recommending kindnet
	I1201 18:59:58.945533  284652 kubeadm.go:87] Using pod CIDR: 10.244.0.0/16
	I1201 18:59:58.945549  284652 kubeadm.go:176] kubeadm options: {CertDir:/var/lib/minikube/certs ServiceCIDR:10.96.0.0/12 PodSubnet:10.244.0.0/16 AdvertiseAddress:192.168.49.2 APIServerPort:8441 KubernetesVersion:v1.28.4 EtcdDataDir:/var/lib/minikube/etcd EtcdExtraArgs:map[] ClusterName:functional-616785 NodeName:functional-616785 DNSDomain:cluster.local CRISocket:/run/containerd/containerd.sock ImageRepository: ComponentOptions:[{Component:apiServer ExtraArgs:map[enable-admission-plugins:NamespaceAutoProvision] Pairs:map[certSANs:["127.0.0.1", "localhost", "192.168.49.2"]]} {Component:controllerManager ExtraArgs:map[allocate-node-cidrs:true leader-elect:false] Pairs:map[]} {Component:scheduler ExtraArgs:map[leader-elect:false] Pairs:map[]}] FeatureArgs:map[] NodeIP:192.168.49.2 CgroupDriver:cgroupfs ClientCAFile:/var/lib/minikube/certs/ca.crt StaticPodPath:/etc/kubernetes/manifests ControlPlaneAddress:control-plane.minikube.internal KubeProxyOptions:map[] ResolvConfSearchRegression:false KubeletConfi
gOpts:map[hairpinMode:hairpin-veth runtimeRequestTimeout:15m] PrependCriSocketUnix:true}
	I1201 18:59:58.945665  284652 kubeadm.go:181] kubeadm config:
	apiVersion: kubeadm.k8s.io/v1beta3
	kind: InitConfiguration
	localAPIEndpoint:
	  advertiseAddress: 192.168.49.2
	  bindPort: 8441
	bootstrapTokens:
	  - groups:
	      - system:bootstrappers:kubeadm:default-node-token
	    ttl: 24h0m0s
	    usages:
	      - signing
	      - authentication
	nodeRegistration:
	  criSocket: unix:///run/containerd/containerd.sock
	  name: "functional-616785"
	  kubeletExtraArgs:
	    node-ip: 192.168.49.2
	  taints: []
	---
	apiVersion: kubeadm.k8s.io/v1beta3
	kind: ClusterConfiguration
	apiServer:
	  certSANs: ["127.0.0.1", "localhost", "192.168.49.2"]
	  extraArgs:
	    enable-admission-plugins: "NamespaceAutoProvision"
	controllerManager:
	  extraArgs:
	    allocate-node-cidrs: "true"
	    leader-elect: "false"
	scheduler:
	  extraArgs:
	    leader-elect: "false"
	certificatesDir: /var/lib/minikube/certs
	clusterName: mk
	controlPlaneEndpoint: control-plane.minikube.internal:8441
	etcd:
	  local:
	    dataDir: /var/lib/minikube/etcd
	    extraArgs:
	      proxy-refresh-interval: "70000"
	kubernetesVersion: v1.28.4
	networking:
	  dnsDomain: cluster.local
	  podSubnet: "10.244.0.0/16"
	  serviceSubnet: 10.96.0.0/12
	---
	apiVersion: kubelet.config.k8s.io/v1beta1
	kind: KubeletConfiguration
	authentication:
	  x509:
	    clientCAFile: /var/lib/minikube/certs/ca.crt
	cgroupDriver: cgroupfs
	hairpinMode: hairpin-veth
	runtimeRequestTimeout: 15m
	clusterDomain: "cluster.local"
	# disable disk resource management by default
	imageGCHighThresholdPercent: 100
	evictionHard:
	  nodefs.available: "0%"
	  nodefs.inodesFree: "0%"
	  imagefs.available: "0%"
	failSwapOn: false
	staticPodPath: /etc/kubernetes/manifests
	---
	apiVersion: kubeproxy.config.k8s.io/v1alpha1
	kind: KubeProxyConfiguration
	clusterCIDR: "10.244.0.0/16"
	metricsBindAddress: 0.0.0.0:10249
	conntrack:
	  maxPerCore: 0
	# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_established"
	  tcpEstablishedTimeout: 0s
	# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_close"
	  tcpCloseWaitTimeout: 0s
	
	I1201 18:59:58.945726  284652 kubeadm.go:976] kubelet [Unit]
	Wants=containerd.service
	
	[Service]
	ExecStart=
	ExecStart=/var/lib/minikube/binaries/v1.28.4/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --config=/var/lib/kubelet/config.yaml --container-runtime-endpoint=unix:///run/containerd/containerd.sock --hostname-override=functional-616785 --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.49.2
	
	[Install]
	 config:
	{KubernetesVersion:v1.28.4 ClusterName:functional-616785 Namespace:default APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[{Component:apiserver Key:enable-admission-plugins Value:NamespaceAutoProvision}] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI: NodeIP: NodePort:8441 NodeName:}
	I1201 18:59:58.945786  284652 ssh_runner.go:195] Run: sudo ls /var/lib/minikube/binaries/v1.28.4
	I1201 18:59:58.956311  284652 binaries.go:44] Found k8s binaries, skipping transfer
	I1201 18:59:58.956381  284652 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/kubelet.service.d /lib/systemd/system /var/tmp/minikube
	I1201 18:59:58.966753  284652 ssh_runner.go:362] scp memory --> /etc/systemd/system/kubelet.service.d/10-kubeadm.conf (389 bytes)
	I1201 18:59:58.989937  284652 ssh_runner.go:362] scp memory --> /lib/systemd/system/kubelet.service (352 bytes)
	I1201 18:59:59.014337  284652 ssh_runner.go:362] scp memory --> /var/tmp/minikube/kubeadm.yaml.new (1956 bytes)
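Note: the three "scp memory" lines above push generated text straight onto the node: the kubelet systemd drop-in, the kubelet unit, and the new kubeadm.yaml. A small sketch of how such a drop-in could be rendered before upload using text/template; the struct field names are illustrative, but the ExecStart flags are the ones shown just above in this log:

	// kubelet_dropin_sketch.go - rendering sketch only; not minikube's templating code.
	package main

	import (
		"os"
		"text/template"
	)

	const dropin = `[Unit]
	Wants=containerd.service

	[Service]
	ExecStart=
	ExecStart=/var/lib/minikube/binaries/{{.KubernetesVersion}}/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --config=/var/lib/kubelet/config.yaml --container-runtime-endpoint=unix:///run/containerd/containerd.sock --hostname-override={{.NodeName}} --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip={{.NodeIP}}

	[Install]
	`

	func main() {
		t := template.Must(template.New("10-kubeadm.conf").Parse(dropin))
		_ = t.Execute(os.Stdout, struct {
			KubernetesVersion, NodeName, NodeIP string
		}{"v1.28.4", "functional-616785", "192.168.49.2"})
	}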
	I1201 18:59:59.037366  284652 ssh_runner.go:195] Run: grep 192.168.49.2	control-plane.minikube.internal$ /etc/hosts
	I1201 18:59:59.042025  284652 certs.go:56] Setting up /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/functional-616785 for IP: 192.168.49.2
	I1201 18:59:59.042048  284652 certs.go:190] acquiring lock for shared ca certs: {Name:mk799b1e63d23a413d1b6e34a0169dabbea1b951 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
	I1201 18:59:59.042193  284652 certs.go:199] skipping minikubeCA CA generation: /home/jenkins/minikube-integration/17703-252966/.minikube/ca.key
	I1201 18:59:59.042238  284652 certs.go:199] skipping proxyClientCA CA generation: /home/jenkins/minikube-integration/17703-252966/.minikube/proxy-client-ca.key
	I1201 18:59:59.042310  284652 certs.go:315] skipping minikube-user signed cert generation: /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/functional-616785/client.key
	I1201 18:59:59.042352  284652 certs.go:315] skipping minikube signed cert generation: /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/functional-616785/apiserver.key.dd3b5fb2
	I1201 18:59:59.042402  284652 certs.go:315] skipping aggregator signed cert generation: /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/functional-616785/proxy-client.key
	I1201 18:59:59.042510  284652 certs.go:437] found cert: /home/jenkins/minikube-integration/17703-252966/.minikube/certs/home/jenkins/minikube-integration/17703-252966/.minikube/certs/258301.pem (1338 bytes)
	W1201 18:59:59.042536  284652 certs.go:433] ignoring /home/jenkins/minikube-integration/17703-252966/.minikube/certs/home/jenkins/minikube-integration/17703-252966/.minikube/certs/258301_empty.pem, impossibly tiny 0 bytes
	I1201 18:59:59.042544  284652 certs.go:437] found cert: /home/jenkins/minikube-integration/17703-252966/.minikube/certs/home/jenkins/minikube-integration/17703-252966/.minikube/certs/ca-key.pem (1675 bytes)
	I1201 18:59:59.042568  284652 certs.go:437] found cert: /home/jenkins/minikube-integration/17703-252966/.minikube/certs/home/jenkins/minikube-integration/17703-252966/.minikube/certs/ca.pem (1078 bytes)
	I1201 18:59:59.042592  284652 certs.go:437] found cert: /home/jenkins/minikube-integration/17703-252966/.minikube/certs/home/jenkins/minikube-integration/17703-252966/.minikube/certs/cert.pem (1123 bytes)
	I1201 18:59:59.042617  284652 certs.go:437] found cert: /home/jenkins/minikube-integration/17703-252966/.minikube/certs/home/jenkins/minikube-integration/17703-252966/.minikube/certs/key.pem (1679 bytes)
	I1201 18:59:59.042683  284652 certs.go:437] found cert: /home/jenkins/minikube-integration/17703-252966/.minikube/files/etc/ssl/certs/home/jenkins/minikube-integration/17703-252966/.minikube/files/etc/ssl/certs/2583012.pem (1708 bytes)
	I1201 18:59:59.043457  284652 ssh_runner.go:362] scp /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/functional-616785/apiserver.crt --> /var/lib/minikube/certs/apiserver.crt (1399 bytes)
	I1201 18:59:59.073387  284652 ssh_runner.go:362] scp /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/functional-616785/apiserver.key --> /var/lib/minikube/certs/apiserver.key (1679 bytes)
	I1201 18:59:59.105153  284652 ssh_runner.go:362] scp /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/functional-616785/proxy-client.crt --> /var/lib/minikube/certs/proxy-client.crt (1147 bytes)
	I1201 18:59:59.138360  284652 ssh_runner.go:362] scp /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/functional-616785/proxy-client.key --> /var/lib/minikube/certs/proxy-client.key (1679 bytes)
	I1201 18:59:59.167982  284652 ssh_runner.go:362] scp /home/jenkins/minikube-integration/17703-252966/.minikube/ca.crt --> /var/lib/minikube/certs/ca.crt (1111 bytes)
	I1201 18:59:59.198786  284652 ssh_runner.go:362] scp /home/jenkins/minikube-integration/17703-252966/.minikube/ca.key --> /var/lib/minikube/certs/ca.key (1679 bytes)
	I1201 18:59:59.230510  284652 ssh_runner.go:362] scp /home/jenkins/minikube-integration/17703-252966/.minikube/proxy-client-ca.crt --> /var/lib/minikube/certs/proxy-client-ca.crt (1119 bytes)
	I1201 18:59:59.259864  284652 ssh_runner.go:362] scp /home/jenkins/minikube-integration/17703-252966/.minikube/proxy-client-ca.key --> /var/lib/minikube/certs/proxy-client-ca.key (1675 bytes)
	I1201 18:59:59.290234  284652 ssh_runner.go:362] scp /home/jenkins/minikube-integration/17703-252966/.minikube/ca.crt --> /usr/share/ca-certificates/minikubeCA.pem (1111 bytes)
	I1201 18:59:59.319681  284652 ssh_runner.go:362] scp /home/jenkins/minikube-integration/17703-252966/.minikube/certs/258301.pem --> /usr/share/ca-certificates/258301.pem (1338 bytes)
	I1201 18:59:59.349001  284652 ssh_runner.go:362] scp /home/jenkins/minikube-integration/17703-252966/.minikube/files/etc/ssl/certs/2583012.pem --> /usr/share/ca-certificates/2583012.pem (1708 bytes)
	I1201 18:59:59.379151  284652 ssh_runner.go:362] scp memory --> /var/lib/minikube/kubeconfig (738 bytes)
	I1201 18:59:59.400764  284652 ssh_runner.go:195] Run: openssl version
	I1201 18:59:59.408388  284652 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/minikubeCA.pem && ln -fs /usr/share/ca-certificates/minikubeCA.pem /etc/ssl/certs/minikubeCA.pem"
	I1201 18:59:59.420594  284652 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/minikubeCA.pem
	I1201 18:59:59.425790  284652 certs.go:480] hashing: -rw-r--r-- 1 root root 1111 Dec  1 18:52 /usr/share/ca-certificates/minikubeCA.pem
	I1201 18:59:59.425848  284652 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem
	I1201 18:59:59.434615  284652 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/b5213941.0 || ln -fs /etc/ssl/certs/minikubeCA.pem /etc/ssl/certs/b5213941.0"
	I1201 18:59:59.445634  284652 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/258301.pem && ln -fs /usr/share/ca-certificates/258301.pem /etc/ssl/certs/258301.pem"
	I1201 18:59:59.457374  284652 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/258301.pem
	I1201 18:59:59.462082  284652 certs.go:480] hashing: -rw-r--r-- 1 root root 1338 Dec  1 18:58 /usr/share/ca-certificates/258301.pem
	I1201 18:59:59.462148  284652 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/258301.pem
	I1201 18:59:59.470831  284652 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/51391683.0 || ln -fs /etc/ssl/certs/258301.pem /etc/ssl/certs/51391683.0"
	I1201 18:59:59.481952  284652 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/2583012.pem && ln -fs /usr/share/ca-certificates/2583012.pem /etc/ssl/certs/2583012.pem"
	I1201 18:59:59.494085  284652 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/2583012.pem
	I1201 18:59:59.499453  284652 certs.go:480] hashing: -rw-r--r-- 1 root root 1708 Dec  1 18:58 /usr/share/ca-certificates/2583012.pem
	I1201 18:59:59.499520  284652 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/2583012.pem
	I1201 18:59:59.508372  284652 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/3ec20f2e.0 || ln -fs /etc/ssl/certs/2583012.pem /etc/ssl/certs/3ec20f2e.0"
	I1201 18:59:59.519920  284652 ssh_runner.go:195] Run: ls /var/lib/minikube/certs/etcd
	I1201 18:59:59.524900  284652 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/apiserver-etcd-client.crt -checkend 86400
	I1201 18:59:59.534001  284652 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/apiserver-kubelet-client.crt -checkend 86400
	I1201 18:59:59.542551  284652 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/etcd/server.crt -checkend 86400
	I1201 18:59:59.551194  284652 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/etcd/healthcheck-client.crt -checkend 86400
	I1201 18:59:59.559847  284652 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/etcd/peer.crt -checkend 86400
	I1201 18:59:59.568456  284652 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/front-proxy-client.crt -checkend 86400
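Note: each "openssl x509 ... -checkend 86400" run above asks whether a certificate will still be valid 24 hours from now; a non-zero exit would trigger regeneration. A minimal Go equivalent of that check (a sketch, not minikube's code):

	// certcheck_sketch.go - answers the same question as `openssl x509 -noout -checkend 86400`.
	package main

	import (
		"crypto/x509"
		"encoding/pem"
		"fmt"
		"os"
		"time"
	)

	// expiresWithin reports whether the PEM certificate at path expires within d.
	func expiresWithin(path string, d time.Duration) (bool, error) {
		data, err := os.ReadFile(path)
		if err != nil {
			return false, err
		}
		block, _ := pem.Decode(data)
		if block == nil {
			return false, fmt.Errorf("no PEM block in %s", path)
		}
		cert, err := x509.ParseCertificate(block.Bytes)
		if err != nil {
			return false, err
		}
		return time.Now().Add(d).After(cert.NotAfter), nil
	}

	func main() {
		soon, err := expiresWithin("/var/lib/minikube/certs/front-proxy-client.crt", 24*time.Hour)
		if err != nil {
			fmt.Fprintln(os.Stderr, err)
			return
		}
		if soon {
			fmt.Println("certificate expires within 24h and would need regenerating")
		}
	}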
	I1201 18:59:59.577088  284652 kubeadm.go:404] StartCluster: {Name:functional-616785 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.42-1701387262-17703@sha256:a5458414df1be5e58eff93b3e67e6ecaad7e51ab23139de15714f7345af15e2f Memory:4000 CPUs:2 DiskSize:20000 VMDriver: Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:0 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.28.4 ClusterName:functional-616785 Namespace:default APIServerName:minikubeCA APIServerNames:
[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[{Component:apiserver Key:enable-admission-plugins Value:NamespaceAutoProvision}] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI: NodeIP: NodePort:8441 NodeName:} Nodes:[{Name: IP:192.168.49.2 Port:8441 KubernetesVersion:v1.28.4 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[default-storageclass:true storage-provisioner:true] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID
:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 AutoPauseInterval:1m0s GPUs:}
	I1201 18:59:59.577165  284652 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:paused Name: Namespaces:[kube-system]}
	I1201 18:59:59.577227  284652 ssh_runner.go:195] Run: sudo -s eval "crictl ps -a --quiet --label io.kubernetes.pod.namespace=kube-system"
	I1201 18:59:59.619843  284652 cri.go:89] found id: "172a910da74d5d4bd72533e43c42af2574c81da2cb8735f3148fb22366fc7b38"
	I1201 18:59:59.619856  284652 cri.go:89] found id: "6f24a29e8dde904ee7c79191a05e4233a4d3c4611dec66af1e784899ffd2f4fa"
	I1201 18:59:59.619861  284652 cri.go:89] found id: "9efdb32584bcd1c4938cb1074c27c1b3d95cdc91deea1013f2b99669c0621313"
	I1201 18:59:59.619865  284652 cri.go:89] found id: "1fb932a179c7041614a4fcc5715379bec1bebfa33c81139f5df677e9fa11945f"
	I1201 18:59:59.619868  284652 cri.go:89] found id: "214de4fd9c7633a02aa05db38f69168fa0962e31a183a34b3a3afb40315ef80e"
	I1201 18:59:59.619872  284652 cri.go:89] found id: "0c2d4da8c8cce66a5ddb65b0ca03546edefdf740f59669699af3f7f312913564"
	I1201 18:59:59.619876  284652 cri.go:89] found id: "2688cc66adf8e4f8e9a5996f8a27f4527b00c7741e59aadea1022fc868f95648"
	I1201 18:59:59.619879  284652 cri.go:89] found id: "6353341ab4be94e3126a82f86e5b7b6c810a95a11c1532a6524b40001c56ec1e"
	I1201 18:59:59.619883  284652 cri.go:89] found id: "a01aebe75ba55e11656030032922a9fe89ca68f6b88e61a6565e71e38c259849"
	I1201 18:59:59.619897  284652 cri.go:89] found id: ""
	I1201 18:59:59.619947  284652 ssh_runner.go:195] Run: sudo runc --root /run/containerd/runc/k8s.io list -f json
	I1201 18:59:59.654556  284652 cri.go:116] JSON = [{"ociVersion":"1.0.2-dev","id":"0c2d4da8c8cce66a5ddb65b0ca03546edefdf740f59669699af3f7f312913564","pid":1315,"status":"running","bundle":"/run/containerd/io.containerd.runtime.v2.task/k8s.io/0c2d4da8c8cce66a5ddb65b0ca03546edefdf740f59669699af3f7f312913564","rootfs":"/run/containerd/io.containerd.runtime.v2.task/k8s.io/0c2d4da8c8cce66a5ddb65b0ca03546edefdf740f59669699af3f7f312913564/rootfs","created":"2023-12-01T18:59:00.62864878Z","annotations":{"io.kubernetes.cri.container-name":"etcd","io.kubernetes.cri.container-type":"container","io.kubernetes.cri.image-name":"registry.k8s.io/etcd:3.5.9-0","io.kubernetes.cri.sandbox-id":"5eee9bb6b890be25088c43c50c60ee0dd94c9533df6a93fd95e571b722f603e8","io.kubernetes.cri.sandbox-name":"etcd-functional-616785","io.kubernetes.cri.sandbox-namespace":"kube-system","io.kubernetes.cri.sandbox-uid":"7d7369424179d18b810696b6cd5e0c34"},"owner":"root"},{"ociVersion":"1.0.2-dev","id":"172a910da74d5d4bd72533e43c42af2574c81da2
cb8735f3148fb22366fc7b38","pid":2893,"status":"running","bundle":"/run/containerd/io.containerd.runtime.v2.task/k8s.io/172a910da74d5d4bd72533e43c42af2574c81da2cb8735f3148fb22366fc7b38","rootfs":"/run/containerd/io.containerd.runtime.v2.task/k8s.io/172a910da74d5d4bd72533e43c42af2574c81da2cb8735f3148fb22366fc7b38/rootfs","created":"2023-12-01T18:59:53.7126854Z","annotations":{"io.kubernetes.cri.container-name":"storage-provisioner","io.kubernetes.cri.container-type":"container","io.kubernetes.cri.image-name":"gcr.io/k8s-minikube/storage-provisioner:v5","io.kubernetes.cri.sandbox-id":"8b55a4531eadf2b7b7ac4b6e98d285594d33022051b789a55aac0bc3c5faebf2","io.kubernetes.cri.sandbox-name":"storage-provisioner","io.kubernetes.cri.sandbox-namespace":"kube-system","io.kubernetes.cri.sandbox-uid":"393607ea-a066-4d39-93eb-75c52a6ab29e"},"owner":"root"},{"ociVersion":"1.0.2-dev","id":"1ade3bcae44a0456b38605717b59a54930e914ca80145f4a6a42f69f2c89bc4c","pid":1804,"status":"running","bundle":"/run/containerd/io.containerd.runtim
e.v2.task/k8s.io/1ade3bcae44a0456b38605717b59a54930e914ca80145f4a6a42f69f2c89bc4c","rootfs":"/run/containerd/io.containerd.runtime.v2.task/k8s.io/1ade3bcae44a0456b38605717b59a54930e914ca80145f4a6a42f69f2c89bc4c/rootfs","created":"2023-12-01T18:59:23.301279068Z","annotations":{"io.kubernetes.cri.container-type":"sandbox","io.kubernetes.cri.sandbox-cpu-period":"100000","io.kubernetes.cri.sandbox-cpu-quota":"10000","io.kubernetes.cri.sandbox-cpu-shares":"102","io.kubernetes.cri.sandbox-id":"1ade3bcae44a0456b38605717b59a54930e914ca80145f4a6a42f69f2c89bc4c","io.kubernetes.cri.sandbox-log-directory":"/var/log/pods/kube-system_kindnet-6zm7x_e02380f9-bcfc-4a99-a5c0-e2372e5f0ad4","io.kubernetes.cri.sandbox-memory":"52428800","io.kubernetes.cri.sandbox-name":"kindnet-6zm7x","io.kubernetes.cri.sandbox-namespace":"kube-system","io.kubernetes.cri.sandbox-uid":"e02380f9-bcfc-4a99-a5c0-e2372e5f0ad4"},"owner":"root"},{"ociVersion":"1.0.2-dev","id":"1fb932a179c7041614a4fcc5715379bec1bebfa33c81139f5df677e9fa11945f","pid":1855,
"status":"running","bundle":"/run/containerd/io.containerd.runtime.v2.task/k8s.io/1fb932a179c7041614a4fcc5715379bec1bebfa33c81139f5df677e9fa11945f","rootfs":"/run/containerd/io.containerd.runtime.v2.task/k8s.io/1fb932a179c7041614a4fcc5715379bec1bebfa33c81139f5df677e9fa11945f/rootfs","created":"2023-12-01T18:59:23.40394988Z","annotations":{"io.kubernetes.cri.container-name":"kube-proxy","io.kubernetes.cri.container-type":"container","io.kubernetes.cri.image-name":"registry.k8s.io/kube-proxy:v1.28.4","io.kubernetes.cri.sandbox-id":"d7b2917b16b33132226cd5898d50cbfcba0d36be4be642e783846162063ff175","io.kubernetes.cri.sandbox-name":"kube-proxy-d8cvf","io.kubernetes.cri.sandbox-namespace":"kube-system","io.kubernetes.cri.sandbox-uid":"f95b7b7d-84db-44db-8038-dcfd7e1ab770"},"owner":"root"},{"ociVersion":"1.0.2-dev","id":"2688cc66adf8e4f8e9a5996f8a27f4527b00c7741e59aadea1022fc868f95648","pid":1326,"status":"running","bundle":"/run/containerd/io.containerd.runtime.v2.task/k8s.io/2688cc66adf8e4f8e9a5996f8a27f4527b00c77
41e59aadea1022fc868f95648","rootfs":"/run/containerd/io.containerd.runtime.v2.task/k8s.io/2688cc66adf8e4f8e9a5996f8a27f4527b00c7741e59aadea1022fc868f95648/rootfs","created":"2023-12-01T18:59:00.648416121Z","annotations":{"io.kubernetes.cri.container-name":"kube-apiserver","io.kubernetes.cri.container-type":"container","io.kubernetes.cri.image-name":"registry.k8s.io/kube-apiserver:v1.28.4","io.kubernetes.cri.sandbox-id":"f39292a0989585111b50d4115a545d232df69b9f78a22cca8cad460e8ee16199","io.kubernetes.cri.sandbox-name":"kube-apiserver-functional-616785","io.kubernetes.cri.sandbox-namespace":"kube-system","io.kubernetes.cri.sandbox-uid":"399196d44eb6c509f7b60d72c5662125"},"owner":"root"},{"ociVersion":"1.0.2-dev","id":"35fdb6cba245d4380dd978af7332e0abf05039d07409269e23de390bfcaa6950","pid":2105,"status":"running","bundle":"/run/containerd/io.containerd.runtime.v2.task/k8s.io/35fdb6cba245d4380dd978af7332e0abf05039d07409269e23de390bfcaa6950","rootfs":"/run/containerd/io.containerd.runtime.v2.task/k8s.io/35fdb6cba2
45d4380dd978af7332e0abf05039d07409269e23de390bfcaa6950/rootfs","created":"2023-12-01T18:59:37.461761511Z","annotations":{"io.kubernetes.cri.container-type":"sandbox","io.kubernetes.cri.sandbox-cpu-period":"100000","io.kubernetes.cri.sandbox-cpu-quota":"0","io.kubernetes.cri.sandbox-cpu-shares":"102","io.kubernetes.cri.sandbox-id":"35fdb6cba245d4380dd978af7332e0abf05039d07409269e23de390bfcaa6950","io.kubernetes.cri.sandbox-log-directory":"/var/log/pods/kube-system_coredns-5dd5756b68-ts5dh_daaa92f6-6743-4e3e-a5e9-0d3bea42c1e0","io.kubernetes.cri.sandbox-memory":"178257920","io.kubernetes.cri.sandbox-name":"coredns-5dd5756b68-ts5dh","io.kubernetes.cri.sandbox-namespace":"kube-system","io.kubernetes.cri.sandbox-uid":"daaa92f6-6743-4e3e-a5e9-0d3bea42c1e0"},"owner":"root"},{"ociVersion":"1.0.2-dev","id":"5eee9bb6b890be25088c43c50c60ee0dd94c9533df6a93fd95e571b722f603e8","pid":1174,"status":"running","bundle":"/run/containerd/io.containerd.runtime.v2.task/k8s.io/5eee9bb6b890be25088c43c50c60ee0dd94c9533df6a93fd95e571b
722f603e8","rootfs":"/run/containerd/io.containerd.runtime.v2.task/k8s.io/5eee9bb6b890be25088c43c50c60ee0dd94c9533df6a93fd95e571b722f603e8/rootfs","created":"2023-12-01T18:59:00.426326168Z","annotations":{"io.kubernetes.cri.container-type":"sandbox","io.kubernetes.cri.sandbox-cpu-period":"100000","io.kubernetes.cri.sandbox-cpu-quota":"0","io.kubernetes.cri.sandbox-cpu-shares":"102","io.kubernetes.cri.sandbox-id":"5eee9bb6b890be25088c43c50c60ee0dd94c9533df6a93fd95e571b722f603e8","io.kubernetes.cri.sandbox-log-directory":"/var/log/pods/kube-system_etcd-functional-616785_7d7369424179d18b810696b6cd5e0c34","io.kubernetes.cri.sandbox-memory":"0","io.kubernetes.cri.sandbox-name":"etcd-functional-616785","io.kubernetes.cri.sandbox-namespace":"kube-system","io.kubernetes.cri.sandbox-uid":"7d7369424179d18b810696b6cd5e0c34"},"owner":"root"},{"ociVersion":"1.0.2-dev","id":"6353341ab4be94e3126a82f86e5b7b6c810a95a11c1532a6524b40001c56ec1e","pid":1327,"status":"running","bundle":"/run/containerd/io.containerd.runtime.v2.tas
k/k8s.io/6353341ab4be94e3126a82f86e5b7b6c810a95a11c1532a6524b40001c56ec1e","rootfs":"/run/containerd/io.containerd.runtime.v2.task/k8s.io/6353341ab4be94e3126a82f86e5b7b6c810a95a11c1532a6524b40001c56ec1e/rootfs","created":"2023-12-01T18:59:00.685840617Z","annotations":{"io.kubernetes.cri.container-name":"kube-scheduler","io.kubernetes.cri.container-type":"container","io.kubernetes.cri.image-name":"registry.k8s.io/kube-scheduler:v1.28.4","io.kubernetes.cri.sandbox-id":"98d1c2867c3ea4c91188d8e481c880102506a3796f45f4a9f7a39603f311a3ec","io.kubernetes.cri.sandbox-name":"kube-scheduler-functional-616785","io.kubernetes.cri.sandbox-namespace":"kube-system","io.kubernetes.cri.sandbox-uid":"b5714ae4c71e32df6a08d33071c9d40f"},"owner":"root"},{"ociVersion":"1.0.2-dev","id":"6f24a29e8dde904ee7c79191a05e4233a4d3c4611dec66af1e784899ffd2f4fa","pid":2137,"status":"running","bundle":"/run/containerd/io.containerd.runtime.v2.task/k8s.io/6f24a29e8dde904ee7c79191a05e4233a4d3c4611dec66af1e784899ffd2f4fa","rootfs":"/run/containerd
/io.containerd.runtime.v2.task/k8s.io/6f24a29e8dde904ee7c79191a05e4233a4d3c4611dec66af1e784899ffd2f4fa/rootfs","created":"2023-12-01T18:59:37.566412994Z","annotations":{"io.kubernetes.cri.container-name":"coredns","io.kubernetes.cri.container-type":"container","io.kubernetes.cri.image-name":"registry.k8s.io/coredns/coredns:v1.10.1","io.kubernetes.cri.sandbox-id":"35fdb6cba245d4380dd978af7332e0abf05039d07409269e23de390bfcaa6950","io.kubernetes.cri.sandbox-name":"coredns-5dd5756b68-ts5dh","io.kubernetes.cri.sandbox-namespace":"kube-system","io.kubernetes.cri.sandbox-uid":"daaa92f6-6743-4e3e-a5e9-0d3bea42c1e0"},"owner":"root"},{"ociVersion":"1.0.2-dev","id":"8b55a4531eadf2b7b7ac4b6e98d285594d33022051b789a55aac0bc3c5faebf2","pid":1704,"status":"running","bundle":"/run/containerd/io.containerd.runtime.v2.task/k8s.io/8b55a4531eadf2b7b7ac4b6e98d285594d33022051b789a55aac0bc3c5faebf2","rootfs":"/run/containerd/io.containerd.runtime.v2.task/k8s.io/8b55a4531eadf2b7b7ac4b6e98d285594d33022051b789a55aac0bc3c5faebf2/rootfs"
,"created":"2023-12-01T18:59:22.992763511Z","annotations":{"io.kubernetes.cri.container-type":"sandbox","io.kubernetes.cri.sandbox-cpu-period":"100000","io.kubernetes.cri.sandbox-cpu-quota":"0","io.kubernetes.cri.sandbox-cpu-shares":"2","io.kubernetes.cri.sandbox-id":"8b55a4531eadf2b7b7ac4b6e98d285594d33022051b789a55aac0bc3c5faebf2","io.kubernetes.cri.sandbox-log-directory":"/var/log/pods/kube-system_storage-provisioner_393607ea-a066-4d39-93eb-75c52a6ab29e","io.kubernetes.cri.sandbox-memory":"0","io.kubernetes.cri.sandbox-name":"storage-provisioner","io.kubernetes.cri.sandbox-namespace":"kube-system","io.kubernetes.cri.sandbox-uid":"393607ea-a066-4d39-93eb-75c52a6ab29e"},"owner":"root"},{"ociVersion":"1.0.2-dev","id":"98d1c2867c3ea4c91188d8e481c880102506a3796f45f4a9f7a39603f311a3ec","pid":1175,"status":"running","bundle":"/run/containerd/io.containerd.runtime.v2.task/k8s.io/98d1c2867c3ea4c91188d8e481c880102506a3796f45f4a9f7a39603f311a3ec","rootfs":"/run/containerd/io.containerd.runtime.v2.task/k8s.io/98d1c286
7c3ea4c91188d8e481c880102506a3796f45f4a9f7a39603f311a3ec/rootfs","created":"2023-12-01T18:59:00.41758843Z","annotations":{"io.kubernetes.cri.container-type":"sandbox","io.kubernetes.cri.sandbox-cpu-period":"100000","io.kubernetes.cri.sandbox-cpu-quota":"0","io.kubernetes.cri.sandbox-cpu-shares":"102","io.kubernetes.cri.sandbox-id":"98d1c2867c3ea4c91188d8e481c880102506a3796f45f4a9f7a39603f311a3ec","io.kubernetes.cri.sandbox-log-directory":"/var/log/pods/kube-system_kube-scheduler-functional-616785_b5714ae4c71e32df6a08d33071c9d40f","io.kubernetes.cri.sandbox-memory":"0","io.kubernetes.cri.sandbox-name":"kube-scheduler-functional-616785","io.kubernetes.cri.sandbox-namespace":"kube-system","io.kubernetes.cri.sandbox-uid":"b5714ae4c71e32df6a08d33071c9d40f"},"owner":"root"},{"ociVersion":"1.0.2-dev","id":"9efdb32584bcd1c4938cb1074c27c1b3d95cdc91deea1013f2b99669c0621313","pid":1871,"status":"running","bundle":"/run/containerd/io.containerd.runtime.v2.task/k8s.io/9efdb32584bcd1c4938cb1074c27c1b3d95cdc91deea1013f2b996
69c0621313","rootfs":"/run/containerd/io.containerd.runtime.v2.task/k8s.io/9efdb32584bcd1c4938cb1074c27c1b3d95cdc91deea1013f2b99669c0621313/rootfs","created":"2023-12-01T18:59:23.51488378Z","annotations":{"io.kubernetes.cri.container-name":"kindnet-cni","io.kubernetes.cri.container-type":"container","io.kubernetes.cri.image-name":"docker.io/kindest/kindnetd:v20230809-80a64d96","io.kubernetes.cri.sandbox-id":"1ade3bcae44a0456b38605717b59a54930e914ca80145f4a6a42f69f2c89bc4c","io.kubernetes.cri.sandbox-name":"kindnet-6zm7x","io.kubernetes.cri.sandbox-namespace":"kube-system","io.kubernetes.cri.sandbox-uid":"e02380f9-bcfc-4a99-a5c0-e2372e5f0ad4"},"owner":"root"},{"ociVersion":"1.0.2-dev","id":"a01aebe75ba55e11656030032922a9fe89ca68f6b88e61a6565e71e38c259849","pid":1250,"status":"running","bundle":"/run/containerd/io.containerd.runtime.v2.task/k8s.io/a01aebe75ba55e11656030032922a9fe89ca68f6b88e61a6565e71e38c259849","rootfs":"/run/containerd/io.containerd.runtime.v2.task/k8s.io/a01aebe75ba55e11656030032922a9fe89ca6
8f6b88e61a6565e71e38c259849/rootfs","created":"2023-12-01T18:59:00.531247634Z","annotations":{"io.kubernetes.cri.container-name":"kube-controller-manager","io.kubernetes.cri.container-type":"container","io.kubernetes.cri.image-name":"registry.k8s.io/kube-controller-manager:v1.28.4","io.kubernetes.cri.sandbox-id":"c8f61359ddffa0351dabf4ab914d2d3c8bbd85a95648cf27a519b32bfd41bd9e","io.kubernetes.cri.sandbox-name":"kube-controller-manager-functional-616785","io.kubernetes.cri.sandbox-namespace":"kube-system","io.kubernetes.cri.sandbox-uid":"012a17049c3f357a0b12f711f68b3301"},"owner":"root"},{"ociVersion":"1.0.2-dev","id":"c8f61359ddffa0351dabf4ab914d2d3c8bbd85a95648cf27a519b32bfd41bd9e","pid":1159,"status":"running","bundle":"/run/containerd/io.containerd.runtime.v2.task/k8s.io/c8f61359ddffa0351dabf4ab914d2d3c8bbd85a95648cf27a519b32bfd41bd9e","rootfs":"/run/containerd/io.containerd.runtime.v2.task/k8s.io/c8f61359ddffa0351dabf4ab914d2d3c8bbd85a95648cf27a519b32bfd41bd9e/rootfs","created":"2023-12-01T18:59:00.393035
366Z","annotations":{"io.kubernetes.cri.container-type":"sandbox","io.kubernetes.cri.sandbox-cpu-period":"100000","io.kubernetes.cri.sandbox-cpu-quota":"0","io.kubernetes.cri.sandbox-cpu-shares":"204","io.kubernetes.cri.sandbox-id":"c8f61359ddffa0351dabf4ab914d2d3c8bbd85a95648cf27a519b32bfd41bd9e","io.kubernetes.cri.sandbox-log-directory":"/var/log/pods/kube-system_kube-controller-manager-functional-616785_012a17049c3f357a0b12f711f68b3301","io.kubernetes.cri.sandbox-memory":"0","io.kubernetes.cri.sandbox-name":"kube-controller-manager-functional-616785","io.kubernetes.cri.sandbox-namespace":"kube-system","io.kubernetes.cri.sandbox-uid":"012a17049c3f357a0b12f711f68b3301"},"owner":"root"},{"ociVersion":"1.0.2-dev","id":"d7b2917b16b33132226cd5898d50cbfcba0d36be4be642e783846162063ff175","pid":1811,"status":"running","bundle":"/run/containerd/io.containerd.runtime.v2.task/k8s.io/d7b2917b16b33132226cd5898d50cbfcba0d36be4be642e783846162063ff175","rootfs":"/run/containerd/io.containerd.runtime.v2.task/k8s.io/d7b2917b
16b33132226cd5898d50cbfcba0d36be4be642e783846162063ff175/rootfs","created":"2023-12-01T18:59:23.291168037Z","annotations":{"io.kubernetes.cri.container-type":"sandbox","io.kubernetes.cri.sandbox-cpu-period":"100000","io.kubernetes.cri.sandbox-cpu-quota":"0","io.kubernetes.cri.sandbox-cpu-shares":"2","io.kubernetes.cri.sandbox-id":"d7b2917b16b33132226cd5898d50cbfcba0d36be4be642e783846162063ff175","io.kubernetes.cri.sandbox-log-directory":"/var/log/pods/kube-system_kube-proxy-d8cvf_f95b7b7d-84db-44db-8038-dcfd7e1ab770","io.kubernetes.cri.sandbox-memory":"0","io.kubernetes.cri.sandbox-name":"kube-proxy-d8cvf","io.kubernetes.cri.sandbox-namespace":"kube-system","io.kubernetes.cri.sandbox-uid":"f95b7b7d-84db-44db-8038-dcfd7e1ab770"},"owner":"root"},{"ociVersion":"1.0.2-dev","id":"f39292a0989585111b50d4115a545d232df69b9f78a22cca8cad460e8ee16199","pid":1177,"status":"running","bundle":"/run/containerd/io.containerd.runtime.v2.task/k8s.io/f39292a0989585111b50d4115a545d232df69b9f78a22cca8cad460e8ee16199","rootfs":"/ru
n/containerd/io.containerd.runtime.v2.task/k8s.io/f39292a0989585111b50d4115a545d232df69b9f78a22cca8cad460e8ee16199/rootfs","created":"2023-12-01T18:59:00.405457838Z","annotations":{"io.kubernetes.cri.container-type":"sandbox","io.kubernetes.cri.sandbox-cpu-period":"100000","io.kubernetes.cri.sandbox-cpu-quota":"0","io.kubernetes.cri.sandbox-cpu-shares":"256","io.kubernetes.cri.sandbox-id":"f39292a0989585111b50d4115a545d232df69b9f78a22cca8cad460e8ee16199","io.kubernetes.cri.sandbox-log-directory":"/var/log/pods/kube-system_kube-apiserver-functional-616785_399196d44eb6c509f7b60d72c5662125","io.kubernetes.cri.sandbox-memory":"0","io.kubernetes.cri.sandbox-name":"kube-apiserver-functional-616785","io.kubernetes.cri.sandbox-namespace":"kube-system","io.kubernetes.cri.sandbox-uid":"399196d44eb6c509f7b60d72c5662125"},"owner":"root"}]
	I1201 18:59:59.654895  284652 cri.go:126] list returned 16 containers
	I1201 18:59:59.654904  284652 cri.go:129] container: {ID:0c2d4da8c8cce66a5ddb65b0ca03546edefdf740f59669699af3f7f312913564 Status:running}
	I1201 18:59:59.654916  284652 cri.go:135] skipping {0c2d4da8c8cce66a5ddb65b0ca03546edefdf740f59669699af3f7f312913564 running}: state = "running", want "paused"
	I1201 18:59:59.654924  284652 cri.go:129] container: {ID:172a910da74d5d4bd72533e43c42af2574c81da2cb8735f3148fb22366fc7b38 Status:running}
	I1201 18:59:59.654930  284652 cri.go:135] skipping {172a910da74d5d4bd72533e43c42af2574c81da2cb8735f3148fb22366fc7b38 running}: state = "running", want "paused"
	I1201 18:59:59.654935  284652 cri.go:129] container: {ID:1ade3bcae44a0456b38605717b59a54930e914ca80145f4a6a42f69f2c89bc4c Status:running}
	I1201 18:59:59.654940  284652 cri.go:131] skipping 1ade3bcae44a0456b38605717b59a54930e914ca80145f4a6a42f69f2c89bc4c - not in ps
	I1201 18:59:59.654945  284652 cri.go:129] container: {ID:1fb932a179c7041614a4fcc5715379bec1bebfa33c81139f5df677e9fa11945f Status:running}
	I1201 18:59:59.654950  284652 cri.go:135] skipping {1fb932a179c7041614a4fcc5715379bec1bebfa33c81139f5df677e9fa11945f running}: state = "running", want "paused"
	I1201 18:59:59.654956  284652 cri.go:129] container: {ID:2688cc66adf8e4f8e9a5996f8a27f4527b00c7741e59aadea1022fc868f95648 Status:running}
	I1201 18:59:59.654961  284652 cri.go:135] skipping {2688cc66adf8e4f8e9a5996f8a27f4527b00c7741e59aadea1022fc868f95648 running}: state = "running", want "paused"
	I1201 18:59:59.654966  284652 cri.go:129] container: {ID:35fdb6cba245d4380dd978af7332e0abf05039d07409269e23de390bfcaa6950 Status:running}
	I1201 18:59:59.654971  284652 cri.go:131] skipping 35fdb6cba245d4380dd978af7332e0abf05039d07409269e23de390bfcaa6950 - not in ps
	I1201 18:59:59.654976  284652 cri.go:129] container: {ID:5eee9bb6b890be25088c43c50c60ee0dd94c9533df6a93fd95e571b722f603e8 Status:running}
	I1201 18:59:59.654981  284652 cri.go:131] skipping 5eee9bb6b890be25088c43c50c60ee0dd94c9533df6a93fd95e571b722f603e8 - not in ps
	I1201 18:59:59.654986  284652 cri.go:129] container: {ID:6353341ab4be94e3126a82f86e5b7b6c810a95a11c1532a6524b40001c56ec1e Status:running}
	I1201 18:59:59.654991  284652 cri.go:135] skipping {6353341ab4be94e3126a82f86e5b7b6c810a95a11c1532a6524b40001c56ec1e running}: state = "running", want "paused"
	I1201 18:59:59.654996  284652 cri.go:129] container: {ID:6f24a29e8dde904ee7c79191a05e4233a4d3c4611dec66af1e784899ffd2f4fa Status:running}
	I1201 18:59:59.655004  284652 cri.go:135] skipping {6f24a29e8dde904ee7c79191a05e4233a4d3c4611dec66af1e784899ffd2f4fa running}: state = "running", want "paused"
	I1201 18:59:59.655009  284652 cri.go:129] container: {ID:8b55a4531eadf2b7b7ac4b6e98d285594d33022051b789a55aac0bc3c5faebf2 Status:running}
	I1201 18:59:59.655015  284652 cri.go:131] skipping 8b55a4531eadf2b7b7ac4b6e98d285594d33022051b789a55aac0bc3c5faebf2 - not in ps
	I1201 18:59:59.655019  284652 cri.go:129] container: {ID:98d1c2867c3ea4c91188d8e481c880102506a3796f45f4a9f7a39603f311a3ec Status:running}
	I1201 18:59:59.655025  284652 cri.go:131] skipping 98d1c2867c3ea4c91188d8e481c880102506a3796f45f4a9f7a39603f311a3ec - not in ps
	I1201 18:59:59.655029  284652 cri.go:129] container: {ID:9efdb32584bcd1c4938cb1074c27c1b3d95cdc91deea1013f2b99669c0621313 Status:running}
	I1201 18:59:59.655034  284652 cri.go:135] skipping {9efdb32584bcd1c4938cb1074c27c1b3d95cdc91deea1013f2b99669c0621313 running}: state = "running", want "paused"
	I1201 18:59:59.655039  284652 cri.go:129] container: {ID:a01aebe75ba55e11656030032922a9fe89ca68f6b88e61a6565e71e38c259849 Status:running}
	I1201 18:59:59.655045  284652 cri.go:135] skipping {a01aebe75ba55e11656030032922a9fe89ca68f6b88e61a6565e71e38c259849 running}: state = "running", want "paused"
	I1201 18:59:59.655050  284652 cri.go:129] container: {ID:c8f61359ddffa0351dabf4ab914d2d3c8bbd85a95648cf27a519b32bfd41bd9e Status:running}
	I1201 18:59:59.655055  284652 cri.go:131] skipping c8f61359ddffa0351dabf4ab914d2d3c8bbd85a95648cf27a519b32bfd41bd9e - not in ps
	I1201 18:59:59.655059  284652 cri.go:129] container: {ID:d7b2917b16b33132226cd5898d50cbfcba0d36be4be642e783846162063ff175 Status:running}
	I1201 18:59:59.655065  284652 cri.go:131] skipping d7b2917b16b33132226cd5898d50cbfcba0d36be4be642e783846162063ff175 - not in ps
	I1201 18:59:59.655069  284652 cri.go:129] container: {ID:f39292a0989585111b50d4115a545d232df69b9f78a22cca8cad460e8ee16199 Status:running}
	I1201 18:59:59.655075  284652 cri.go:131] skipping f39292a0989585111b50d4115a545d232df69b9f78a22cca8cad460e8ee16199 - not in ps
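Note: the cri.go lines above unmarshal the `runc ... list -f json` output and then skip every container that either does not appear in the crictl ps listing or is not in the wanted "paused" state (this pass looks for paused kube-system containers, so with everything running nothing is selected). A simplified sketch of that filter, with a made-up sample input:

	// crilist_sketch.go - simplified from the behaviour shown in the log; not minikube's code.
	package main

	import (
		"encoding/json"
		"fmt"
	)

	// runcContainer holds the two fields the filter needs from `runc list -f json`.
	type runcContainer struct {
		ID     string `json:"id"`
		Status string `json:"status"`
	}

	// pausedOnly keeps only containers whose status is "paused", mirroring the
	// `skipping {<id> running}: state = "running", want "paused"` lines above.
	func pausedOnly(listJSON []byte) ([]string, error) {
		var all []runcContainer
		if err := json.Unmarshal(listJSON, &all); err != nil {
			return nil, err
		}
		var ids []string
		for _, c := range all {
			if c.Status != "paused" {
				continue
			}
			ids = append(ids, c.ID)
		}
		return ids, nil
	}

	func main() {
		sample := []byte(`[{"id":"0c2d4da8","status":"running"},{"id":"feedface","status":"paused"}]`)
		ids, _ := pausedOnly(sample)
		fmt.Println(ids) // [feedface]
	}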
	I1201 18:59:59.655127  284652 ssh_runner.go:195] Run: sudo ls /var/lib/kubelet/kubeadm-flags.env /var/lib/kubelet/config.yaml /var/lib/minikube/etcd
	I1201 18:59:59.665845  284652 kubeadm.go:419] found existing configuration files, will attempt cluster restart
	I1201 18:59:59.665862  284652 kubeadm.go:636] restartCluster start
	I1201 18:59:59.665916  284652 ssh_runner.go:195] Run: sudo test -d /data/minikube
	I1201 18:59:59.675964  284652 kubeadm.go:127] /data/minikube skipping compat symlinks: sudo test -d /data/minikube: Process exited with status 1
	stdout:
	
	stderr:
	I1201 18:59:59.676524  284652 kubeconfig.go:92] found "functional-616785" server: "https://192.168.49.2:8441"
	I1201 18:59:59.677957  284652 ssh_runner.go:195] Run: sudo diff -u /var/tmp/minikube/kubeadm.yaml /var/tmp/minikube/kubeadm.yaml.new
	I1201 18:59:59.688997  284652 kubeadm.go:602] needs reconfigure: configs differ:
	-- stdout --
	--- /var/tmp/minikube/kubeadm.yaml	2023-12-01 18:58:51.889300352 +0000
	+++ /var/tmp/minikube/kubeadm.yaml.new	2023-12-01 18:59:59.029296118 +0000
	@@ -22,7 +22,7 @@
	 apiServer:
	   certSANs: ["127.0.0.1", "localhost", "192.168.49.2"]
	   extraArgs:
	-    enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
	+    enable-admission-plugins: "NamespaceAutoProvision"
	 controllerManager:
	   extraArgs:
	     allocate-node-cidrs: "true"
	
	-- /stdout --
	I1201 18:59:59.689008  284652 kubeadm.go:1135] stopping kube-system containers ...
	I1201 18:59:59.689018  284652 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name: Namespaces:[kube-system]}
	I1201 18:59:59.689071  284652 ssh_runner.go:195] Run: sudo -s eval "crictl ps -a --quiet --label io.kubernetes.pod.namespace=kube-system"
	I1201 18:59:59.732442  284652 cri.go:89] found id: "172a910da74d5d4bd72533e43c42af2574c81da2cb8735f3148fb22366fc7b38"
	I1201 18:59:59.732454  284652 cri.go:89] found id: "6f24a29e8dde904ee7c79191a05e4233a4d3c4611dec66af1e784899ffd2f4fa"
	I1201 18:59:59.732484  284652 cri.go:89] found id: "9efdb32584bcd1c4938cb1074c27c1b3d95cdc91deea1013f2b99669c0621313"
	I1201 18:59:59.732488  284652 cri.go:89] found id: "1fb932a179c7041614a4fcc5715379bec1bebfa33c81139f5df677e9fa11945f"
	I1201 18:59:59.732491  284652 cri.go:89] found id: "214de4fd9c7633a02aa05db38f69168fa0962e31a183a34b3a3afb40315ef80e"
	I1201 18:59:59.732495  284652 cri.go:89] found id: "0c2d4da8c8cce66a5ddb65b0ca03546edefdf740f59669699af3f7f312913564"
	I1201 18:59:59.732498  284652 cri.go:89] found id: "2688cc66adf8e4f8e9a5996f8a27f4527b00c7741e59aadea1022fc868f95648"
	I1201 18:59:59.732501  284652 cri.go:89] found id: "6353341ab4be94e3126a82f86e5b7b6c810a95a11c1532a6524b40001c56ec1e"
	I1201 18:59:59.732504  284652 cri.go:89] found id: "a01aebe75ba55e11656030032922a9fe89ca68f6b88e61a6565e71e38c259849"
	I1201 18:59:59.732510  284652 cri.go:89] found id: ""
	I1201 18:59:59.732515  284652 cri.go:234] Stopping containers: [172a910da74d5d4bd72533e43c42af2574c81da2cb8735f3148fb22366fc7b38 6f24a29e8dde904ee7c79191a05e4233a4d3c4611dec66af1e784899ffd2f4fa 9efdb32584bcd1c4938cb1074c27c1b3d95cdc91deea1013f2b99669c0621313 1fb932a179c7041614a4fcc5715379bec1bebfa33c81139f5df677e9fa11945f 214de4fd9c7633a02aa05db38f69168fa0962e31a183a34b3a3afb40315ef80e 0c2d4da8c8cce66a5ddb65b0ca03546edefdf740f59669699af3f7f312913564 2688cc66adf8e4f8e9a5996f8a27f4527b00c7741e59aadea1022fc868f95648 6353341ab4be94e3126a82f86e5b7b6c810a95a11c1532a6524b40001c56ec1e a01aebe75ba55e11656030032922a9fe89ca68f6b88e61a6565e71e38c259849]
	I1201 18:59:59.732566  284652 ssh_runner.go:195] Run: which crictl
	I1201 18:59:59.737048  284652 ssh_runner.go:195] Run: sudo /usr/bin/crictl stop --timeout=10 172a910da74d5d4bd72533e43c42af2574c81da2cb8735f3148fb22366fc7b38 6f24a29e8dde904ee7c79191a05e4233a4d3c4611dec66af1e784899ffd2f4fa 9efdb32584bcd1c4938cb1074c27c1b3d95cdc91deea1013f2b99669c0621313 1fb932a179c7041614a4fcc5715379bec1bebfa33c81139f5df677e9fa11945f 214de4fd9c7633a02aa05db38f69168fa0962e31a183a34b3a3afb40315ef80e 0c2d4da8c8cce66a5ddb65b0ca03546edefdf740f59669699af3f7f312913564 2688cc66adf8e4f8e9a5996f8a27f4527b00c7741e59aadea1022fc868f95648 6353341ab4be94e3126a82f86e5b7b6c810a95a11c1532a6524b40001c56ec1e a01aebe75ba55e11656030032922a9fe89ca68f6b88e61a6565e71e38c259849
	I1201 19:00:04.993512  284652 ssh_runner.go:235] Completed: sudo /usr/bin/crictl stop --timeout=10 172a910da74d5d4bd72533e43c42af2574c81da2cb8735f3148fb22366fc7b38 6f24a29e8dde904ee7c79191a05e4233a4d3c4611dec66af1e784899ffd2f4fa 9efdb32584bcd1c4938cb1074c27c1b3d95cdc91deea1013f2b99669c0621313 1fb932a179c7041614a4fcc5715379bec1bebfa33c81139f5df677e9fa11945f 214de4fd9c7633a02aa05db38f69168fa0962e31a183a34b3a3afb40315ef80e 0c2d4da8c8cce66a5ddb65b0ca03546edefdf740f59669699af3f7f312913564 2688cc66adf8e4f8e9a5996f8a27f4527b00c7741e59aadea1022fc868f95648 6353341ab4be94e3126a82f86e5b7b6c810a95a11c1532a6524b40001c56ec1e a01aebe75ba55e11656030032922a9fe89ca68f6b88e61a6565e71e38c259849: (5.256425175s)
	W1201 19:00:04.993567  284652 kubeadm.go:689] Failed to stop kube-system containers: port conflicts may arise: stop: crictl: sudo /usr/bin/crictl stop --timeout=10 172a910da74d5d4bd72533e43c42af2574c81da2cb8735f3148fb22366fc7b38 6f24a29e8dde904ee7c79191a05e4233a4d3c4611dec66af1e784899ffd2f4fa 9efdb32584bcd1c4938cb1074c27c1b3d95cdc91deea1013f2b99669c0621313 1fb932a179c7041614a4fcc5715379bec1bebfa33c81139f5df677e9fa11945f 214de4fd9c7633a02aa05db38f69168fa0962e31a183a34b3a3afb40315ef80e 0c2d4da8c8cce66a5ddb65b0ca03546edefdf740f59669699af3f7f312913564 2688cc66adf8e4f8e9a5996f8a27f4527b00c7741e59aadea1022fc868f95648 6353341ab4be94e3126a82f86e5b7b6c810a95a11c1532a6524b40001c56ec1e a01aebe75ba55e11656030032922a9fe89ca68f6b88e61a6565e71e38c259849: Process exited with status 1
	stdout:
	172a910da74d5d4bd72533e43c42af2574c81da2cb8735f3148fb22366fc7b38
	6f24a29e8dde904ee7c79191a05e4233a4d3c4611dec66af1e784899ffd2f4fa
	9efdb32584bcd1c4938cb1074c27c1b3d95cdc91deea1013f2b99669c0621313
	1fb932a179c7041614a4fcc5715379bec1bebfa33c81139f5df677e9fa11945f
	
	stderr:
	E1201 19:00:04.990149    3390 remote_runtime.go:505] "StopContainer from runtime service failed" err="rpc error: code = NotFound desc = an error occurred when try to find container \"214de4fd9c7633a02aa05db38f69168fa0962e31a183a34b3a3afb40315ef80e\": not found" containerID="214de4fd9c7633a02aa05db38f69168fa0962e31a183a34b3a3afb40315ef80e"
	time="2023-12-01T19:00:04Z" level=fatal msg="stopping the container \"214de4fd9c7633a02aa05db38f69168fa0962e31a183a34b3a3afb40315ef80e\": rpc error: code = NotFound desc = an error occurred when try to find container \"214de4fd9c7633a02aa05db38f69168fa0962e31a183a34b3a3afb40315ef80e\": not found"
	I1201 19:00:04.993641  284652 ssh_runner.go:195] Run: sudo systemctl stop kubelet
	I1201 19:00:05.065032  284652 ssh_runner.go:195] Run: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf
	I1201 19:00:05.077689  284652 kubeadm.go:155] found existing configuration files:
	-rw------- 1 root root 5639 Dec  1 18:58 /etc/kubernetes/admin.conf
	-rw------- 1 root root 5652 Dec  1 18:58 /etc/kubernetes/controller-manager.conf
	-rw------- 1 root root 2007 Dec  1 18:59 /etc/kubernetes/kubelet.conf
	-rw------- 1 root root 5604 Dec  1 18:58 /etc/kubernetes/scheduler.conf
	
	I1201 19:00:05.077752  284652 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8441 /etc/kubernetes/admin.conf
	I1201 19:00:05.090161  284652 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8441 /etc/kubernetes/kubelet.conf
	I1201 19:00:05.103373  284652 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8441 /etc/kubernetes/controller-manager.conf
	I1201 19:00:05.117210  284652 kubeadm.go:166] "https://control-plane.minikube.internal:8441" may not be in /etc/kubernetes/controller-manager.conf - will remove: sudo grep https://control-plane.minikube.internal:8441 /etc/kubernetes/controller-manager.conf: Process exited with status 1
	stdout:
	
	stderr:
	I1201 19:00:05.117283  284652 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/controller-manager.conf
	I1201 19:00:05.128614  284652 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8441 /etc/kubernetes/scheduler.conf
	I1201 19:00:05.140034  284652 kubeadm.go:166] "https://control-plane.minikube.internal:8441" may not be in /etc/kubernetes/scheduler.conf - will remove: sudo grep https://control-plane.minikube.internal:8441 /etc/kubernetes/scheduler.conf: Process exited with status 1
	stdout:
	
	stderr:
	I1201 19:00:05.140099  284652 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/scheduler.conf
	I1201 19:00:05.151011  284652 ssh_runner.go:195] Run: sudo cp /var/tmp/minikube/kubeadm.yaml.new /var/tmp/minikube/kubeadm.yaml
	I1201 19:00:05.162778  284652 kubeadm.go:713] reconfiguring cluster from /var/tmp/minikube/kubeadm.yaml
	I1201 19:00:05.162793  284652 ssh_runner.go:195] Run: /bin/bash -c "sudo env PATH="/var/lib/minikube/binaries/v1.28.4:$PATH" kubeadm init phase certs all --config /var/tmp/minikube/kubeadm.yaml"
	I1201 19:00:05.233115  284652 ssh_runner.go:195] Run: /bin/bash -c "sudo env PATH="/var/lib/minikube/binaries/v1.28.4:$PATH" kubeadm init phase kubeconfig all --config /var/tmp/minikube/kubeadm.yaml"
	I1201 19:00:06.825034  284652 ssh_runner.go:235] Completed: /bin/bash -c "sudo env PATH="/var/lib/minikube/binaries/v1.28.4:$PATH" kubeadm init phase kubeconfig all --config /var/tmp/minikube/kubeadm.yaml": (1.59189502s)
	I1201 19:00:06.825055  284652 ssh_runner.go:195] Run: /bin/bash -c "sudo env PATH="/var/lib/minikube/binaries/v1.28.4:$PATH" kubeadm init phase kubelet-start --config /var/tmp/minikube/kubeadm.yaml"
	I1201 19:00:07.038006  284652 ssh_runner.go:195] Run: /bin/bash -c "sudo env PATH="/var/lib/minikube/binaries/v1.28.4:$PATH" kubeadm init phase control-plane all --config /var/tmp/minikube/kubeadm.yaml"
	I1201 19:00:07.122641  284652 ssh_runner.go:195] Run: /bin/bash -c "sudo env PATH="/var/lib/minikube/binaries/v1.28.4:$PATH" kubeadm init phase etcd local --config /var/tmp/minikube/kubeadm.yaml"
	I1201 19:00:07.253505  284652 api_server.go:52] waiting for apiserver process to appear ...
	I1201 19:00:07.253578  284652 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
	I1201 19:00:07.275979  284652 api_server.go:72] duration metric: took 22.472124ms to wait for apiserver process to appear ...
	I1201 19:00:07.275993  284652 api_server.go:88] waiting for apiserver healthz status ...
	I1201 19:00:07.276008  284652 api_server.go:253] Checking apiserver healthz at https://192.168.49.2:8441/healthz ...
	I1201 19:00:07.285934  284652 api_server.go:279] https://192.168.49.2:8441/healthz returned 200:
	ok
	I1201 19:00:07.301513  284652 api_server.go:141] control plane version: v1.28.4
	I1201 19:00:07.301534  284652 api_server.go:131] duration metric: took 25.535303ms to wait for apiserver health ...
	I1201 19:00:07.301543  284652 cni.go:84] Creating CNI manager for ""
	I1201 19:00:07.301549  284652 cni.go:143] "docker" driver + "containerd" runtime found, recommending kindnet
	I1201 19:00:07.304402  284652 out.go:177] * Configuring CNI (Container Networking Interface) ...
	I1201 19:00:07.306765  284652 ssh_runner.go:195] Run: stat /opt/cni/bin/portmap
	I1201 19:00:07.312170  284652 cni.go:182] applying CNI manifest using /var/lib/minikube/binaries/v1.28.4/kubectl ...
	I1201 19:00:07.312181  284652 ssh_runner.go:362] scp memory --> /var/tmp/minikube/cni.yaml (2438 bytes)
	I1201 19:00:07.355767  284652 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.4/kubectl apply --kubeconfig=/var/lib/minikube/kubeconfig -f /var/tmp/minikube/cni.yaml
	I1201 19:00:07.758953  284652 system_pods.go:43] waiting for kube-system pods to appear ...
	I1201 19:00:07.767241  284652 system_pods.go:59] 8 kube-system pods found
	I1201 19:00:07.767257  284652 system_pods.go:61] "coredns-5dd5756b68-ts5dh" [daaa92f6-6743-4e3e-a5e9-0d3bea42c1e0] Running
	I1201 19:00:07.767262  284652 system_pods.go:61] "etcd-functional-616785" [2cbc9b55-312b-419c-ae0c-a91f984dba54] Running
	I1201 19:00:07.767266  284652 system_pods.go:61] "kindnet-6zm7x" [e02380f9-bcfc-4a99-a5c0-e2372e5f0ad4] Running
	I1201 19:00:07.767270  284652 system_pods.go:61] "kube-apiserver-functional-616785" [7edc0134-41d4-48f6-bd02-45f6b11b6156] Running
	I1201 19:00:07.767276  284652 system_pods.go:61] "kube-controller-manager-functional-616785" [5a86626c-86fd-4a54-a1ad-647ab01f2623] Running
	I1201 19:00:07.767280  284652 system_pods.go:61] "kube-proxy-d8cvf" [f95b7b7d-84db-44db-8038-dcfd7e1ab770] Running
	I1201 19:00:07.767284  284652 system_pods.go:61] "kube-scheduler-functional-616785" [0b89a865-9935-4050-a38f-54d940cd0bd0] Running
	I1201 19:00:07.767291  284652 system_pods.go:61] "storage-provisioner" [393607ea-a066-4d39-93eb-75c52a6ab29e] Running / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
	I1201 19:00:07.767297  284652 system_pods.go:74] duration metric: took 8.333367ms to wait for pod list to return data ...
	I1201 19:00:07.767304  284652 node_conditions.go:102] verifying NodePressure condition ...
	I1201 19:00:07.770831  284652 node_conditions.go:122] node storage ephemeral capacity is 203034800Ki
	I1201 19:00:07.770851  284652 node_conditions.go:123] node cpu capacity is 2
	I1201 19:00:07.770860  284652 node_conditions.go:105] duration metric: took 3.551243ms to run NodePressure ...
	I1201 19:00:07.770885  284652 ssh_runner.go:195] Run: /bin/bash -c "sudo env PATH="/var/lib/minikube/binaries/v1.28.4:$PATH" kubeadm init phase addon all --config /var/tmp/minikube/kubeadm.yaml"
	I1201 19:00:08.002951  284652 kubeadm.go:772] waiting for restarted kubelet to initialise ...
	I1201 19:00:08.009183  284652 retry.go:31] will retry after 310.10446ms: kubelet not initialised
	I1201 19:00:08.328265  284652 kubeadm.go:787] kubelet initialised
	I1201 19:00:08.328276  284652 kubeadm.go:788] duration metric: took 325.312258ms waiting for restarted kubelet to initialise ...
	I1201 19:00:08.328285  284652 pod_ready.go:35] extra waiting up to 4m0s for all system-critical pods including labels [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] to be "Ready" ...
	I1201 19:00:08.345770  284652 pod_ready.go:78] waiting up to 4m0s for pod "coredns-5dd5756b68-ts5dh" in "kube-system" namespace to be "Ready" ...
	I1201 19:00:08.373459  284652 pod_ready.go:97] node "functional-616785" hosting pod "coredns-5dd5756b68-ts5dh" in "kube-system" namespace is currently not "Ready" (skipping!): node "functional-616785" has status "Ready":"False"
	I1201 19:00:08.373474  284652 pod_ready.go:81] duration metric: took 27.689694ms waiting for pod "coredns-5dd5756b68-ts5dh" in "kube-system" namespace to be "Ready" ...
	E1201 19:00:08.373484  284652 pod_ready.go:66] WaitExtra: waitPodCondition: node "functional-616785" hosting pod "coredns-5dd5756b68-ts5dh" in "kube-system" namespace is currently not "Ready" (skipping!): node "functional-616785" has status "Ready":"False"
	I1201 19:00:08.373554  284652 pod_ready.go:78] waiting up to 4m0s for pod "etcd-functional-616785" in "kube-system" namespace to be "Ready" ...
	I1201 19:00:08.390780  284652 pod_ready.go:97] node "functional-616785" hosting pod "etcd-functional-616785" in "kube-system" namespace is currently not "Ready" (skipping!): node "functional-616785" has status "Ready":"False"
	I1201 19:00:08.390798  284652 pod_ready.go:81] duration metric: took 17.233949ms waiting for pod "etcd-functional-616785" in "kube-system" namespace to be "Ready" ...
	E1201 19:00:08.390808  284652 pod_ready.go:66] WaitExtra: waitPodCondition: node "functional-616785" hosting pod "etcd-functional-616785" in "kube-system" namespace is currently not "Ready" (skipping!): node "functional-616785" has status "Ready":"False"
	I1201 19:00:08.390887  284652 pod_ready.go:78] waiting up to 4m0s for pod "kube-apiserver-functional-616785" in "kube-system" namespace to be "Ready" ...
	I1201 19:00:08.403055  284652 pod_ready.go:97] node "functional-616785" hosting pod "kube-apiserver-functional-616785" in "kube-system" namespace is currently not "Ready" (skipping!): node "functional-616785" has status "Ready":"False"
	I1201 19:00:08.403071  284652 pod_ready.go:81] duration metric: took 12.177704ms waiting for pod "kube-apiserver-functional-616785" in "kube-system" namespace to be "Ready" ...
	E1201 19:00:08.403088  284652 pod_ready.go:66] WaitExtra: waitPodCondition: node "functional-616785" hosting pod "kube-apiserver-functional-616785" in "kube-system" namespace is currently not "Ready" (skipping!): node "functional-616785" has status "Ready":"False"
	I1201 19:00:08.409677  284652 pod_ready.go:78] waiting up to 4m0s for pod "kube-controller-manager-functional-616785" in "kube-system" namespace to be "Ready" ...
	I1201 19:00:08.421630  284652 pod_ready.go:97] node "functional-616785" hosting pod "kube-controller-manager-functional-616785" in "kube-system" namespace is currently not "Ready" (skipping!): node "functional-616785" has status "Ready":"False"
	I1201 19:00:08.421648  284652 pod_ready.go:81] duration metric: took 11.955662ms waiting for pod "kube-controller-manager-functional-616785" in "kube-system" namespace to be "Ready" ...
	E1201 19:00:08.421661  284652 pod_ready.go:66] WaitExtra: waitPodCondition: node "functional-616785" hosting pod "kube-controller-manager-functional-616785" in "kube-system" namespace is currently not "Ready" (skipping!): node "functional-616785" has status "Ready":"False"
	I1201 19:00:08.421725  284652 pod_ready.go:78] waiting up to 4m0s for pod "kube-proxy-d8cvf" in "kube-system" namespace to be "Ready" ...
	I1201 19:00:08.765278  284652 pod_ready.go:97] node "functional-616785" hosting pod "kube-proxy-d8cvf" in "kube-system" namespace is currently not "Ready" (skipping!): node "functional-616785" has status "Ready":"False"
	I1201 19:00:08.765292  284652 pod_ready.go:81] duration metric: took 343.558835ms waiting for pod "kube-proxy-d8cvf" in "kube-system" namespace to be "Ready" ...
	E1201 19:00:08.765301  284652 pod_ready.go:66] WaitExtra: waitPodCondition: node "functional-616785" hosting pod "kube-proxy-d8cvf" in "kube-system" namespace is currently not "Ready" (skipping!): node "functional-616785" has status "Ready":"False"
	I1201 19:00:08.765331  284652 pod_ready.go:78] waiting up to 4m0s for pod "kube-scheduler-functional-616785" in "kube-system" namespace to be "Ready" ...
	I1201 19:00:09.162488  284652 pod_ready.go:97] node "functional-616785" hosting pod "kube-scheduler-functional-616785" in "kube-system" namespace is currently not "Ready" (skipping!): node "functional-616785" has status "Ready":"False"
	I1201 19:00:09.162502  284652 pod_ready.go:81] duration metric: took 397.164454ms waiting for pod "kube-scheduler-functional-616785" in "kube-system" namespace to be "Ready" ...
	E1201 19:00:09.162511  284652 pod_ready.go:66] WaitExtra: waitPodCondition: node "functional-616785" hosting pod "kube-scheduler-functional-616785" in "kube-system" namespace is currently not "Ready" (skipping!): node "functional-616785" has status "Ready":"False"
	I1201 19:00:09.162622  284652 pod_ready.go:38] duration metric: took 834.326101ms for extra waiting for all system-critical and pods with labels [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] to be "Ready" ...
	I1201 19:00:09.162642  284652 ssh_runner.go:195] Run: /bin/bash -c "cat /proc/$(pgrep kube-apiserver)/oom_adj"
	I1201 19:00:09.173311  284652 ops.go:34] apiserver oom_adj: -16
	I1201 19:00:09.173323  284652 kubeadm.go:640] restartCluster took 9.50745573s
	I1201 19:00:09.173331  284652 kubeadm.go:406] StartCluster complete in 9.596250455s
	I1201 19:00:09.173346  284652 settings.go:142] acquiring lock: {Name:mk509c4de5b63e24c154062001ac3a5a349afe54 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
	I1201 19:00:09.173430  284652 settings.go:150] Updating kubeconfig:  /home/jenkins/minikube-integration/17703-252966/kubeconfig
	I1201 19:00:09.174146  284652 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/17703-252966/kubeconfig: {Name:mk1b3fc1b8f9b6d7245434b6dbdc3c3d1a4130cc Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
	I1201 19:00:09.175278  284652 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.28.4/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml"
	I1201 19:00:09.175516  284652 config.go:182] Loaded profile config "functional-616785": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.28.4
	I1201 19:00:09.175566  284652 addons.go:499] enable addons start: toEnable=map[ambassador:false auto-pause:false cloud-spanner:false csi-hostpath-driver:false dashboard:false default-storageclass:true efk:false freshpod:false gcp-auth:false gvisor:false headlamp:false helm-tiller:false inaccel:false ingress:false ingress-dns:false inspektor-gadget:false istio:false istio-provisioner:false kong:false kubeflow:false kubevirt:false logviewer:false metallb:false metrics-server:false nvidia-device-plugin:false nvidia-driver-installer:false nvidia-gpu-device-plugin:false olm:false pod-security-policy:false portainer:false registry:false registry-aliases:false registry-creds:false storage-provisioner:true storage-provisioner-gluster:false storage-provisioner-rancher:false volumesnapshots:false]
	I1201 19:00:09.175629  284652 addons.go:69] Setting storage-provisioner=true in profile "functional-616785"
	I1201 19:00:09.175646  284652 addons.go:231] Setting addon storage-provisioner=true in "functional-616785"
	W1201 19:00:09.175651  284652 addons.go:240] addon storage-provisioner should already be in state true
	I1201 19:00:09.175683  284652 host.go:66] Checking if "functional-616785" exists ...
	I1201 19:00:09.176096  284652 cli_runner.go:164] Run: docker container inspect functional-616785 --format={{.State.Status}}
	I1201 19:00:09.176321  284652 addons.go:69] Setting default-storageclass=true in profile "functional-616785"
	I1201 19:00:09.176336  284652 addons_storage_classes.go:33] enableOrDisableStorageClasses default-storageclass=true on "functional-616785"
	I1201 19:00:09.176724  284652 cli_runner.go:164] Run: docker container inspect functional-616785 --format={{.State.Status}}
	I1201 19:00:09.187627  284652 kapi.go:248] "coredns" deployment in "kube-system" namespace and "functional-616785" context rescaled to 1 replicas
	I1201 19:00:09.187661  284652 start.go:223] Will wait 6m0s for node &{Name: IP:192.168.49.2 Port:8441 KubernetesVersion:v1.28.4 ContainerRuntime:containerd ControlPlane:true Worker:true}
	I1201 19:00:09.190782  284652 out.go:177] * Verifying Kubernetes components...
	I1201 19:00:09.196710  284652 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet
	I1201 19:00:09.228900  284652 out.go:177]   - Using image gcr.io/k8s-minikube/storage-provisioner:v5
	I1201 19:00:09.230893  284652 addons.go:423] installing /etc/kubernetes/addons/storage-provisioner.yaml
	I1201 19:00:09.230903  284652 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/storage-provisioner.yaml (2676 bytes)
	I1201 19:00:09.230967  284652 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" functional-616785
	I1201 19:00:09.228236  284652 addons.go:231] Setting addon default-storageclass=true in "functional-616785"
	W1201 19:00:09.231213  284652 addons.go:240] addon default-storageclass should already be in state true
	I1201 19:00:09.231239  284652 host.go:66] Checking if "functional-616785" exists ...
	I1201 19:00:09.231708  284652 cli_runner.go:164] Run: docker container inspect functional-616785 --format={{.State.Status}}
	I1201 19:00:09.268174  284652 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33098 SSHKeyPath:/home/jenkins/minikube-integration/17703-252966/.minikube/machines/functional-616785/id_rsa Username:docker}
	I1201 19:00:09.281791  284652 addons.go:423] installing /etc/kubernetes/addons/storageclass.yaml
	I1201 19:00:09.281803  284652 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/storageclass.yaml (271 bytes)
	I1201 19:00:09.281863  284652 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" functional-616785
	I1201 19:00:09.311110  284652 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33098 SSHKeyPath:/home/jenkins/minikube-integration/17703-252966/.minikube/machines/functional-616785/id_rsa Username:docker}
	E1201 19:00:09.475849  284652 start.go:894] failed to get current CoreDNS ConfigMap: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.28.4/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml": Process exited with status 1
	stdout:
	
	stderr:
	The connection to the server localhost:8441 was refused - did you specify the right host or port?
	W1201 19:00:09.475869  284652 start.go:294] Unable to inject {"host.minikube.internal": 192.168.49.1} record into CoreDNS: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.28.4/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml": Process exited with status 1
	stdout:
	
	stderr:
	The connection to the server localhost:8441 was refused - did you specify the right host or port?
	W1201 19:00:09.475884  284652 out.go:239] Failed to inject host.minikube.internal into CoreDNS, this will limit the pods access to the host IP
	I1201 19:00:09.476107  284652 node_ready.go:35] waiting up to 6m0s for node "functional-616785" to be "Ready" ...
	I1201 19:00:09.476569  284652 node_ready.go:53] error getting node "functional-616785": Get "https://192.168.49.2:8441/api/v1/nodes/functional-616785": dial tcp 192.168.49.2:8441: connect: connection refused
	I1201 19:00:09.476579  284652 node_ready.go:38] duration metric: took 459.025µs waiting for node "functional-616785" to be "Ready" ...
	I1201 19:00:09.479815  284652 out.go:177] 
	W1201 19:00:09.482258  284652 out.go:239] X Exiting due to GUEST_START: failed to start node: wait 6m0s for node: waiting for node to be ready: waitNodeCondition: error getting node "functional-616785": Get "https://192.168.49.2:8441/api/v1/nodes/functional-616785": dial tcp 192.168.49.2:8441: connect: connection refused
	W1201 19:00:09.482426  284652 out.go:239] * 
	W1201 19:00:09.483533  284652 out.go:239] ╭─────────────────────────────────────────────────────────────────────────────────────────────╮
	│                                                                                             │
	│    * If the above advice does not help, please let us know:                                 │
	│      https://github.com/kubernetes/minikube/issues/new/choose                               │
	│                                                                                             │
	│    * Please run `minikube logs --file=logs.txt` and attach logs.txt to the GitHub issue.    │
	│                                                                                             │
	╰─────────────────────────────────────────────────────────────────────────────────────────────╯
	I1201 19:00:09.486982  284652 out.go:177] 
	
	* 
	* ==> container status <==
	* CONTAINER           IMAGE               CREATED              STATE               NAME                      ATTEMPT             POD ID              POD
	9f4183dd56963       04b4eaa3d3db8       4 seconds ago        Running             kindnet-cni               1                   1ade3bcae44a0       kindnet-6zm7x
	477d114576c31       ba04bb24b9575       4 seconds ago        Running             storage-provisioner       2                   8b55a4531eadf       storage-provisioner
	99140cde269c3       3ca3ca488cf13       4 seconds ago        Running             kube-proxy                1                   d7b2917b16b33       kube-proxy-d8cvf
	a46f9a2d3de02       97e04611ad434       4 seconds ago        Running             coredns                   1                   35fdb6cba245d       coredns-5dd5756b68-ts5dh
	7c7908b790bf5       04b4c447bb9d4       4 seconds ago        Exited              kube-apiserver            1                   c0f80812a4a06       kube-apiserver-functional-616785
	172a910da74d5       ba04bb24b9575       19 seconds ago       Exited              storage-provisioner       1                   8b55a4531eadf       storage-provisioner
	6f24a29e8dde9       97e04611ad434       35 seconds ago       Exited              coredns                   0                   35fdb6cba245d       coredns-5dd5756b68-ts5dh
	9efdb32584bcd       04b4eaa3d3db8       49 seconds ago       Exited              kindnet-cni               0                   1ade3bcae44a0       kindnet-6zm7x
	1fb932a179c70       3ca3ca488cf13       49 seconds ago       Exited              kube-proxy                0                   d7b2917b16b33       kube-proxy-d8cvf
	0c2d4da8c8cce       9cdd6470f48c8       About a minute ago   Running             etcd                      0                   5eee9bb6b890b       etcd-functional-616785
	6353341ab4be9       05c284c929889       About a minute ago   Running             kube-scheduler            0                   98d1c2867c3ea       kube-scheduler-functional-616785
	a01aebe75ba55       9961cbceaf234       About a minute ago   Running             kube-controller-manager   0                   c8f61359ddffa       kube-controller-manager-functional-616785
	
	* 
	* ==> containerd <==
	* Dec 01 19:00:08 functional-616785 containerd[3198]: time="2023-12-01T19:00:08.865621717Z" level=info msg="shim disconnected" id=7c7908b790bf54073f1315a8c1586f4746b82b87e895963cb52f53f06a6ddfd3
	Dec 01 19:00:08 functional-616785 containerd[3198]: time="2023-12-01T19:00:08.865906699Z" level=warning msg="cleaning up after shim disconnected" id=7c7908b790bf54073f1315a8c1586f4746b82b87e895963cb52f53f06a6ddfd3 namespace=k8s.io
	Dec 01 19:00:08 functional-616785 containerd[3198]: time="2023-12-01T19:00:08.866029297Z" level=info msg="cleaning up dead shim"
	Dec 01 19:00:08 functional-616785 containerd[3198]: time="2023-12-01T19:00:08.881174091Z" level=warning msg="cleanup warnings time=\"2023-12-01T19:00:08Z\" level=info msg=\"starting signal loop\" namespace=k8s.io pid=3933 runtime=io.containerd.runc.v2\n"
	Dec 01 19:00:09 functional-616785 containerd[3198]: time="2023-12-01T19:00:09.323354246Z" level=info msg="StopContainer for \"2688cc66adf8e4f8e9a5996f8a27f4527b00c7741e59aadea1022fc868f95648\" with timeout 2 (s)"
	Dec 01 19:00:09 functional-616785 containerd[3198]: time="2023-12-01T19:00:09.323774896Z" level=info msg="Stop container \"2688cc66adf8e4f8e9a5996f8a27f4527b00c7741e59aadea1022fc868f95648\" with signal terminated"
	Dec 01 19:00:09 functional-616785 containerd[3198]: time="2023-12-01T19:00:09.427791415Z" level=info msg="shim disconnected" id=f39292a0989585111b50d4115a545d232df69b9f78a22cca8cad460e8ee16199
	Dec 01 19:00:09 functional-616785 containerd[3198]: time="2023-12-01T19:00:09.427846552Z" level=warning msg="cleaning up after shim disconnected" id=f39292a0989585111b50d4115a545d232df69b9f78a22cca8cad460e8ee16199 namespace=k8s.io
	Dec 01 19:00:09 functional-616785 containerd[3198]: time="2023-12-01T19:00:09.428025223Z" level=info msg="cleaning up dead shim"
	Dec 01 19:00:09 functional-616785 containerd[3198]: time="2023-12-01T19:00:09.453212997Z" level=info msg="RemoveContainer for \"ded0a01255c344ae2871870352d750cd87a8083139cfbabb4ebf6e130736185b\""
	Dec 01 19:00:09 functional-616785 containerd[3198]: time="2023-12-01T19:00:09.467644070Z" level=warning msg="cleanup warnings time=\"2023-12-01T19:00:09Z\" level=info msg=\"starting signal loop\" namespace=k8s.io pid=4110 runtime=io.containerd.runc.v2\n"
	Dec 01 19:00:09 functional-616785 containerd[3198]: time="2023-12-01T19:00:09.472148961Z" level=info msg="RemoveContainer for \"ded0a01255c344ae2871870352d750cd87a8083139cfbabb4ebf6e130736185b\" returns successfully"
	Dec 01 19:00:09 functional-616785 containerd[3198]: time="2023-12-01T19:00:09.482076636Z" level=info msg="shim disconnected" id=2688cc66adf8e4f8e9a5996f8a27f4527b00c7741e59aadea1022fc868f95648
	Dec 01 19:00:09 functional-616785 containerd[3198]: time="2023-12-01T19:00:09.482128409Z" level=warning msg="cleaning up after shim disconnected" id=2688cc66adf8e4f8e9a5996f8a27f4527b00c7741e59aadea1022fc868f95648 namespace=k8s.io
	Dec 01 19:00:09 functional-616785 containerd[3198]: time="2023-12-01T19:00:09.482139363Z" level=info msg="cleaning up dead shim"
	Dec 01 19:00:09 functional-616785 containerd[3198]: time="2023-12-01T19:00:09.506842800Z" level=warning msg="cleanup warnings time=\"2023-12-01T19:00:09Z\" level=info msg=\"starting signal loop\" namespace=k8s.io pid=4133 runtime=io.containerd.runc.v2\n"
	Dec 01 19:00:09 functional-616785 containerd[3198]: time="2023-12-01T19:00:09.515807373Z" level=info msg="StopContainer for \"2688cc66adf8e4f8e9a5996f8a27f4527b00c7741e59aadea1022fc868f95648\" returns successfully"
	Dec 01 19:00:09 functional-616785 containerd[3198]: time="2023-12-01T19:00:09.519174470Z" level=info msg="StopPodSandbox for \"f39292a0989585111b50d4115a545d232df69b9f78a22cca8cad460e8ee16199\""
	Dec 01 19:00:09 functional-616785 containerd[3198]: time="2023-12-01T19:00:09.519254501Z" level=info msg="Container to stop \"2688cc66adf8e4f8e9a5996f8a27f4527b00c7741e59aadea1022fc868f95648\" must be in running or unknown state, current state \"CONTAINER_EXITED\""
	Dec 01 19:00:09 functional-616785 containerd[3198]: time="2023-12-01T19:00:09.520802451Z" level=info msg="TearDown network for sandbox \"f39292a0989585111b50d4115a545d232df69b9f78a22cca8cad460e8ee16199\" successfully"
	Dec 01 19:00:09 functional-616785 containerd[3198]: time="2023-12-01T19:00:09.520843238Z" level=info msg="StopPodSandbox for \"f39292a0989585111b50d4115a545d232df69b9f78a22cca8cad460e8ee16199\" returns successfully"
	Dec 01 19:00:10 functional-616785 containerd[3198]: time="2023-12-01T19:00:10.471069539Z" level=info msg="RemoveContainer for \"2688cc66adf8e4f8e9a5996f8a27f4527b00c7741e59aadea1022fc868f95648\""
	Dec 01 19:00:10 functional-616785 containerd[3198]: time="2023-12-01T19:00:10.477687659Z" level=info msg="RemoveContainer for \"2688cc66adf8e4f8e9a5996f8a27f4527b00c7741e59aadea1022fc868f95648\" returns successfully"
	Dec 01 19:00:10 functional-616785 containerd[3198]: time="2023-12-01T19:00:10.478396991Z" level=error msg="ContainerStatus for \"2688cc66adf8e4f8e9a5996f8a27f4527b00c7741e59aadea1022fc868f95648\" failed" error="rpc error: code = NotFound desc = an error occurred when try to find container \"2688cc66adf8e4f8e9a5996f8a27f4527b00c7741e59aadea1022fc868f95648\": not found"
	Dec 01 19:00:11 functional-616785 containerd[3198]: time="2023-12-01T19:00:11.164231108Z" level=error msg="ContainerStatus for \"2688cc66adf8e4f8e9a5996f8a27f4527b00c7741e59aadea1022fc868f95648\" failed" error="rpc error: code = NotFound desc = an error occurred when try to find container \"2688cc66adf8e4f8e9a5996f8a27f4527b00c7741e59aadea1022fc868f95648\": not found"
	
	* 
	* ==> coredns [6f24a29e8dde904ee7c79191a05e4233a4d3c4611dec66af1e784899ffd2f4fa] <==
	* .:53
	[INFO] plugin/reload: Running configuration SHA512 = 05e3eaddc414b2d71a69b2e2bc6f2681fc1f4d04bcdd3acc1a41457bb7db518208b95ddfc4c9fffedc59c25a8faf458be1af4915a4a3c0d6777cb7a346bc5d86
	CoreDNS-1.10.1
	linux/arm64, go1.20, 055b2c3
	[INFO] 127.0.0.1:45795 - 10878 "HINFO IN 4947552381348439214.1364120457825711732. udp 57 false 512" NXDOMAIN qr,rd,ra 57 0.024084799s
	[INFO] SIGTERM: Shutting down servers then terminating
	[INFO] plugin/health: Going into lameduck mode for 5s
	
	* 
	* ==> coredns [a46f9a2d3de02376724a6dd19261947d89ea2731cc6be9f11a7fc5f18e8c69b3] <==
	* .:53
	[INFO] plugin/reload: Running configuration SHA512 = 05e3eaddc414b2d71a69b2e2bc6f2681fc1f4d04bcdd3acc1a41457bb7db518208b95ddfc4c9fffedc59c25a8faf458be1af4915a4a3c0d6777cb7a346bc5d86
	CoreDNS-1.10.1
	linux/arm64, go1.20, 055b2c3
	[INFO] 127.0.0.1:37930 - 20236 "HINFO IN 5203344202659884113.4534884378912464259. udp 57 false 512" NXDOMAIN qr,rd,ra 57 0.014683259s
	[INFO] plugin/kubernetes: pkg/mod/k8s.io/client-go@v0.26.1/tools/cache/reflector.go:169: watch of *v1.EndpointSlice ended with: very short watch: pkg/mod/k8s.io/client-go@v0.26.1/tools/cache/reflector.go:169: Unexpected watch close - watch lasted less than a second and no items received
	[INFO] plugin/kubernetes: pkg/mod/k8s.io/client-go@v0.26.1/tools/cache/reflector.go:169: watch of *v1.Namespace ended with: very short watch: pkg/mod/k8s.io/client-go@v0.26.1/tools/cache/reflector.go:169: Unexpected watch close - watch lasted less than a second and no items received
	[INFO] plugin/kubernetes: pkg/mod/k8s.io/client-go@v0.26.1/tools/cache/reflector.go:169: watch of *v1.Service ended with: very short watch: pkg/mod/k8s.io/client-go@v0.26.1/tools/cache/reflector.go:169: Unexpected watch close - watch lasted less than a second and no items received
	[INFO] plugin/kubernetes: pkg/mod/k8s.io/client-go@v0.26.1/tools/cache/reflector.go:169: failed to list *v1.EndpointSlice: Get "https://10.96.0.1:443/apis/discovery.k8s.io/v1/endpointslices?resourceVersion=494": dial tcp 10.96.0.1:443: connect: connection refused
	[ERROR] plugin/kubernetes: pkg/mod/k8s.io/client-go@v0.26.1/tools/cache/reflector.go:169: Failed to watch *v1.EndpointSlice: failed to list *v1.EndpointSlice: Get "https://10.96.0.1:443/apis/discovery.k8s.io/v1/endpointslices?resourceVersion=494": dial tcp 10.96.0.1:443: connect: connection refused
	[INFO] plugin/kubernetes: pkg/mod/k8s.io/client-go@v0.26.1/tools/cache/reflector.go:169: failed to list *v1.Namespace: Get "https://10.96.0.1:443/api/v1/namespaces?resourceVersion=466": dial tcp 10.96.0.1:443: connect: connection refused
	[ERROR] plugin/kubernetes: pkg/mod/k8s.io/client-go@v0.26.1/tools/cache/reflector.go:169: Failed to watch *v1.Namespace: failed to list *v1.Namespace: Get "https://10.96.0.1:443/api/v1/namespaces?resourceVersion=466": dial tcp 10.96.0.1:443: connect: connection refused
	[INFO] plugin/kubernetes: pkg/mod/k8s.io/client-go@v0.26.1/tools/cache/reflector.go:169: failed to list *v1.Service: Get "https://10.96.0.1:443/api/v1/services?resourceVersion=483": dial tcp 10.96.0.1:443: connect: connection refused
	[ERROR] plugin/kubernetes: pkg/mod/k8s.io/client-go@v0.26.1/tools/cache/reflector.go:169: Failed to watch *v1.Service: failed to list *v1.Service: Get "https://10.96.0.1:443/api/v1/services?resourceVersion=483": dial tcp 10.96.0.1:443: connect: connection refused
	[INFO] plugin/kubernetes: pkg/mod/k8s.io/client-go@v0.26.1/tools/cache/reflector.go:169: failed to list *v1.EndpointSlice: Get "https://10.96.0.1:443/apis/discovery.k8s.io/v1/endpointslices?resourceVersion=494": dial tcp 10.96.0.1:443: connect: connection refused
	[ERROR] plugin/kubernetes: pkg/mod/k8s.io/client-go@v0.26.1/tools/cache/reflector.go:169: Failed to watch *v1.EndpointSlice: failed to list *v1.EndpointSlice: Get "https://10.96.0.1:443/apis/discovery.k8s.io/v1/endpointslices?resourceVersion=494": dial tcp 10.96.0.1:443: connect: connection refused
	
	* 
	* ==> describe nodes <==
	* 
	* ==> dmesg <==
	* [  +0.000754] FS-Cache: N-cookie c=0000000c [p=00000003 fl=2 nc=0 na=1]
	[  +0.001026] FS-Cache: N-cookie d=00000000c0e2b83e{9p.inode} n=00000000b55389ab
	[  +0.001117] FS-Cache: N-key=[8] 'a0385c0100000000'
	[  +0.002877] FS-Cache: Duplicate cookie detected
	[  +0.000724] FS-Cache: O-cookie c=00000006 [p=00000003 fl=226 nc=0 na=1]
	[  +0.001014] FS-Cache: O-cookie d=00000000c0e2b83e{9p.inode} n=0000000009a500bd
	[  +0.001116] FS-Cache: O-key=[8] 'a0385c0100000000'
	[  +0.000790] FS-Cache: N-cookie c=0000000d [p=00000003 fl=2 nc=0 na=1]
	[  +0.000985] FS-Cache: N-cookie d=00000000c0e2b83e{9p.inode} n=000000006d601d2f
	[  +0.001101] FS-Cache: N-key=[8] 'a0385c0100000000'
	[  +2.615492] FS-Cache: Duplicate cookie detected
	[  +0.000772] FS-Cache: O-cookie c=00000004 [p=00000003 fl=226 nc=0 na=1]
	[  +0.001004] FS-Cache: O-cookie d=00000000c0e2b83e{9p.inode} n=0000000010bf0fcb
	[  +0.001152] FS-Cache: O-key=[8] '9f385c0100000000'
	[  +0.000813] FS-Cache: N-cookie c=0000000f [p=00000003 fl=2 nc=0 na=1]
	[  +0.000990] FS-Cache: N-cookie d=00000000c0e2b83e{9p.inode} n=00000000b55389ab
	[  +0.001159] FS-Cache: N-key=[8] '9f385c0100000000'
	[  +0.329811] FS-Cache: Duplicate cookie detected
	[  +0.000747] FS-Cache: O-cookie c=00000009 [p=00000003 fl=226 nc=0 na=1]
	[  +0.001030] FS-Cache: O-cookie d=00000000c0e2b83e{9p.inode} n=00000000e473eb7d
	[  +0.001123] FS-Cache: O-key=[8] 'a7385c0100000000'
	[  +0.000733] FS-Cache: N-cookie c=00000010 [p=00000003 fl=2 nc=0 na=1]
	[  +0.000973] FS-Cache: N-cookie d=00000000c0e2b83e{9p.inode} n=000000003d8bf441
	[  +0.001090] FS-Cache: N-key=[8] 'a7385c0100000000'
	[Dec 1 17:49] kmem.limit_in_bytes is deprecated and will be removed. Please report your usecase to linux-mm@kvack.org if you depend on this functionality.
	
	* 
	* ==> etcd [0c2d4da8c8cce66a5ddb65b0ca03546edefdf740f59669699af3f7f312913564] <==
	* {"level":"info","ts":"2023-12-01T18:59:00.979045Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"aec36adc501070cc switched to configuration voters=(12593026477526642892)"}
	{"level":"info","ts":"2023-12-01T18:59:00.979238Z","caller":"membership/cluster.go:421","msg":"added member","cluster-id":"fa54960ea34d58be","local-member-id":"aec36adc501070cc","added-peer-id":"aec36adc501070cc","added-peer-peer-urls":["https://192.168.49.2:2380"]}
	{"level":"info","ts":"2023-12-01T18:59:00.980881Z","caller":"embed/etcd.go:726","msg":"starting with client TLS","tls-info":"cert = /var/lib/minikube/certs/etcd/server.crt, key = /var/lib/minikube/certs/etcd/server.key, client-cert=, client-key=, trusted-ca = /var/lib/minikube/certs/etcd/ca.crt, client-cert-auth = true, crl-file = ","cipher-suites":[]}
	{"level":"info","ts":"2023-12-01T18:59:00.981091Z","caller":"embed/etcd.go:597","msg":"serving peer traffic","address":"192.168.49.2:2380"}
	{"level":"info","ts":"2023-12-01T18:59:00.984488Z","caller":"embed/etcd.go:569","msg":"cmux::serve","address":"192.168.49.2:2380"}
	{"level":"info","ts":"2023-12-01T18:59:00.985258Z","caller":"embed/etcd.go:278","msg":"now serving peer/client/metrics","local-member-id":"aec36adc501070cc","initial-advertise-peer-urls":["https://192.168.49.2:2380"],"listen-peer-urls":["https://192.168.49.2:2380"],"advertise-client-urls":["https://192.168.49.2:2379"],"listen-client-urls":["https://127.0.0.1:2379","https://192.168.49.2:2379"],"listen-metrics-urls":["http://127.0.0.1:2381"]}
	{"level":"info","ts":"2023-12-01T18:59:00.985378Z","caller":"embed/etcd.go:855","msg":"serving metrics","address":"http://127.0.0.1:2381"}
	{"level":"info","ts":"2023-12-01T18:59:01.014113Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"aec36adc501070cc is starting a new election at term 1"}
	{"level":"info","ts":"2023-12-01T18:59:01.014356Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"aec36adc501070cc became pre-candidate at term 1"}
	{"level":"info","ts":"2023-12-01T18:59:01.014442Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"aec36adc501070cc received MsgPreVoteResp from aec36adc501070cc at term 1"}
	{"level":"info","ts":"2023-12-01T18:59:01.014567Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"aec36adc501070cc became candidate at term 2"}
	{"level":"info","ts":"2023-12-01T18:59:01.014651Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"aec36adc501070cc received MsgVoteResp from aec36adc501070cc at term 2"}
	{"level":"info","ts":"2023-12-01T18:59:01.014744Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"aec36adc501070cc became leader at term 2"}
	{"level":"info","ts":"2023-12-01T18:59:01.014819Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"raft.node: aec36adc501070cc elected leader aec36adc501070cc at term 2"}
	{"level":"info","ts":"2023-12-01T18:59:01.018905Z","caller":"etcdserver/server.go:2571","msg":"setting up initial cluster version using v2 API","cluster-version":"3.5"}
	{"level":"info","ts":"2023-12-01T18:59:01.021746Z","caller":"etcdserver/server.go:2062","msg":"published local member to cluster through raft","local-member-id":"aec36adc501070cc","local-member-attributes":"{Name:functional-616785 ClientURLs:[https://192.168.49.2:2379]}","request-path":"/0/members/aec36adc501070cc/attributes","cluster-id":"fa54960ea34d58be","publish-timeout":"7s"}
	{"level":"info","ts":"2023-12-01T18:59:01.021924Z","caller":"embed/serve.go:103","msg":"ready to serve client requests"}
	{"level":"info","ts":"2023-12-01T18:59:01.023071Z","caller":"embed/serve.go:250","msg":"serving client traffic securely","traffic":"grpc+http","address":"127.0.0.1:2379"}
	{"level":"info","ts":"2023-12-01T18:59:01.023696Z","caller":"embed/serve.go:103","msg":"ready to serve client requests"}
	{"level":"info","ts":"2023-12-01T18:59:01.024787Z","caller":"embed/serve.go:250","msg":"serving client traffic securely","traffic":"grpc+http","address":"192.168.49.2:2379"}
	{"level":"info","ts":"2023-12-01T18:59:01.02872Z","caller":"etcdmain/main.go:44","msg":"notifying init daemon"}
	{"level":"info","ts":"2023-12-01T18:59:01.030127Z","caller":"etcdmain/main.go:50","msg":"successfully notified init daemon"}
	{"level":"info","ts":"2023-12-01T18:59:01.030283Z","caller":"membership/cluster.go:584","msg":"set initial cluster version","cluster-id":"fa54960ea34d58be","local-member-id":"aec36adc501070cc","cluster-version":"3.5"}
	{"level":"info","ts":"2023-12-01T18:59:01.030438Z","caller":"api/capability.go:75","msg":"enabled capabilities for version","cluster-version":"3.5"}
	{"level":"info","ts":"2023-12-01T18:59:01.030541Z","caller":"etcdserver/server.go:2595","msg":"cluster version is updated","cluster-version":"3.5"}
	
	* 
	* ==> kernel <==
	*  19:00:13 up  1:42,  0 users,  load average: 1.12, 1.59, 1.73
	Linux functional-616785 5.15.0-1050-aws #55~20.04.1-Ubuntu SMP Mon Nov 6 12:18:16 UTC 2023 aarch64 aarch64 aarch64 GNU/Linux
	PRETTY_NAME="Ubuntu 22.04.3 LTS"
	
	* 
	* ==> kindnet [9efdb32584bcd1c4938cb1074c27c1b3d95cdc91deea1013f2b99669c0621313] <==
	* I1201 18:59:23.609639       1 main.go:102] connected to apiserver: https://10.96.0.1:443
	I1201 18:59:23.609709       1 main.go:107] hostIP = 192.168.49.2
	podIP = 192.168.49.2
	I1201 18:59:23.609870       1 main.go:116] setting mtu 1500 for CNI 
	I1201 18:59:23.609891       1 main.go:146] kindnetd IP family: "ipv4"
	I1201 18:59:23.609903       1 main.go:150] noMask IPv4 subnets: [10.244.0.0/16]
	I1201 18:59:24.107651       1 main.go:223] Handling node with IPs: map[192.168.49.2:{}]
	I1201 18:59:24.107686       1 main.go:227] handling current node
	I1201 18:59:34.206410       1 main.go:223] Handling node with IPs: map[192.168.49.2:{}]
	I1201 18:59:34.206445       1 main.go:227] handling current node
	I1201 18:59:44.220239       1 main.go:223] Handling node with IPs: map[192.168.49.2:{}]
	I1201 18:59:44.220267       1 main.go:227] handling current node
	I1201 18:59:54.224204       1 main.go:223] Handling node with IPs: map[192.168.49.2:{}]
	I1201 18:59:54.224235       1 main.go:227] handling current node
	I1201 19:00:04.234747       1 main.go:223] Handling node with IPs: map[192.168.49.2:{}]
	I1201 19:00:04.234778       1 main.go:227] handling current node
	
	* 
	* ==> kindnet [9f4183dd56963e73258dd3b04d0b72d0311cd083e5a23a4f9463bca0138848ba] <==
	* I1201 19:00:08.908137       1 main.go:102] connected to apiserver: https://10.96.0.1:443
	I1201 19:00:08.908218       1 main.go:107] hostIP = 192.168.49.2
	podIP = 192.168.49.2
	I1201 19:00:08.908403       1 main.go:116] setting mtu 1500 for CNI 
	I1201 19:00:08.908769       1 main.go:146] kindnetd IP family: "ipv4"
	I1201 19:00:08.908799       1 main.go:150] noMask IPv4 subnets: [10.244.0.0/16]
	I1201 19:00:09.306000       1 main.go:223] Handling node with IPs: map[192.168.49.2:{}]
	I1201 19:00:09.306035       1 main.go:227] handling current node
	
	* 
	* ==> kube-apiserver [7c7908b790bf54073f1315a8c1586f4746b82b87e895963cb52f53f06a6ddfd3] <==
	* I1201 19:00:08.781067       1 options.go:220] external host was not specified, using 192.168.49.2
	I1201 19:00:08.782421       1 server.go:148] Version: v1.28.4
	I1201 19:00:08.784548       1 server.go:150] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
	E1201 19:00:08.784953       1 run.go:74] "command failed" err="failed to create listener: failed to listen on 0.0.0.0:8441: listen tcp 0.0.0.0:8441: bind: address already in use"
	
	* 
	* ==> kube-controller-manager [a01aebe75ba55e11656030032922a9fe89ca68f6b88e61a6565e71e38c259849] <==
	* I1201 18:59:20.730183       1 shared_informer.go:318] Caches are synced for bootstrap_signer
	I1201 18:59:20.735719       1 shared_informer.go:318] Caches are synced for resource quota
	I1201 18:59:21.077631       1 shared_informer.go:318] Caches are synced for garbage collector
	I1201 18:59:21.132680       1 shared_informer.go:318] Caches are synced for garbage collector
	I1201 18:59:21.132715       1 garbagecollector.go:166] "All resource monitors have synced. Proceeding to collect garbage"
	I1201 18:59:21.188755       1 event.go:307] "Event occurred" object="kube-system/coredns" fieldPath="" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled up replica set coredns-5dd5756b68 to 2"
	I1201 18:59:21.358886       1 event.go:307] "Event occurred" object="kube-system/kindnet" fieldPath="" kind="DaemonSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: kindnet-6zm7x"
	I1201 18:59:21.372368       1 event.go:307] "Event occurred" object="kube-system/kube-proxy" fieldPath="" kind="DaemonSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: kube-proxy-d8cvf"
	I1201 18:59:21.549862       1 event.go:307] "Event occurred" object="kube-system/coredns" fieldPath="" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled down replica set coredns-5dd5756b68 to 1 from 2"
	I1201 18:59:21.573066       1 event.go:307] "Event occurred" object="kube-system/coredns-5dd5756b68" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: coredns-5dd5756b68-sg6kz"
	I1201 18:59:21.604974       1 event.go:307] "Event occurred" object="kube-system/coredns-5dd5756b68" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: coredns-5dd5756b68-ts5dh"
	I1201 18:59:21.685556       1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="497.616941ms"
	I1201 18:59:21.706789       1 event.go:307] "Event occurred" object="kube-system/coredns-5dd5756b68" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulDelete" message="Deleted pod: coredns-5dd5756b68-sg6kz"
	I1201 18:59:21.736168       1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="50.554121ms"
	I1201 18:59:21.766371       1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="30.149126ms"
	I1201 18:59:21.766506       1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="96.681µs"
	I1201 18:59:23.538754       1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="93.252µs"
	I1201 18:59:23.545800       1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="389.842µs"
	I1201 18:59:23.548835       1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="93.038µs"
	I1201 18:59:38.544286       1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="93.99µs"
	I1201 18:59:38.574653       1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="11.931528ms"
	I1201 18:59:38.575231       1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="74.01µs"
	I1201 19:00:08.370173       1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="23.885364ms"
	I1201 19:00:08.370264       1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="53.907µs"
	I1201 19:00:10.640613       1 node_lifecycle_controller.go:1029] "Controller detected that all Nodes are not-Ready. Entering master disruption mode"
	
	* 
	* ==> kube-proxy [1fb932a179c7041614a4fcc5715379bec1bebfa33c81139f5df677e9fa11945f] <==
	* I1201 18:59:23.498353       1 server_others.go:69] "Using iptables proxy"
	I1201 18:59:23.520600       1 node.go:141] Successfully retrieved node IP: 192.168.49.2
	I1201 18:59:23.563559       1 server.go:632] "kube-proxy running in dual-stack mode" primary ipFamily="IPv4"
	I1201 18:59:23.569567       1 server_others.go:152] "Using iptables Proxier"
	I1201 18:59:23.569604       1 server_others.go:421] "Detect-local-mode set to ClusterCIDR, but no cluster CIDR for family" ipFamily="IPv6"
	I1201 18:59:23.569613       1 server_others.go:438] "Defaulting to no-op detect-local"
	I1201 18:59:23.569762       1 proxier.go:251] "Setting route_localnet=1 to allow node-ports on localhost; to change this either disable iptables.localhostNodePorts (--iptables-localhost-nodeports) or set nodePortAddresses (--nodeport-addresses) to filter loopback addresses"
	I1201 18:59:23.570520       1 server.go:846] "Version info" version="v1.28.4"
	I1201 18:59:23.570537       1 server.go:848] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
	I1201 18:59:23.571624       1 config.go:188] "Starting service config controller"
	I1201 18:59:23.571771       1 shared_informer.go:311] Waiting for caches to sync for service config
	I1201 18:59:23.571801       1 config.go:97] "Starting endpoint slice config controller"
	I1201 18:59:23.571806       1 shared_informer.go:311] Waiting for caches to sync for endpoint slice config
	I1201 18:59:23.572697       1 config.go:315] "Starting node config controller"
	I1201 18:59:23.572767       1 shared_informer.go:311] Waiting for caches to sync for node config
	I1201 18:59:23.672107       1 shared_informer.go:318] Caches are synced for endpoint slice config
	I1201 18:59:23.672299       1 shared_informer.go:318] Caches are synced for service config
	I1201 18:59:23.672837       1 shared_informer.go:318] Caches are synced for node config
	
	* 
	* ==> kube-proxy [99140cde269c3bb7410bf1be86e8a909ba785666d6e95cc06fefd7c5cf8c76e3] <==
	* I1201 19:00:08.968039       1 server.go:848] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
	I1201 19:00:08.969104       1 config.go:188] "Starting service config controller"
	I1201 19:00:08.969469       1 shared_informer.go:311] Waiting for caches to sync for service config
	I1201 19:00:08.969647       1 config.go:97] "Starting endpoint slice config controller"
	I1201 19:00:08.969728       1 shared_informer.go:311] Waiting for caches to sync for endpoint slice config
	I1201 19:00:08.972773       1 config.go:315] "Starting node config controller"
	I1201 19:00:08.972913       1 shared_informer.go:311] Waiting for caches to sync for node config
	I1201 19:00:09.070117       1 shared_informer.go:318] Caches are synced for endpoint slice config
	I1201 19:00:09.070124       1 shared_informer.go:318] Caches are synced for service config
	I1201 19:00:09.073390       1 shared_informer.go:318] Caches are synced for node config
	W1201 19:00:09.365278       1 reflector.go:458] vendor/k8s.io/client-go/informers/factory.go:150: watch of *v1.Node ended with: very short watch: vendor/k8s.io/client-go/informers/factory.go:150: Unexpected watch close - watch lasted less than a second and no items received
	W1201 19:00:09.365342       1 reflector.go:458] vendor/k8s.io/client-go/informers/factory.go:150: watch of *v1.Service ended with: very short watch: vendor/k8s.io/client-go/informers/factory.go:150: Unexpected watch close - watch lasted less than a second and no items received
	W1201 19:00:09.365367       1 reflector.go:458] vendor/k8s.io/client-go/informers/factory.go:150: watch of *v1.EndpointSlice ended with: very short watch: vendor/k8s.io/client-go/informers/factory.go:150: Unexpected watch close - watch lasted less than a second and no items received
	W1201 19:00:10.432966       1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.Node: Get "https://control-plane.minikube.internal:8441/api/v1/nodes?fieldSelector=metadata.name%!D(MISSING)functional-616785&resourceVersion=476": dial tcp 192.168.49.2:8441: connect: connection refused
	E1201 19:00:10.433018       1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.Node: failed to list *v1.Node: Get "https://control-plane.minikube.internal:8441/api/v1/nodes?fieldSelector=metadata.name%!D(MISSING)functional-616785&resourceVersion=476": dial tcp 192.168.49.2:8441: connect: connection refused
	W1201 19:00:10.557764       1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.EndpointSlice: Get "https://control-plane.minikube.internal:8441/apis/discovery.k8s.io/v1/endpointslices?labelSelector=%!s(MISSING)ervice.kubernetes.io%!F(MISSING)headless%!C(MISSING)%!s(MISSING)ervice.kubernetes.io%!F(MISSING)service-proxy-name&resourceVersion=494": dial tcp 192.168.49.2:8441: connect: connection refused
	E1201 19:00:10.557826       1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.EndpointSlice: failed to list *v1.EndpointSlice: Get "https://control-plane.minikube.internal:8441/apis/discovery.k8s.io/v1/endpointslices?labelSelector=%!s(MISSING)ervice.kubernetes.io%!F(MISSING)headless%!C(MISSING)%!s(MISSING)ervice.kubernetes.io%!F(MISSING)service-proxy-name&resourceVersion=494": dial tcp 192.168.49.2:8441: connect: connection refused
	W1201 19:00:10.878895       1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.Service: Get "https://control-plane.minikube.internal:8441/api/v1/services?labelSelector=%!s(MISSING)ervice.kubernetes.io%!F(MISSING)headless%!C(MISSING)%!s(MISSING)ervice.kubernetes.io%!F(MISSING)service-proxy-name&resourceVersion=483": dial tcp 192.168.49.2:8441: connect: connection refused
	E1201 19:00:10.878942       1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.Service: failed to list *v1.Service: Get "https://control-plane.minikube.internal:8441/api/v1/services?labelSelector=%!s(MISSING)ervice.kubernetes.io%!F(MISSING)headless%!C(MISSING)%!s(MISSING)ervice.kubernetes.io%!F(MISSING)service-proxy-name&resourceVersion=483": dial tcp 192.168.49.2:8441: connect: connection refused
	W1201 19:00:12.182537       1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.Node: Get "https://control-plane.minikube.internal:8441/api/v1/nodes?fieldSelector=metadata.name%!D(MISSING)functional-616785&resourceVersion=476": dial tcp 192.168.49.2:8441: connect: connection refused
	E1201 19:00:12.182584       1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.Node: failed to list *v1.Node: Get "https://control-plane.minikube.internal:8441/api/v1/nodes?fieldSelector=metadata.name%!D(MISSING)functional-616785&resourceVersion=476": dial tcp 192.168.49.2:8441: connect: connection refused
	W1201 19:00:13.035569       1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.EndpointSlice: Get "https://control-plane.minikube.internal:8441/apis/discovery.k8s.io/v1/endpointslices?labelSelector=%!s(MISSING)ervice.kubernetes.io%!F(MISSING)headless%!C(MISSING)%!s(MISSING)ervice.kubernetes.io%!F(MISSING)service-proxy-name&resourceVersion=494": dial tcp 192.168.49.2:8441: connect: connection refused
	E1201 19:00:13.035618       1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.EndpointSlice: failed to list *v1.EndpointSlice: Get "https://control-plane.minikube.internal:8441/apis/discovery.k8s.io/v1/endpointslices?labelSelector=%!s(MISSING)ervice.kubernetes.io%!F(MISSING)headless%!C(MISSING)%!s(MISSING)ervice.kubernetes.io%!F(MISSING)service-proxy-name&resourceVersion=494": dial tcp 192.168.49.2:8441: connect: connection refused
	W1201 19:00:13.348591       1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.Service: Get "https://control-plane.minikube.internal:8441/api/v1/services?labelSelector=%!s(MISSING)ervice.kubernetes.io%!F(MISSING)headless%!C(MISSING)%!s(MISSING)ervice.kubernetes.io%!F(MISSING)service-proxy-name&resourceVersion=483": dial tcp 192.168.49.2:8441: connect: connection refused
	E1201 19:00:13.348642       1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.Service: failed to list *v1.Service: Get "https://control-plane.minikube.internal:8441/api/v1/services?labelSelector=%!s(MISSING)ervice.kubernetes.io%!F(MISSING)headless%!C(MISSING)%!s(MISSING)ervice.kubernetes.io%!F(MISSING)service-proxy-name&resourceVersion=483": dial tcp 192.168.49.2:8441: connect: connection refused
	
	* 
	* ==> kube-scheduler [6353341ab4be94e3126a82f86e5b7b6c810a95a11c1532a6524b40001c56ec1e] <==
	* W1201 18:59:04.862831       1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User "system:kube-scheduler" cannot list resource "poddisruptionbudgets" in API group "policy" at the cluster scope
	E1201 18:59:04.863943       1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.PodDisruptionBudget: failed to list *v1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User "system:kube-scheduler" cannot list resource "poddisruptionbudgets" in API group "policy" at the cluster scope
	W1201 18:59:05.669584       1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.ReplicaSet: replicasets.apps is forbidden: User "system:kube-scheduler" cannot list resource "replicasets" in API group "apps" at the cluster scope
	E1201 18:59:05.669624       1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.ReplicaSet: failed to list *v1.ReplicaSet: replicasets.apps is forbidden: User "system:kube-scheduler" cannot list resource "replicasets" in API group "apps" at the cluster scope
	W1201 18:59:05.672101       1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumes" in API group "" at the cluster scope
	E1201 18:59:05.672139       1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.PersistentVolume: failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumes" in API group "" at the cluster scope
	W1201 18:59:05.677069       1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.ReplicationController: replicationcontrollers is forbidden: User "system:kube-scheduler" cannot list resource "replicationcontrollers" in API group "" at the cluster scope
	E1201 18:59:05.677112       1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.ReplicationController: failed to list *v1.ReplicationController: replicationcontrollers is forbidden: User "system:kube-scheduler" cannot list resource "replicationcontrollers" in API group "" at the cluster scope
	W1201 18:59:05.746199       1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.Node: nodes is forbidden: User "system:kube-scheduler" cannot list resource "nodes" in API group "" at the cluster scope
	E1201 18:59:05.746534       1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.Node: failed to list *v1.Node: nodes is forbidden: User "system:kube-scheduler" cannot list resource "nodes" in API group "" at the cluster scope
	W1201 18:59:05.828450       1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumeclaims" in API group "" at the cluster scope
	E1201 18:59:05.828519       1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.PersistentVolumeClaim: failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumeclaims" in API group "" at the cluster scope
	W1201 18:59:05.912441       1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.Namespace: namespaces is forbidden: User "system:kube-scheduler" cannot list resource "namespaces" in API group "" at the cluster scope
	E1201 18:59:05.912680       1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.Namespace: failed to list *v1.Namespace: namespaces is forbidden: User "system:kube-scheduler" cannot list resource "namespaces" in API group "" at the cluster scope
	W1201 18:59:05.976223       1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.CSIStorageCapacity: csistoragecapacities.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csistoragecapacities" in API group "storage.k8s.io" at the cluster scope
	E1201 18:59:05.976399       1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.CSIStorageCapacity: failed to list *v1.CSIStorageCapacity: csistoragecapacities.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csistoragecapacities" in API group "storage.k8s.io" at the cluster scope
	W1201 18:59:05.979103       1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User "system:kube-scheduler" cannot list resource "poddisruptionbudgets" in API group "policy" at the cluster scope
	E1201 18:59:05.979232       1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.PodDisruptionBudget: failed to list *v1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User "system:kube-scheduler" cannot list resource "poddisruptionbudgets" in API group "policy" at the cluster scope
	W1201 18:59:06.021165       1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope
	E1201 18:59:06.021218       1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.StorageClass: failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope
	W1201 18:59:06.082719       1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.CSIDriver: csidrivers.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csidrivers" in API group "storage.k8s.io" at the cluster scope
	E1201 18:59:06.082973       1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: csidrivers.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csidrivers" in API group "storage.k8s.io" at the cluster scope
	W1201 18:59:06.148578       1 reflector.go:535] pkg/server/dynamiccertificates/configmap_cafile_content.go:206: failed to list *v1.ConfigMap: configmaps "extension-apiserver-authentication" is forbidden: User "system:kube-scheduler" cannot list resource "configmaps" in API group "" in the namespace "kube-system"
	E1201 18:59:06.148877       1 reflector.go:147] pkg/server/dynamiccertificates/configmap_cafile_content.go:206: Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps "extension-apiserver-authentication" is forbidden: User "system:kube-scheduler" cannot list resource "configmaps" in API group "" in the namespace "kube-system"
	I1201 18:59:08.317482       1 shared_informer.go:318] Caches are synced for client-ca::kube-system::extension-apiserver-authentication::client-ca-file
	
	* 
	* ==> kubelet <==
	* Dec 01 19:00:11 functional-616785 kubelet[3579]: I1201 19:00:11.307744    3579 kubelet_volumes.go:161] "Cleaned up orphaned pod volumes dir" podUID="399196d44eb6c509f7b60d72c5662125" path="/var/lib/kubelet/pods/399196d44eb6c509f7b60d72c5662125/volumes"
	Dec 01 19:00:11 functional-616785 kubelet[3579]: I1201 19:00:11.474334    3579 scope.go:117] "RemoveContainer" containerID="7c7908b790bf54073f1315a8c1586f4746b82b87e895963cb52f53f06a6ddfd3"
	Dec 01 19:00:11 functional-616785 kubelet[3579]: E1201 19:00:11.474888    3579 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver pod=kube-apiserver-functional-616785_kube-system(064115bd7120268299a4b217a1add347)\"" pod="kube-system/kube-apiserver-functional-616785" podUID="064115bd7120268299a4b217a1add347"
	Dec 01 19:00:11 functional-616785 kubelet[3579]: I1201 19:00:11.475040    3579 status_manager.go:853] "Failed to get status for pod" podUID="f95b7b7d-84db-44db-8038-dcfd7e1ab770" pod="kube-system/kube-proxy-d8cvf" err="Get \"https://control-plane.minikube.internal:8441/api/v1/namespaces/kube-system/pods/kube-proxy-d8cvf\": dial tcp 192.168.49.2:8441: connect: connection refused"
	Dec 01 19:00:11 functional-616785 kubelet[3579]: I1201 19:00:11.475304    3579 status_manager.go:853] "Failed to get status for pod" podUID="b5714ae4c71e32df6a08d33071c9d40f" pod="kube-system/kube-scheduler-functional-616785" err="Get \"https://control-plane.minikube.internal:8441/api/v1/namespaces/kube-system/pods/kube-scheduler-functional-616785\": dial tcp 192.168.49.2:8441: connect: connection refused"
	Dec 01 19:00:11 functional-616785 kubelet[3579]: I1201 19:00:11.475496    3579 status_manager.go:853] "Failed to get status for pod" podUID="064115bd7120268299a4b217a1add347" pod="kube-system/kube-apiserver-functional-616785" err="Get \"https://control-plane.minikube.internal:8441/api/v1/namespaces/kube-system/pods/kube-apiserver-functional-616785\": dial tcp 192.168.49.2:8441: connect: connection refused"
	Dec 01 19:00:11 functional-616785 kubelet[3579]: I1201 19:00:11.475670    3579 status_manager.go:853] "Failed to get status for pod" podUID="393607ea-a066-4d39-93eb-75c52a6ab29e" pod="kube-system/storage-provisioner" err="Get \"https://control-plane.minikube.internal:8441/api/v1/namespaces/kube-system/pods/storage-provisioner\": dial tcp 192.168.49.2:8441: connect: connection refused"
	Dec 01 19:00:11 functional-616785 kubelet[3579]: I1201 19:00:11.475844    3579 status_manager.go:853] "Failed to get status for pod" podUID="e02380f9-bcfc-4a99-a5c0-e2372e5f0ad4" pod="kube-system/kindnet-6zm7x" err="Get \"https://control-plane.minikube.internal:8441/api/v1/namespaces/kube-system/pods/kindnet-6zm7x\": dial tcp 192.168.49.2:8441: connect: connection refused"
	Dec 01 19:00:11 functional-616785 kubelet[3579]: I1201 19:00:11.476016    3579 status_manager.go:853] "Failed to get status for pod" podUID="daaa92f6-6743-4e3e-a5e9-0d3bea42c1e0" pod="kube-system/coredns-5dd5756b68-ts5dh" err="Get \"https://control-plane.minikube.internal:8441/api/v1/namespaces/kube-system/pods/coredns-5dd5756b68-ts5dh\": dial tcp 192.168.49.2:8441: connect: connection refused"
	Dec 01 19:00:12 functional-616785 kubelet[3579]: I1201 19:00:12.477921    3579 scope.go:117] "RemoveContainer" containerID="7c7908b790bf54073f1315a8c1586f4746b82b87e895963cb52f53f06a6ddfd3"
	Dec 01 19:00:12 functional-616785 kubelet[3579]: E1201 19:00:12.478553    3579 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver pod=kube-apiserver-functional-616785_kube-system(064115bd7120268299a4b217a1add347)\"" pod="kube-system/kube-apiserver-functional-616785" podUID="064115bd7120268299a4b217a1add347"
	Dec 01 19:00:12 functional-616785 kubelet[3579]: I1201 19:00:12.630927    3579 status_manager.go:853] "Failed to get status for pod" podUID="064115bd7120268299a4b217a1add347" pod="kube-system/kube-apiserver-functional-616785" err="Get \"https://control-plane.minikube.internal:8441/api/v1/namespaces/kube-system/pods/kube-apiserver-functional-616785\": dial tcp 192.168.49.2:8441: connect: connection refused"
	Dec 01 19:00:12 functional-616785 kubelet[3579]: I1201 19:00:12.631228    3579 status_manager.go:853] "Failed to get status for pod" podUID="012a17049c3f357a0b12f711f68b3301" pod="kube-system/kube-controller-manager-functional-616785" err="Get \"https://control-plane.minikube.internal:8441/api/v1/namespaces/kube-system/pods/kube-controller-manager-functional-616785\": dial tcp 192.168.49.2:8441: connect: connection refused"
	Dec 01 19:00:12 functional-616785 kubelet[3579]: I1201 19:00:12.633525    3579 status_manager.go:853] "Failed to get status for pod" podUID="393607ea-a066-4d39-93eb-75c52a6ab29e" pod="kube-system/storage-provisioner" err="Get \"https://control-plane.minikube.internal:8441/api/v1/namespaces/kube-system/pods/storage-provisioner\": dial tcp 192.168.49.2:8441: connect: connection refused"
	Dec 01 19:00:12 functional-616785 kubelet[3579]: I1201 19:00:12.633796    3579 status_manager.go:853] "Failed to get status for pod" podUID="e02380f9-bcfc-4a99-a5c0-e2372e5f0ad4" pod="kube-system/kindnet-6zm7x" err="Get \"https://control-plane.minikube.internal:8441/api/v1/namespaces/kube-system/pods/kindnet-6zm7x\": dial tcp 192.168.49.2:8441: connect: connection refused"
	Dec 01 19:00:12 functional-616785 kubelet[3579]: I1201 19:00:12.634039    3579 status_manager.go:853] "Failed to get status for pod" podUID="daaa92f6-6743-4e3e-a5e9-0d3bea42c1e0" pod="kube-system/coredns-5dd5756b68-ts5dh" err="Get \"https://control-plane.minikube.internal:8441/api/v1/namespaces/kube-system/pods/coredns-5dd5756b68-ts5dh\": dial tcp 192.168.49.2:8441: connect: connection refused"
	Dec 01 19:00:12 functional-616785 kubelet[3579]: I1201 19:00:12.634369    3579 status_manager.go:853] "Failed to get status for pod" podUID="f95b7b7d-84db-44db-8038-dcfd7e1ab770" pod="kube-system/kube-proxy-d8cvf" err="Get \"https://control-plane.minikube.internal:8441/api/v1/namespaces/kube-system/pods/kube-proxy-d8cvf\": dial tcp 192.168.49.2:8441: connect: connection refused"
	Dec 01 19:00:12 functional-616785 kubelet[3579]: I1201 19:00:12.634634    3579 status_manager.go:853] "Failed to get status for pod" podUID="b5714ae4c71e32df6a08d33071c9d40f" pod="kube-system/kube-scheduler-functional-616785" err="Get \"https://control-plane.minikube.internal:8441/api/v1/namespaces/kube-system/pods/kube-scheduler-functional-616785\": dial tcp 192.168.49.2:8441: connect: connection refused"
	Dec 01 19:00:13 functional-616785 kubelet[3579]: I1201 19:00:13.480450    3579 status_manager.go:853] "Failed to get status for pod" podUID="064115bd7120268299a4b217a1add347" pod="kube-system/kube-apiserver-functional-616785" err="Get \"https://control-plane.minikube.internal:8441/api/v1/namespaces/kube-system/pods/kube-apiserver-functional-616785\": dial tcp 192.168.49.2:8441: connect: connection refused"
	Dec 01 19:00:13 functional-616785 kubelet[3579]: I1201 19:00:13.480859    3579 status_manager.go:853] "Failed to get status for pod" podUID="012a17049c3f357a0b12f711f68b3301" pod="kube-system/kube-controller-manager-functional-616785" err="Get \"https://control-plane.minikube.internal:8441/api/v1/namespaces/kube-system/pods/kube-controller-manager-functional-616785\": dial tcp 192.168.49.2:8441: connect: connection refused"
	Dec 01 19:00:13 functional-616785 kubelet[3579]: I1201 19:00:13.481123    3579 status_manager.go:853] "Failed to get status for pod" podUID="393607ea-a066-4d39-93eb-75c52a6ab29e" pod="kube-system/storage-provisioner" err="Get \"https://control-plane.minikube.internal:8441/api/v1/namespaces/kube-system/pods/storage-provisioner\": dial tcp 192.168.49.2:8441: connect: connection refused"
	Dec 01 19:00:13 functional-616785 kubelet[3579]: I1201 19:00:13.481377    3579 status_manager.go:853] "Failed to get status for pod" podUID="e02380f9-bcfc-4a99-a5c0-e2372e5f0ad4" pod="kube-system/kindnet-6zm7x" err="Get \"https://control-plane.minikube.internal:8441/api/v1/namespaces/kube-system/pods/kindnet-6zm7x\": dial tcp 192.168.49.2:8441: connect: connection refused"
	Dec 01 19:00:13 functional-616785 kubelet[3579]: I1201 19:00:13.481943    3579 status_manager.go:853] "Failed to get status for pod" podUID="daaa92f6-6743-4e3e-a5e9-0d3bea42c1e0" pod="kube-system/coredns-5dd5756b68-ts5dh" err="Get \"https://control-plane.minikube.internal:8441/api/v1/namespaces/kube-system/pods/coredns-5dd5756b68-ts5dh\": dial tcp 192.168.49.2:8441: connect: connection refused"
	Dec 01 19:00:13 functional-616785 kubelet[3579]: I1201 19:00:13.482268    3579 status_manager.go:853] "Failed to get status for pod" podUID="f95b7b7d-84db-44db-8038-dcfd7e1ab770" pod="kube-system/kube-proxy-d8cvf" err="Get \"https://control-plane.minikube.internal:8441/api/v1/namespaces/kube-system/pods/kube-proxy-d8cvf\": dial tcp 192.168.49.2:8441: connect: connection refused"
	Dec 01 19:00:13 functional-616785 kubelet[3579]: I1201 19:00:13.482638    3579 status_manager.go:853] "Failed to get status for pod" podUID="b5714ae4c71e32df6a08d33071c9d40f" pod="kube-system/kube-scheduler-functional-616785" err="Get \"https://control-plane.minikube.internal:8441/api/v1/namespaces/kube-system/pods/kube-scheduler-functional-616785\": dial tcp 192.168.49.2:8441: connect: connection refused"
	
	* 
	* ==> storage-provisioner [172a910da74d5d4bd72533e43c42af2574c81da2cb8735f3148fb22366fc7b38] <==
	* I1201 18:59:53.741614       1 storage_provisioner.go:116] Initializing the minikube storage provisioner...
	I1201 18:59:53.754609       1 storage_provisioner.go:141] Storage provisioner initialized, now starting service!
	I1201 18:59:53.754704       1 leaderelection.go:243] attempting to acquire leader lease kube-system/k8s.io-minikube-hostpath...
	I1201 18:59:53.763812       1 leaderelection.go:253] successfully acquired lease kube-system/k8s.io-minikube-hostpath
	I1201 18:59:53.766136       1 controller.go:835] Starting provisioner controller k8s.io/minikube-hostpath_functional-616785_31b3a833-b9d7-486c-b75b-c46b58186b32!
	I1201 18:59:53.766892       1 event.go:282] Event(v1.ObjectReference{Kind:"Endpoints", Namespace:"kube-system", Name:"k8s.io-minikube-hostpath", UID:"dc41cc85-2a39-4b80-b12c-8ed2ca2b6e8f", APIVersion:"v1", ResourceVersion:"456", FieldPath:""}): type: 'Normal' reason: 'LeaderElection' functional-616785_31b3a833-b9d7-486c-b75b-c46b58186b32 became leader
	I1201 18:59:53.866332       1 controller.go:884] Started provisioner controller k8s.io/minikube-hostpath_functional-616785_31b3a833-b9d7-486c-b75b-c46b58186b32!
	
	* 
	* ==> storage-provisioner [477d114576c3196c1f8fc46abb75a2b06e00afd9cf1d61a1aac989f769c7e723] <==
	* I1201 19:00:08.787745       1 storage_provisioner.go:116] Initializing the minikube storage provisioner...
	I1201 19:00:08.820066       1 storage_provisioner.go:141] Storage provisioner initialized, now starting service!
	I1201 19:00:08.820690       1 leaderelection.go:243] attempting to acquire leader lease kube-system/k8s.io-minikube-hostpath...
	E1201 19:00:12.282980       1 leaderelection.go:325] error retrieving resource lock kube-system/k8s.io-minikube-hostpath: Get "https://10.96.0.1:443/api/v1/namespaces/kube-system/endpoints/k8s.io-minikube-hostpath": dial tcp 10.96.0.1:443: connect: connection refused
	
	

                                                
                                                
-- /stdout --
** stderr ** 
	E1201 19:00:13.455153  286387 logs.go:195] command /bin/bash -c "sudo /var/lib/minikube/binaries/v1.28.4/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig" failed with error: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.28.4/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig": Process exited with status 1
	stdout:
	
	stderr:
	The connection to the server localhost:8441 was refused - did you specify the right host or port?
	 output: "\n** stderr ** \nThe connection to the server localhost:8441 was refused - did you specify the right host or port?\n\n** /stderr **"
	! unable to fetch logs for: describe nodes

                                                
                                                
** /stderr **
helpers_test.go:254: (dbg) Run:  out/minikube-linux-arm64 status --format={{.APIServer}} -p functional-616785 -n functional-616785
helpers_test.go:254: (dbg) Non-zero exit: out/minikube-linux-arm64 status --format={{.APIServer}} -p functional-616785 -n functional-616785: exit status 2 (363.282451ms)

                                                
                                                
-- stdout --
	Stopped

                                                
                                                
-- /stdout --
helpers_test.go:254: status error: exit status 2 (may be ok)
helpers_test.go:256: "functional-616785" apiserver is not running, skipping kubectl commands (state="Stopped")
--- FAIL: TestFunctional/serial/ComponentHealth (2.43s)
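
The failure above is not a health-check bug but a consequence of the apiserver being stopped (exit status 2 from `minikube status`). The following is a minimal standalone sketch, not part of the test suite, that reproduces the same post-mortem check outside the test harness; the binary path, profile name and `--format={{.APIServer}}` flag are taken from the log output above.

// statuscheck.go: query the apiserver state of a profile and skip further
// kubectl work when it is not Running, mirroring the helper above.
package main

import (
	"fmt"
	"os"
	"os/exec"
	"strings"
)

func main() {
	out, err := exec.Command("out/minikube-linux-arm64",
		"status", "--format={{.APIServer}}", "-p", "functional-616785").CombinedOutput()
	state := strings.TrimSpace(string(out))
	if err != nil || state != "Running" {
		// exit status 2 only signals that a component is down, which is why
		// the helper logs "may be ok" and skips the kubectl-based assertions.
		fmt.Fprintf(os.Stderr, "apiserver is %q (err=%v), skipping kubectl checks\n", state, err)
		os.Exit(0)
	}
	fmt.Println("apiserver is Running")
}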

                                                
                                    
x
+
TestFunctional/serial/LogsFileCmd (1.67s)

                                                
                                                
=== RUN   TestFunctional/serial/LogsFileCmd
functional_test.go:1246: (dbg) Run:  out/minikube-linux-arm64 -p functional-616785 logs --file /tmp/TestFunctionalserialLogsFileCmd2960057172/001/logs.txt
functional_test.go:1246: (dbg) Done: out/minikube-linux-arm64 -p functional-616785 logs --file /tmp/TestFunctionalserialLogsFileCmd2960057172/001/logs.txt: (1.667825403s)
functional_test.go:1251: expected empty minikube logs output, but got: 
***
-- stdout --
	

                                                
                                                
-- /stdout --
** stderr ** 
	E1201 19:00:17.070965  286873 logs.go:195] command /bin/bash -c "sudo /var/lib/minikube/binaries/v1.28.4/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig" failed with error: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.28.4/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig": Process exited with status 1
	stdout:
	
	stderr:
	The connection to the server localhost:8441 was refused - did you specify the right host or port?
	 output: "\n** stderr ** \nThe connection to the server localhost:8441 was refused - did you specify the right host or port?\n\n** /stderr **"
	! unable to fetch logs for: describe nodes

                                                
                                                
** /stderr *****
--- FAIL: TestFunctional/serial/LogsFileCmd (1.67s)

                                                
                                    
x
+
TestFunctional/serial/InvalidService (0.08s)

                                                
                                                
=== RUN   TestFunctional/serial/InvalidService
functional_test.go:2317: (dbg) Run:  kubectl --context functional-616785 apply -f testdata/invalidsvc.yaml
functional_test.go:2317: (dbg) Non-zero exit: kubectl --context functional-616785 apply -f testdata/invalidsvc.yaml: exit status 1 (78.253032ms)

                                                
                                                
** stderr ** 
	The connection to the server 192.168.49.2:8441 was refused - did you specify the right host or port?

                                                
                                                
** /stderr **
functional_test.go:2319: kubectl --context functional-616785 apply -f testdata/invalidsvc.yaml failed: exit status 1
--- FAIL: TestFunctional/serial/InvalidService (0.08s)
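
The "connection refused" error shows the manifest never reached the cluster, so the test failed before the invalid-service logic was exercised. A minimal sketch of a pre-flight probe that would distinguish "bad manifest" from "apiserver unreachable"; the endpoint 192.168.49.2:8441 is taken from the error above and the probe is not something the test itself performs.

// reachability.go: check whether the apiserver port accepts TCP connections
// before attempting any kubectl apply.
package main

import (
	"fmt"
	"net"
	"time"
)

func main() {
	conn, err := net.DialTimeout("tcp", "192.168.49.2:8441", 3*time.Second)
	if err != nil {
		fmt.Println("apiserver unreachable:", err) // connection refused in this run
		return
	}
	conn.Close()
	fmt.Println("apiserver port is accepting connections")
}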

                                                
                                    
x
+
TestFunctional/parallel/TunnelCmd/serial/RunSecondTunnel (0.6s)

                                                
                                                
=== RUN   TestFunctional/parallel/TunnelCmd/serial/RunSecondTunnel
functional_test_tunnel_test.go:154: (dbg) daemon: [out/minikube-linux-arm64 -p functional-616785 tunnel --alsologtostderr]
functional_test_tunnel_test.go:154: (dbg) daemon: [out/minikube-linux-arm64 -p functional-616785 tunnel --alsologtostderr]
functional_test_tunnel_test.go:190: tunnel command failed with unexpected error: exit code 119. stderr: I1201 19:00:19.305676  287409 out.go:296] Setting OutFile to fd 1 ...
I1201 19:00:19.306570  287409 out.go:343] TERM=,COLORTERM=, which probably does not support color
I1201 19:00:19.306626  287409 out.go:309] Setting ErrFile to fd 2...
I1201 19:00:19.306649  287409 out.go:343] TERM=,COLORTERM=, which probably does not support color
I1201 19:00:19.307454  287409 root.go:338] Updating PATH: /home/jenkins/minikube-integration/17703-252966/.minikube/bin
I1201 19:00:19.309050  287409 mustload.go:65] Loading cluster: functional-616785
I1201 19:00:19.309612  287409 config.go:182] Loaded profile config "functional-616785": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.28.4
I1201 19:00:19.310301  287409 cli_runner.go:164] Run: docker container inspect functional-616785 --format={{.State.Status}}
I1201 19:00:19.344069  287409 host.go:66] Checking if "functional-616785" exists ...
I1201 19:00:19.344399  287409 cli_runner.go:164] Run: docker system info --format "{{json .}}"
I1201 19:00:19.512886  287409 info.go:266] docker info: {ID:EOU5:DNGX:XN6V:L2FZ:UXRM:5TWK:EVUR:KC2F:GT7Z:Y4O4:GB77:5PD3 Containers:1 ContainersRunning:1 ContainersPaused:0 ContainersStopped:0 Images:2 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local logentries splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:true BridgeNfIP6Tables:true Debug:false NFd:35 OomKillDisable:true NGoroutines:46 SystemTime:2023-12-01 19:00:19.498842588 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1050-aws OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Architecture:aarch64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:2 MemTotal:8215040000 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ip-172-31-31-251 Labels:[] ExperimentalBuild:false ServerVersion:24.0.7 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:d8f198a4ed8892c764191ef7b3b06d8a2eeb5c7f Expected:d8f198a4ed8892c764191ef7b3b06d8a2eeb5c7f} RuncCommit:{ID:v1.1.10-0-g18a0cb0 Expected:v1.1.10-0-g18a0cb0} InitCommit:{ID:de40ad0 Expected:de40ad0} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.11.2] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.21.0]] Warnings:<nil>}}
I1201 19:00:19.513015  287409 api_server.go:166] Checking apiserver status ...
I1201 19:00:19.513079  287409 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I1201 19:00:19.513120  287409 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" functional-616785
I1201 19:00:19.543275  287409 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33098 SSHKeyPath:/home/jenkins/minikube-integration/17703-252966/.minikube/machines/functional-616785/id_rsa Username:docker}
W1201 19:00:19.654572  287409 api_server.go:170] stopped: unable to get apiserver pid: sudo pgrep -xnf kube-apiserver.*minikube.*: Process exited with status 1
stdout:

                                                
                                                
stderr:
I1201 19:00:19.657310  287409 out.go:177] * This control plane is not running! (state=Stopped)
W1201 19:00:19.659003  287409 out.go:239] ! This is unusual - you may want to investigate using "minikube logs -p functional-616785"
! This is unusual - you may want to investigate using "minikube logs -p functional-616785"
I1201 19:00:19.660929  287409 out.go:177]   To start a cluster, run: "minikube start -p functional-616785"

                                                
                                                
stdout: * This control plane is not running! (state=Stopped)
To start a cluster, run: "minikube start -p functional-616785"
functional_test_tunnel_test.go:194: (dbg) stopping [out/minikube-linux-arm64 -p functional-616785 tunnel --alsologtostderr] ...
helpers_test.go:508: unable to kill pid 287410: os: process already finished
functional_test_tunnel_test.go:194: read stdout failed: read |0: file already closed
functional_test_tunnel_test.go:194: (dbg) [out/minikube-linux-arm64 -p functional-616785 tunnel --alsologtostderr] stdout:
functional_test_tunnel_test.go:194: read stderr failed: read |0: file already closed
functional_test_tunnel_test.go:194: (dbg) [out/minikube-linux-arm64 -p functional-616785 tunnel --alsologtostderr] stderr:
functional_test_tunnel_test.go:194: (dbg) stopping [out/minikube-linux-arm64 -p functional-616785 tunnel --alsologtostderr] ...
helpers_test.go:490: unable to find parent, assuming dead: process does not exist
functional_test_tunnel_test.go:194: read stdout failed: read |0: file already closed
functional_test_tunnel_test.go:194: (dbg) [out/minikube-linux-arm64 -p functional-616785 tunnel --alsologtostderr] stdout:
functional_test_tunnel_test.go:194: read stderr failed: read |0: file already closed
functional_test_tunnel_test.go:194: (dbg) [out/minikube-linux-arm64 -p functional-616785 tunnel --alsologtostderr] stderr:
--- FAIL: TestFunctional/parallel/TunnelCmd/serial/RunSecondTunnel (0.60s)

                                                
                                    
x
+
TestFunctional/parallel/TunnelCmd/serial/WaitService/Setup (0.09s)

                                                
                                                
=== RUN   TestFunctional/parallel/TunnelCmd/serial/WaitService/Setup
functional_test_tunnel_test.go:212: (dbg) Run:  kubectl --context functional-616785 apply -f testdata/testsvc.yaml
functional_test_tunnel_test.go:212: (dbg) Non-zero exit: kubectl --context functional-616785 apply -f testdata/testsvc.yaml: exit status 1 (88.037247ms)

                                                
                                                
** stderr ** 
	The connection to the server 192.168.49.2:8441 was refused - did you specify the right host or port?

                                                
                                                
** /stderr **
functional_test_tunnel_test.go:214: kubectl --context functional-616785 apply -f testdata/testsvc.yaml failed: exit status 1
--- FAIL: TestFunctional/parallel/TunnelCmd/serial/WaitService/Setup (0.09s)

                                                
                                    
x
+
TestFunctional/parallel/TunnelCmd/serial/AccessDirect (116.27s)

                                                
                                                
=== RUN   TestFunctional/parallel/TunnelCmd/serial/AccessDirect
functional_test_tunnel_test.go:288: failed to hit nginx at "http://": Temporary Error: Get "http:": http: no Host in request URL
functional_test_tunnel_test.go:290: (dbg) Run:  kubectl --context functional-616785 get svc nginx-svc
functional_test_tunnel_test.go:290: (dbg) Non-zero exit: kubectl --context functional-616785 get svc nginx-svc: exit status 1 (83.495185ms)

                                                
                                                
** stderr ** 
	Error from server (NotFound): services "nginx-svc" not found

                                                
                                                
** /stderr **
functional_test_tunnel_test.go:292: kubectl --context functional-616785 get svc nginx-svc failed: exit status 1
functional_test_tunnel_test.go:294: failed to kubectl get svc nginx-svc:
functional_test_tunnel_test.go:301: expected body to contain "Welcome to nginx!", but got *""*
--- FAIL: TestFunctional/parallel/TunnelCmd/serial/AccessDirect (116.27s)
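
The empty URL in the error above ("no Host in request URL") reflects the fact that the nginx-svc service was never created, so the tunnel had nothing to expose. As a minimal sketch of the check the test ultimately performs, the snippet below fetches a tunnelled service and looks for the nginx welcome page; the URL is hypothetical, since no ClusterIP existed in this run.

// accesscheck.go: fetch a tunnelled service and verify the response body
// contains the nginx welcome banner.
package main

import (
	"fmt"
	"io"
	"net/http"
	"strings"
	"time"
)

func main() {
	client := &http.Client{Timeout: 5 * time.Second}
	resp, err := client.Get("http://10.96.0.200/") // hypothetical ClusterIP reachable via `minikube tunnel`
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close()
	body, _ := io.ReadAll(resp.Body)
	if strings.Contains(string(body), "Welcome to nginx!") {
		fmt.Println("tunnel serves the nginx welcome page")
	} else {
		fmt.Println("unexpected body")
	}
}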

                                                
                                    
x
+
TestFunctional/parallel/ImageCommands/ImageLoadDaemon (4.75s)

                                                
                                                
=== RUN   TestFunctional/parallel/ImageCommands/ImageLoadDaemon
functional_test.go:354: (dbg) Run:  out/minikube-linux-arm64 -p functional-616785 image load --daemon gcr.io/google-containers/addon-resizer:functional-616785 --alsologtostderr
functional_test.go:354: (dbg) Done: out/minikube-linux-arm64 -p functional-616785 image load --daemon gcr.io/google-containers/addon-resizer:functional-616785 --alsologtostderr: (4.373378368s)
functional_test.go:447: (dbg) Run:  out/minikube-linux-arm64 -p functional-616785 image ls
functional_test.go:442: expected "gcr.io/google-containers/addon-resizer:functional-616785" to be loaded into minikube but the image is not there
--- FAIL: TestFunctional/parallel/ImageCommands/ImageLoadDaemon (4.75s)
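
The `image load --daemon` command returned successfully, yet the follow-up `image ls` did not show the tag, which is the assertion that failed. A minimal sketch of that load-then-verify sequence as a standalone program; the binary path, profile and image name are taken from the log above, and this is not the test's own implementation.

// imagecheck.go: load an image from the local Docker daemon into the cluster
// and confirm it appears in `minikube image ls`.
package main

import (
	"fmt"
	"os/exec"
	"strings"
)

func main() {
	img := "gcr.io/google-containers/addon-resizer:functional-616785"
	if err := exec.Command("out/minikube-linux-arm64", "-p", "functional-616785",
		"image", "load", "--daemon", img).Run(); err != nil {
		fmt.Println("image load failed:", err)
		return
	}
	out, err := exec.Command("out/minikube-linux-arm64", "-p", "functional-616785",
		"image", "ls").Output()
	if err != nil {
		fmt.Println("image ls failed:", err)
		return
	}
	if strings.Contains(string(out), img) {
		fmt.Println("image is present in the cluster")
	} else {
		fmt.Println("image is missing, as reported by the failing test")
	}
}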

                                                
                                    
x
+
TestFunctional/parallel/ImageCommands/ImageReloadDaemon (3.54s)

                                                
                                                
=== RUN   TestFunctional/parallel/ImageCommands/ImageReloadDaemon
functional_test.go:364: (dbg) Run:  out/minikube-linux-arm64 -p functional-616785 image load --daemon gcr.io/google-containers/addon-resizer:functional-616785 --alsologtostderr
functional_test.go:364: (dbg) Done: out/minikube-linux-arm64 -p functional-616785 image load --daemon gcr.io/google-containers/addon-resizer:functional-616785 --alsologtostderr: (3.269807674s)
functional_test.go:447: (dbg) Run:  out/minikube-linux-arm64 -p functional-616785 image ls
functional_test.go:442: expected "gcr.io/google-containers/addon-resizer:functional-616785" to be loaded into minikube but the image is not there
--- FAIL: TestFunctional/parallel/ImageCommands/ImageReloadDaemon (3.54s)

                                                
                                    
x
+
TestFunctional/parallel/ImageCommands/ImageTagAndLoadDaemon (6.04s)

                                                
                                                
=== RUN   TestFunctional/parallel/ImageCommands/ImageTagAndLoadDaemon
functional_test.go:234: (dbg) Run:  docker pull gcr.io/google-containers/addon-resizer:1.8.9
functional_test.go:234: (dbg) Done: docker pull gcr.io/google-containers/addon-resizer:1.8.9: (2.567855949s)
functional_test.go:239: (dbg) Run:  docker tag gcr.io/google-containers/addon-resizer:1.8.9 gcr.io/google-containers/addon-resizer:functional-616785
functional_test.go:244: (dbg) Run:  out/minikube-linux-arm64 -p functional-616785 image load --daemon gcr.io/google-containers/addon-resizer:functional-616785 --alsologtostderr
functional_test.go:244: (dbg) Done: out/minikube-linux-arm64 -p functional-616785 image load --daemon gcr.io/google-containers/addon-resizer:functional-616785 --alsologtostderr: (3.167107246s)
functional_test.go:447: (dbg) Run:  out/minikube-linux-arm64 -p functional-616785 image ls
functional_test.go:442: expected "gcr.io/google-containers/addon-resizer:functional-616785" to be loaded into minikube but the image is not there
--- FAIL: TestFunctional/parallel/ImageCommands/ImageTagAndLoadDaemon (6.04s)

                                                
                                    
x
+
TestFunctional/parallel/ImageCommands/ImageSaveToFile (0.59s)

                                                
                                                
=== RUN   TestFunctional/parallel/ImageCommands/ImageSaveToFile
functional_test.go:379: (dbg) Run:  out/minikube-linux-arm64 -p functional-616785 image save gcr.io/google-containers/addon-resizer:functional-616785 /home/jenkins/workspace/Docker_Linux_containerd_arm64/addon-resizer-save.tar --alsologtostderr
functional_test.go:385: expected "/home/jenkins/workspace/Docker_Linux_containerd_arm64/addon-resizer-save.tar" to exist after `image save`, but doesn't exist
--- FAIL: TestFunctional/parallel/ImageCommands/ImageSaveToFile (0.59s)
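
The assertion here is a simple existence check on the tarball that `image save` was asked to write; the file was never created, which also explains the ImageLoadFromFile failure further down. A minimal sketch of that check, with the path taken from the log above.

// savecheck.go: verify that the tarball produced by `image save` exists.
package main

import (
	"fmt"
	"os"
)

func main() {
	p := "/home/jenkins/workspace/Docker_Linux_containerd_arm64/addon-resizer-save.tar"
	if _, err := os.Stat(p); err != nil {
		fmt.Println("tarball not found:", err) // matches this test failure
		return
	}
	fmt.Println("tarball exists")
}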

                                                
                                    
x
+
TestFunctional/parallel/ImageCommands/ImageLoadFromFile (0.23s)

                                                
                                                
=== RUN   TestFunctional/parallel/ImageCommands/ImageLoadFromFile
functional_test.go:408: (dbg) Run:  out/minikube-linux-arm64 -p functional-616785 image load /home/jenkins/workspace/Docker_Linux_containerd_arm64/addon-resizer-save.tar --alsologtostderr
functional_test.go:410: loading image into minikube from file: <nil>

                                                
                                                
** stderr ** 
	I1201 19:02:43.768033  292008 out.go:296] Setting OutFile to fd 1 ...
	I1201 19:02:43.768668  292008 out.go:343] TERM=,COLORTERM=, which probably does not support color
	I1201 19:02:43.768685  292008 out.go:309] Setting ErrFile to fd 2...
	I1201 19:02:43.768693  292008 out.go:343] TERM=,COLORTERM=, which probably does not support color
	I1201 19:02:43.768995  292008 root.go:338] Updating PATH: /home/jenkins/minikube-integration/17703-252966/.minikube/bin
	I1201 19:02:43.769939  292008 config.go:182] Loaded profile config "functional-616785": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.28.4
	I1201 19:02:43.770110  292008 config.go:182] Loaded profile config "functional-616785": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.28.4
	I1201 19:02:43.770744  292008 cli_runner.go:164] Run: docker container inspect functional-616785 --format={{.State.Status}}
	I1201 19:02:43.792168  292008 ssh_runner.go:195] Run: systemctl --version
	I1201 19:02:43.792269  292008 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" functional-616785
	I1201 19:02:43.814229  292008 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33098 SSHKeyPath:/home/jenkins/minikube-integration/17703-252966/.minikube/machines/functional-616785/id_rsa Username:docker}
	I1201 19:02:43.919696  292008 cache_images.go:286] Loading image from: /home/jenkins/workspace/Docker_Linux_containerd_arm64/addon-resizer-save.tar
	W1201 19:02:43.919754  292008 cache_images.go:254] Failed to load cached images for profile functional-616785. make sure the profile is running. loading images: stat /home/jenkins/workspace/Docker_Linux_containerd_arm64/addon-resizer-save.tar: no such file or directory
	I1201 19:02:43.919771  292008 cache_images.go:262] succeeded pushing to: 
	I1201 19:02:43.919790  292008 cache_images.go:263] failed pushing to: functional-616785

                                                
                                                
** /stderr **
--- FAIL: TestFunctional/parallel/ImageCommands/ImageLoadFromFile (0.23s)

                                                
                                    
x
+
TestIngressAddonLegacy/serial/ValidateIngressAddons (50.73s)

                                                
                                                
=== RUN   TestIngressAddonLegacy/serial/ValidateIngressAddons
addons_test.go:206: (dbg) Run:  kubectl --context ingress-addon-legacy-853196 wait --for=condition=ready --namespace=ingress-nginx pod --selector=app.kubernetes.io/component=controller --timeout=90s
addons_test.go:206: (dbg) Done: kubectl --context ingress-addon-legacy-853196 wait --for=condition=ready --namespace=ingress-nginx pod --selector=app.kubernetes.io/component=controller --timeout=90s: (11.78973316s)
addons_test.go:231: (dbg) Run:  kubectl --context ingress-addon-legacy-853196 replace --force -f testdata/nginx-ingress-v1beta1.yaml
addons_test.go:244: (dbg) Run:  kubectl --context ingress-addon-legacy-853196 replace --force -f testdata/nginx-pod-svc.yaml
addons_test.go:249: (dbg) TestIngressAddonLegacy/serial/ValidateIngressAddons: waiting 8m0s for pods matching "run=nginx" in namespace "default" ...
helpers_test.go:344: "nginx" [e7cb516c-dd8a-45b0-9324-d92f4eb3e410] Pending / Ready:ContainersNotReady (containers with unready status: [nginx]) / ContainersReady:ContainersNotReady (containers with unready status: [nginx])
E1201 19:04:52.072596  258301 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/addons-488129/client.crt: no such file or directory
helpers_test.go:344: "nginx" [e7cb516c-dd8a-45b0-9324-d92f4eb3e410] Running
addons_test.go:249: (dbg) TestIngressAddonLegacy/serial/ValidateIngressAddons: run=nginx healthy within 8.026235452s
addons_test.go:261: (dbg) Run:  out/minikube-linux-arm64 -p ingress-addon-legacy-853196 ssh "curl -s http://127.0.0.1/ -H 'Host: nginx.example.com'"
addons_test.go:285: (dbg) Run:  kubectl --context ingress-addon-legacy-853196 replace --force -f testdata/ingress-dns-example-v1beta1.yaml
addons_test.go:290: (dbg) Run:  out/minikube-linux-arm64 -p ingress-addon-legacy-853196 ip
addons_test.go:296: (dbg) Run:  nslookup hello-john.test 192.168.49.2
addons_test.go:296: (dbg) Non-zero exit: nslookup hello-john.test 192.168.49.2: exit status 1 (15.025580626s)

                                                
                                                
-- stdout --
	;; connection timed out; no servers could be reached
	
	

                                                
                                                
-- /stdout --
addons_test.go:298: failed to nslookup hello-john.test host. args "nslookup hello-john.test 192.168.49.2" : exit status 1
addons_test.go:302: unexpected output from nslookup. stdout: ;; connection timed out; no servers could be reached

                                                
                                                

                                                
                                                

                                                
                                                
stderr: 
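
The nslookup timeout above means the ingress-dns addon at 192.168.49.2 never answered on port 53. A minimal sketch, separate from the test itself, of the same query done with a custom resolver instead of shelling out to nslookup; the server IP and hostname are taken from the failing command above.

// dnscheck.go: resolve the ingress-dns test hostname against the addon's
// DNS endpoint directly.
package main

import (
	"context"
	"fmt"
	"net"
	"time"
)

func main() {
	r := &net.Resolver{
		PreferGo: true,
		Dial: func(ctx context.Context, network, _ string) (net.Conn, error) {
			d := net.Dialer{Timeout: 3 * time.Second}
			return d.DialContext(ctx, network, "192.168.49.2:53") // the addon's DNS endpoint
		},
	}
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	addrs, err := r.LookupHost(ctx, "hello-john.test")
	if err != nil {
		fmt.Println("lookup failed (no servers could be reached in this run):", err)
		return
	}
	fmt.Println("resolved to", addrs)
}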
addons_test.go:305: (dbg) Run:  out/minikube-linux-arm64 -p ingress-addon-legacy-853196 addons disable ingress-dns --alsologtostderr -v=1
addons_test.go:305: (dbg) Done: out/minikube-linux-arm64 -p ingress-addon-legacy-853196 addons disable ingress-dns --alsologtostderr -v=1: (4.181097684s)
addons_test.go:310: (dbg) Run:  out/minikube-linux-arm64 -p ingress-addon-legacy-853196 addons disable ingress --alsologtostderr -v=1
E1201 19:05:19.452040  258301 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/functional-616785/client.crt: no such file or directory
E1201 19:05:19.457428  258301 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/functional-616785/client.crt: no such file or directory
E1201 19:05:19.467765  258301 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/functional-616785/client.crt: no such file or directory
E1201 19:05:19.488109  258301 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/functional-616785/client.crt: no such file or directory
E1201 19:05:19.528436  258301 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/functional-616785/client.crt: no such file or directory
E1201 19:05:19.608809  258301 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/functional-616785/client.crt: no such file or directory
E1201 19:05:19.769098  258301 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/functional-616785/client.crt: no such file or directory
E1201 19:05:20.089682  258301 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/functional-616785/client.crt: no such file or directory
E1201 19:05:20.730591  258301 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/functional-616785/client.crt: no such file or directory
E1201 19:05:22.011115  258301 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/functional-616785/client.crt: no such file or directory
E1201 19:05:24.571945  258301 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/functional-616785/client.crt: no such file or directory
addons_test.go:310: (dbg) Done: out/minikube-linux-arm64 -p ingress-addon-legacy-853196 addons disable ingress --alsologtostderr -v=1: (7.571254491s)
helpers_test.go:222: -----------------------post-mortem--------------------------------
helpers_test.go:230: ======>  post-mortem[TestIngressAddonLegacy/serial/ValidateIngressAddons]: docker inspect <======
helpers_test.go:231: (dbg) Run:  docker inspect ingress-addon-legacy-853196
helpers_test.go:235: (dbg) docker inspect ingress-addon-legacy-853196:

                                                
                                                
-- stdout --
	[
	    {
	        "Id": "de2b802fd021291d1c3d8c4d15bb5b6559a0539dd8c5003849f26eddff95c713",
	        "Created": "2023-12-01T19:03:17.307428738Z",
	        "Path": "/usr/local/bin/entrypoint",
	        "Args": [
	            "/sbin/init"
	        ],
	        "State": {
	            "Status": "running",
	            "Running": true,
	            "Paused": false,
	            "Restarting": false,
	            "OOMKilled": false,
	            "Dead": false,
	            "Pid": 293146,
	            "ExitCode": 0,
	            "Error": "",
	            "StartedAt": "2023-12-01T19:03:17.654841955Z",
	            "FinishedAt": "0001-01-01T00:00:00Z"
	        },
	        "Image": "sha256:e4e0f3cc6f04c458835e9edb05d52f031520d40521bc3568d81cbb7c06a79ef2",
	        "ResolvConfPath": "/var/lib/docker/containers/de2b802fd021291d1c3d8c4d15bb5b6559a0539dd8c5003849f26eddff95c713/resolv.conf",
	        "HostnamePath": "/var/lib/docker/containers/de2b802fd021291d1c3d8c4d15bb5b6559a0539dd8c5003849f26eddff95c713/hostname",
	        "HostsPath": "/var/lib/docker/containers/de2b802fd021291d1c3d8c4d15bb5b6559a0539dd8c5003849f26eddff95c713/hosts",
	        "LogPath": "/var/lib/docker/containers/de2b802fd021291d1c3d8c4d15bb5b6559a0539dd8c5003849f26eddff95c713/de2b802fd021291d1c3d8c4d15bb5b6559a0539dd8c5003849f26eddff95c713-json.log",
	        "Name": "/ingress-addon-legacy-853196",
	        "RestartCount": 0,
	        "Driver": "overlay2",
	        "Platform": "linux",
	        "MountLabel": "",
	        "ProcessLabel": "",
	        "AppArmorProfile": "unconfined",
	        "ExecIDs": null,
	        "HostConfig": {
	            "Binds": [
	                "/lib/modules:/lib/modules:ro",
	                "ingress-addon-legacy-853196:/var"
	            ],
	            "ContainerIDFile": "",
	            "LogConfig": {
	                "Type": "json-file",
	                "Config": {}
	            },
	            "NetworkMode": "ingress-addon-legacy-853196",
	            "PortBindings": {
	                "22/tcp": [
	                    {
	                        "HostIp": "127.0.0.1",
	                        "HostPort": ""
	                    }
	                ],
	                "2376/tcp": [
	                    {
	                        "HostIp": "127.0.0.1",
	                        "HostPort": ""
	                    }
	                ],
	                "32443/tcp": [
	                    {
	                        "HostIp": "127.0.0.1",
	                        "HostPort": ""
	                    }
	                ],
	                "5000/tcp": [
	                    {
	                        "HostIp": "127.0.0.1",
	                        "HostPort": ""
	                    }
	                ],
	                "8443/tcp": [
	                    {
	                        "HostIp": "127.0.0.1",
	                        "HostPort": ""
	                    }
	                ]
	            },
	            "RestartPolicy": {
	                "Name": "no",
	                "MaximumRetryCount": 0
	            },
	            "AutoRemove": false,
	            "VolumeDriver": "",
	            "VolumesFrom": null,
	            "ConsoleSize": [
	                0,
	                0
	            ],
	            "CapAdd": null,
	            "CapDrop": null,
	            "CgroupnsMode": "host",
	            "Dns": [],
	            "DnsOptions": [],
	            "DnsSearch": [],
	            "ExtraHosts": null,
	            "GroupAdd": null,
	            "IpcMode": "private",
	            "Cgroup": "",
	            "Links": null,
	            "OomScoreAdj": 0,
	            "PidMode": "",
	            "Privileged": true,
	            "PublishAllPorts": false,
	            "ReadonlyRootfs": false,
	            "SecurityOpt": [
	                "seccomp=unconfined",
	                "apparmor=unconfined",
	                "label=disable"
	            ],
	            "Tmpfs": {
	                "/run": "",
	                "/tmp": ""
	            },
	            "UTSMode": "",
	            "UsernsMode": "",
	            "ShmSize": 67108864,
	            "Runtime": "runc",
	            "Isolation": "",
	            "CpuShares": 0,
	            "Memory": 4294967296,
	            "NanoCpus": 2000000000,
	            "CgroupParent": "",
	            "BlkioWeight": 0,
	            "BlkioWeightDevice": [],
	            "BlkioDeviceReadBps": [],
	            "BlkioDeviceWriteBps": [],
	            "BlkioDeviceReadIOps": [],
	            "BlkioDeviceWriteIOps": [],
	            "CpuPeriod": 0,
	            "CpuQuota": 0,
	            "CpuRealtimePeriod": 0,
	            "CpuRealtimeRuntime": 0,
	            "CpusetCpus": "",
	            "CpusetMems": "",
	            "Devices": [],
	            "DeviceCgroupRules": null,
	            "DeviceRequests": null,
	            "MemoryReservation": 0,
	            "MemorySwap": 8589934592,
	            "MemorySwappiness": null,
	            "OomKillDisable": false,
	            "PidsLimit": null,
	            "Ulimits": null,
	            "CpuCount": 0,
	            "CpuPercent": 0,
	            "IOMaximumIOps": 0,
	            "IOMaximumBandwidth": 0,
	            "MaskedPaths": null,
	            "ReadonlyPaths": null
	        },
	        "GraphDriver": {
	            "Data": {
	                "LowerDir": "/var/lib/docker/overlay2/720cece81625f3f050bf15c65f8b54caf085d2df3a9e194b9d779b2f2bb92865-init/diff:/var/lib/docker/overlay2/049ae54891020b74263d4d0f668244f51ae19df0871773fd59686314976f2fd9/diff",
	                "MergedDir": "/var/lib/docker/overlay2/720cece81625f3f050bf15c65f8b54caf085d2df3a9e194b9d779b2f2bb92865/merged",
	                "UpperDir": "/var/lib/docker/overlay2/720cece81625f3f050bf15c65f8b54caf085d2df3a9e194b9d779b2f2bb92865/diff",
	                "WorkDir": "/var/lib/docker/overlay2/720cece81625f3f050bf15c65f8b54caf085d2df3a9e194b9d779b2f2bb92865/work"
	            },
	            "Name": "overlay2"
	        },
	        "Mounts": [
	            {
	                "Type": "bind",
	                "Source": "/lib/modules",
	                "Destination": "/lib/modules",
	                "Mode": "ro",
	                "RW": false,
	                "Propagation": "rprivate"
	            },
	            {
	                "Type": "volume",
	                "Name": "ingress-addon-legacy-853196",
	                "Source": "/var/lib/docker/volumes/ingress-addon-legacy-853196/_data",
	                "Destination": "/var",
	                "Driver": "local",
	                "Mode": "z",
	                "RW": true,
	                "Propagation": ""
	            }
	        ],
	        "Config": {
	            "Hostname": "ingress-addon-legacy-853196",
	            "Domainname": "",
	            "User": "",
	            "AttachStdin": false,
	            "AttachStdout": false,
	            "AttachStderr": false,
	            "ExposedPorts": {
	                "22/tcp": {},
	                "2376/tcp": {},
	                "32443/tcp": {},
	                "5000/tcp": {},
	                "8443/tcp": {}
	            },
	            "Tty": true,
	            "OpenStdin": false,
	            "StdinOnce": false,
	            "Env": [
	                "container=docker",
	                "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
	            ],
	            "Cmd": null,
	            "Image": "gcr.io/k8s-minikube/kicbase-builds:v0.0.42-1701387262-17703@sha256:a5458414df1be5e58eff93b3e67e6ecaad7e51ab23139de15714f7345af15e2f",
	            "Volumes": null,
	            "WorkingDir": "/",
	            "Entrypoint": [
	                "/usr/local/bin/entrypoint",
	                "/sbin/init"
	            ],
	            "OnBuild": null,
	            "Labels": {
	                "created_by.minikube.sigs.k8s.io": "true",
	                "mode.minikube.sigs.k8s.io": "ingress-addon-legacy-853196",
	                "name.minikube.sigs.k8s.io": "ingress-addon-legacy-853196",
	                "role.minikube.sigs.k8s.io": ""
	            },
	            "StopSignal": "SIGRTMIN+3"
	        },
	        "NetworkSettings": {
	            "Bridge": "",
	            "SandboxID": "d7c6b7dfbfb93656af24a994f0e68960adea03d49aca0867204c45385a0a83d9",
	            "HairpinMode": false,
	            "LinkLocalIPv6Address": "",
	            "LinkLocalIPv6PrefixLen": 0,
	            "Ports": {
	                "22/tcp": [
	                    {
	                        "HostIp": "127.0.0.1",
	                        "HostPort": "33103"
	                    }
	                ],
	                "2376/tcp": [
	                    {
	                        "HostIp": "127.0.0.1",
	                        "HostPort": "33102"
	                    }
	                ],
	                "32443/tcp": [
	                    {
	                        "HostIp": "127.0.0.1",
	                        "HostPort": "33099"
	                    }
	                ],
	                "5000/tcp": [
	                    {
	                        "HostIp": "127.0.0.1",
	                        "HostPort": "33101"
	                    }
	                ],
	                "8443/tcp": [
	                    {
	                        "HostIp": "127.0.0.1",
	                        "HostPort": "33100"
	                    }
	                ]
	            },
	            "SandboxKey": "/var/run/docker/netns/d7c6b7dfbfb9",
	            "SecondaryIPAddresses": null,
	            "SecondaryIPv6Addresses": null,
	            "EndpointID": "",
	            "Gateway": "",
	            "GlobalIPv6Address": "",
	            "GlobalIPv6PrefixLen": 0,
	            "IPAddress": "",
	            "IPPrefixLen": 0,
	            "IPv6Gateway": "",
	            "MacAddress": "",
	            "Networks": {
	                "ingress-addon-legacy-853196": {
	                    "IPAMConfig": {
	                        "IPv4Address": "192.168.49.2"
	                    },
	                    "Links": null,
	                    "Aliases": [
	                        "de2b802fd021",
	                        "ingress-addon-legacy-853196"
	                    ],
	                    "NetworkID": "3a1f66116b9e1def183552b790b8cf9ad2d589b591e5f583fdb2e39ca6a40d86",
	                    "EndpointID": "0159ac71ba666dccffdf882a61a365d28fce2b3cd419867f93075e6ee6bd7179",
	                    "Gateway": "192.168.49.1",
	                    "IPAddress": "192.168.49.2",
	                    "IPPrefixLen": 24,
	                    "IPv6Gateway": "",
	                    "GlobalIPv6Address": "",
	                    "GlobalIPv6PrefixLen": 0,
	                    "MacAddress": "02:42:c0:a8:31:02",
	                    "DriverOpts": null
	                }
	            }
	        }
	    }
	]

                                                
                                                
-- /stdout --
helpers_test.go:239: (dbg) Run:  out/minikube-linux-arm64 status --format={{.Host}} -p ingress-addon-legacy-853196 -n ingress-addon-legacy-853196
helpers_test.go:244: <<< TestIngressAddonLegacy/serial/ValidateIngressAddons FAILED: start of post-mortem logs <<<
helpers_test.go:245: ======>  post-mortem[TestIngressAddonLegacy/serial/ValidateIngressAddons]: minikube logs <======
helpers_test.go:247: (dbg) Run:  out/minikube-linux-arm64 -p ingress-addon-legacy-853196 logs -n 25
helpers_test.go:247: (dbg) Done: out/minikube-linux-arm64 -p ingress-addon-legacy-853196 logs -n 25: (1.48757634s)
helpers_test.go:252: TestIngressAddonLegacy/serial/ValidateIngressAddons logs: 
-- stdout --
	* 
	* ==> Audit <==
	* |----------------|------------------------------------------------------------------------------|-----------------------------|---------|---------|---------------------|---------------------|
	|    Command     |                                     Args                                     |           Profile           |  User   | Version |     Start Time      |      End Time       |
	|----------------|------------------------------------------------------------------------------|-----------------------------|---------|---------|---------------------|---------------------|
	| update-context | functional-616785                                                            | functional-616785           | jenkins | v1.32.0 | 01 Dec 23 19:02 UTC | 01 Dec 23 19:02 UTC |
	|                | update-context                                                               |                             |         |         |                     |                     |
	|                | --alsologtostderr -v=2                                                       |                             |         |         |                     |                     |
	| image          | functional-616785 image load --daemon                                        | functional-616785           | jenkins | v1.32.0 | 01 Dec 23 19:02 UTC | 01 Dec 23 19:02 UTC |
	|                | gcr.io/google-containers/addon-resizer:functional-616785                     |                             |         |         |                     |                     |
	|                | --alsologtostderr                                                            |                             |         |         |                     |                     |
	| image          | functional-616785 image ls                                                   | functional-616785           | jenkins | v1.32.0 | 01 Dec 23 19:02 UTC | 01 Dec 23 19:02 UTC |
	| image          | functional-616785 image load --daemon                                        | functional-616785           | jenkins | v1.32.0 | 01 Dec 23 19:02 UTC | 01 Dec 23 19:02 UTC |
	|                | gcr.io/google-containers/addon-resizer:functional-616785                     |                             |         |         |                     |                     |
	|                | --alsologtostderr                                                            |                             |         |         |                     |                     |
	| image          | functional-616785 image ls                                                   | functional-616785           | jenkins | v1.32.0 | 01 Dec 23 19:02 UTC | 01 Dec 23 19:02 UTC |
	| image          | functional-616785 image save                                                 | functional-616785           | jenkins | v1.32.0 | 01 Dec 23 19:02 UTC | 01 Dec 23 19:02 UTC |
	|                | gcr.io/google-containers/addon-resizer:functional-616785                     |                             |         |         |                     |                     |
	|                | /home/jenkins/workspace/Docker_Linux_containerd_arm64/addon-resizer-save.tar |                             |         |         |                     |                     |
	|                | --alsologtostderr                                                            |                             |         |         |                     |                     |
	| image          | functional-616785 image rm                                                   | functional-616785           | jenkins | v1.32.0 | 01 Dec 23 19:02 UTC | 01 Dec 23 19:02 UTC |
	|                | gcr.io/google-containers/addon-resizer:functional-616785                     |                             |         |         |                     |                     |
	|                | --alsologtostderr                                                            |                             |         |         |                     |                     |
	| image          | functional-616785 image ls                                                   | functional-616785           | jenkins | v1.32.0 | 01 Dec 23 19:02 UTC | 01 Dec 23 19:02 UTC |
	| image          | functional-616785 image load                                                 | functional-616785           | jenkins | v1.32.0 | 01 Dec 23 19:02 UTC | 01 Dec 23 19:02 UTC |
	|                | /home/jenkins/workspace/Docker_Linux_containerd_arm64/addon-resizer-save.tar |                             |         |         |                     |                     |
	|                | --alsologtostderr                                                            |                             |         |         |                     |                     |
	| image          | functional-616785 image save --daemon                                        | functional-616785           | jenkins | v1.32.0 | 01 Dec 23 19:02 UTC | 01 Dec 23 19:02 UTC |
	|                | gcr.io/google-containers/addon-resizer:functional-616785                     |                             |         |         |                     |                     |
	|                | --alsologtostderr                                                            |                             |         |         |                     |                     |
	| image          | functional-616785                                                            | functional-616785           | jenkins | v1.32.0 | 01 Dec 23 19:02 UTC | 01 Dec 23 19:02 UTC |
	|                | image ls --format short                                                      |                             |         |         |                     |                     |
	|                | --alsologtostderr                                                            |                             |         |         |                     |                     |
	| image          | functional-616785                                                            | functional-616785           | jenkins | v1.32.0 | 01 Dec 23 19:02 UTC | 01 Dec 23 19:02 UTC |
	|                | image ls --format yaml                                                       |                             |         |         |                     |                     |
	|                | --alsologtostderr                                                            |                             |         |         |                     |                     |
	| image          | functional-616785                                                            | functional-616785           | jenkins | v1.32.0 | 01 Dec 23 19:02 UTC | 01 Dec 23 19:02 UTC |
	|                | image ls --format json                                                       |                             |         |         |                     |                     |
	|                | --alsologtostderr                                                            |                             |         |         |                     |                     |
	| image          | functional-616785                                                            | functional-616785           | jenkins | v1.32.0 | 01 Dec 23 19:02 UTC | 01 Dec 23 19:02 UTC |
	|                | image ls --format table                                                      |                             |         |         |                     |                     |
	|                | --alsologtostderr                                                            |                             |         |         |                     |                     |
	| ssh            | functional-616785 ssh pgrep                                                  | functional-616785           | jenkins | v1.32.0 | 01 Dec 23 19:02 UTC |                     |
	|                | buildkitd                                                                    |                             |         |         |                     |                     |
	| image          | functional-616785 image build -t                                             | functional-616785           | jenkins | v1.32.0 | 01 Dec 23 19:02 UTC | 01 Dec 23 19:02 UTC |
	|                | localhost/my-image:functional-616785                                         |                             |         |         |                     |                     |
	|                | testdata/build --alsologtostderr                                             |                             |         |         |                     |                     |
	| image          | functional-616785 image ls                                                   | functional-616785           | jenkins | v1.32.0 | 01 Dec 23 19:02 UTC | 01 Dec 23 19:02 UTC |
	| delete         | -p functional-616785                                                         | functional-616785           | jenkins | v1.32.0 | 01 Dec 23 19:02 UTC | 01 Dec 23 19:02 UTC |
	| start          | -p ingress-addon-legacy-853196                                               | ingress-addon-legacy-853196 | jenkins | v1.32.0 | 01 Dec 23 19:02 UTC | 01 Dec 23 19:04 UTC |
	|                | --kubernetes-version=v1.18.20                                                |                             |         |         |                     |                     |
	|                | --memory=4096 --wait=true                                                    |                             |         |         |                     |                     |
	|                | --alsologtostderr                                                            |                             |         |         |                     |                     |
	|                | -v=5 --driver=docker                                                         |                             |         |         |                     |                     |
	|                | --container-runtime=containerd                                               |                             |         |         |                     |                     |
	| addons         | ingress-addon-legacy-853196                                                  | ingress-addon-legacy-853196 | jenkins | v1.32.0 | 01 Dec 23 19:04 UTC | 01 Dec 23 19:04 UTC |
	|                | addons enable ingress                                                        |                             |         |         |                     |                     |
	|                | --alsologtostderr -v=5                                                       |                             |         |         |                     |                     |
	| addons         | ingress-addon-legacy-853196                                                  | ingress-addon-legacy-853196 | jenkins | v1.32.0 | 01 Dec 23 19:04 UTC | 01 Dec 23 19:04 UTC |
	|                | addons enable ingress-dns                                                    |                             |         |         |                     |                     |
	|                | --alsologtostderr -v=5                                                       |                             |         |         |                     |                     |
	| ssh            | ingress-addon-legacy-853196                                                  | ingress-addon-legacy-853196 | jenkins | v1.32.0 | 01 Dec 23 19:04 UTC | 01 Dec 23 19:04 UTC |
	|                | ssh curl -s http://127.0.0.1/                                                |                             |         |         |                     |                     |
	|                | -H 'Host: nginx.example.com'                                                 |                             |         |         |                     |                     |
	| ip             | ingress-addon-legacy-853196 ip                                               | ingress-addon-legacy-853196 | jenkins | v1.32.0 | 01 Dec 23 19:04 UTC | 01 Dec 23 19:04 UTC |
	| addons         | ingress-addon-legacy-853196                                                  | ingress-addon-legacy-853196 | jenkins | v1.32.0 | 01 Dec 23 19:05 UTC | 01 Dec 23 19:05 UTC |
	|                | addons disable ingress-dns                                                   |                             |         |         |                     |                     |
	|                | --alsologtostderr -v=1                                                       |                             |         |         |                     |                     |
	| addons         | ingress-addon-legacy-853196                                                  | ingress-addon-legacy-853196 | jenkins | v1.32.0 | 01 Dec 23 19:05 UTC | 01 Dec 23 19:05 UTC |
	|                | addons disable ingress                                                       |                             |         |         |                     |                     |
	|                | --alsologtostderr -v=1                                                       |                             |         |         |                     |                     |
	|----------------|------------------------------------------------------------------------------|-----------------------------|---------|---------|---------------------|---------------------|
	
	* 
	* ==> Last Start <==
	* Log file created at: 2023/12/01 19:02:51
	Running on machine: ip-172-31-31-251
	Binary: Built with gc go1.21.4 for linux/arm64
	Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg
	I1201 19:02:51.251296  292692 out.go:296] Setting OutFile to fd 1 ...
	I1201 19:02:51.251520  292692 out.go:343] TERM=,COLORTERM=, which probably does not support color
	I1201 19:02:51.251533  292692 out.go:309] Setting ErrFile to fd 2...
	I1201 19:02:51.251539  292692 out.go:343] TERM=,COLORTERM=, which probably does not support color
	I1201 19:02:51.251837  292692 root.go:338] Updating PATH: /home/jenkins/minikube-integration/17703-252966/.minikube/bin
	I1201 19:02:51.252293  292692 out.go:303] Setting JSON to false
	I1201 19:02:51.253256  292692 start.go:128] hostinfo: {"hostname":"ip-172-31-31-251","uptime":6318,"bootTime":1701451054,"procs":216,"os":"linux","platform":"ubuntu","platformFamily":"debian","platformVersion":"20.04","kernelVersion":"5.15.0-1050-aws","kernelArch":"aarch64","virtualizationSystem":"","virtualizationRole":"","hostId":"982e3628-3742-4b3e-bb63-ac1b07660ec7"}
	I1201 19:02:51.253336  292692 start.go:138] virtualization:  
	I1201 19:02:51.255894  292692 out.go:177] * [ingress-addon-legacy-853196] minikube v1.32.0 on Ubuntu 20.04 (arm64)
	I1201 19:02:51.258064  292692 out.go:177]   - MINIKUBE_LOCATION=17703
	I1201 19:02:51.259888  292692 out.go:177]   - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
	I1201 19:02:51.258275  292692 notify.go:220] Checking for updates...
	I1201 19:02:51.261510  292692 out.go:177]   - KUBECONFIG=/home/jenkins/minikube-integration/17703-252966/kubeconfig
	I1201 19:02:51.263239  292692 out.go:177]   - MINIKUBE_HOME=/home/jenkins/minikube-integration/17703-252966/.minikube
	I1201 19:02:51.265060  292692 out.go:177]   - MINIKUBE_BIN=out/minikube-linux-arm64
	I1201 19:02:51.266883  292692 out.go:177]   - MINIKUBE_FORCE_SYSTEMD=
	I1201 19:02:51.268894  292692 driver.go:392] Setting default libvirt URI to qemu:///system
	I1201 19:02:51.292874  292692 docker.go:122] docker version: linux-24.0.7:Docker Engine - Community
	I1201 19:02:51.293006  292692 cli_runner.go:164] Run: docker system info --format "{{json .}}"
	I1201 19:02:51.377828  292692 info.go:266] docker info: {ID:EOU5:DNGX:XN6V:L2FZ:UXRM:5TWK:EVUR:KC2F:GT7Z:Y4O4:GB77:5PD3 Containers:0 ContainersRunning:0 ContainersPaused:0 ContainersStopped:0 Images:3 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local logentries splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:true BridgeNfIP6Tables:true Debug:false NFd:23 OomKillDisable:true NGoroutines:35 SystemTime:2023-12-01 19:02:51.367998952 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1050-aws OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Architecture:aarch64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:2 MemTotal:8215040000 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ip-172-31-31-251 Labels:[] ExperimentalBuild:false ServerVersion:24.0.7 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:d8f198a4ed8892c764191ef7b3b06d8a2eeb5c7f Expected:d8f198a4ed8892c764191ef7b3b06d8a2eeb5c7f} RuncCommit:{ID:v1.1.10-0-g18a0cb0 Expected:v1.1.10-0-g18a0cb0} InitCommit:{ID:de40ad0 Expected:de40ad0} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.11.2] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.21.0]] Warnings:<nil>}}
	I1201 19:02:51.377932  292692 docker.go:295] overlay module found
	I1201 19:02:51.380146  292692 out.go:177] * Using the docker driver based on user configuration
	I1201 19:02:51.381761  292692 start.go:298] selected driver: docker
	I1201 19:02:51.381785  292692 start.go:902] validating driver "docker" against <nil>
	I1201 19:02:51.381799  292692 start.go:913] status for docker: {Installed:true Healthy:true Running:false NeedsImprovement:false Error:<nil> Reason: Fix: Doc: Version:}
	I1201 19:02:51.382483  292692 cli_runner.go:164] Run: docker system info --format "{{json .}}"
	I1201 19:02:51.450964  292692 info.go:266] docker info: {ID:EOU5:DNGX:XN6V:L2FZ:UXRM:5TWK:EVUR:KC2F:GT7Z:Y4O4:GB77:5PD3 Containers:0 ContainersRunning:0 ContainersPaused:0 ContainersStopped:0 Images:3 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local logentries splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:true BridgeNfIP6Tables:true Debug:false NFd:23 OomKillDisable:true NGoroutines:35 SystemTime:2023-12-01 19:02:51.439304665 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1050-aws OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Architecture:aarch64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:2 MemTotal:8215040000 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ip-172-31-31-251 Labels:[] ExperimentalBuild:false ServerVersion:24.0.7 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:d8f198a4ed8892c764191ef7b3b06d8a2eeb5c7f Expected:d8f198a4ed8892c764191ef7b3b06d8a2eeb5c7f} RuncCommit:{ID:v1.1.10-0-g18a0cb0 Expected:v1.1.10-0-g18a0cb0} InitCommit:{ID:de40ad0 Expected:de40ad0} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.11.2] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.21.0]] Warnings:<nil>}}
	I1201 19:02:51.451127  292692 start_flags.go:309] no existing cluster config was found, will generate one from the flags 
	I1201 19:02:51.451408  292692 start_flags.go:931] Waiting for all components: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
	I1201 19:02:51.453363  292692 out.go:177] * Using Docker driver with root privileges
	I1201 19:02:51.455390  292692 cni.go:84] Creating CNI manager for ""
	I1201 19:02:51.455412  292692 cni.go:143] "docker" driver + "containerd" runtime found, recommending kindnet
	I1201 19:02:51.455426  292692 start_flags.go:318] Found "CNI" CNI - setting NetworkPlugin=cni
	I1201 19:02:51.455437  292692 start_flags.go:323] config:
	{Name:ingress-addon-legacy-853196 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.42-1701387262-17703@sha256:a5458414df1be5e58eff93b3e67e6ecaad7e51ab23139de15714f7345af15e2f Memory:4096 CPUs:2 DiskSize:20000 VMDriver: Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:0 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.18.20 ClusterName:ingress-addon-legacy-853196 Namespace:default APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI: NodeIP: NodePort:8443 NodeName:} Nodes:[] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 AutoPauseInterval:1m0s GPUs:}
	I1201 19:02:51.457648  292692 out.go:177] * Starting control plane node ingress-addon-legacy-853196 in cluster ingress-addon-legacy-853196
	I1201 19:02:51.459356  292692 cache.go:121] Beginning downloading kic base image for docker with containerd
	I1201 19:02:51.461255  292692 out.go:177] * Pulling base image ...
	I1201 19:02:51.463107  292692 preload.go:132] Checking if preload exists for k8s version v1.18.20 and runtime containerd
	I1201 19:02:51.463149  292692 image.go:79] Checking for gcr.io/k8s-minikube/kicbase-builds:v0.0.42-1701387262-17703@sha256:a5458414df1be5e58eff93b3e67e6ecaad7e51ab23139de15714f7345af15e2f in local docker daemon
	I1201 19:02:51.481812  292692 image.go:83] Found gcr.io/k8s-minikube/kicbase-builds:v0.0.42-1701387262-17703@sha256:a5458414df1be5e58eff93b3e67e6ecaad7e51ab23139de15714f7345af15e2f in local docker daemon, skipping pull
	I1201 19:02:51.481839  292692 cache.go:144] gcr.io/k8s-minikube/kicbase-builds:v0.0.42-1701387262-17703@sha256:a5458414df1be5e58eff93b3e67e6ecaad7e51ab23139de15714f7345af15e2f exists in daemon, skipping load
	I1201 19:02:51.528047  292692 preload.go:119] Found remote preload: https://storage.googleapis.com/minikube-preloaded-volume-tarballs/v18/v1.18.20/preloaded-images-k8s-v18-v1.18.20-containerd-overlay2-arm64.tar.lz4
	I1201 19:02:51.528072  292692 cache.go:56] Caching tarball of preloaded images
	I1201 19:02:51.528695  292692 preload.go:132] Checking if preload exists for k8s version v1.18.20 and runtime containerd
	I1201 19:02:51.530743  292692 out.go:177] * Downloading Kubernetes v1.18.20 preload ...
	I1201 19:02:51.532665  292692 preload.go:238] getting checksum for preloaded-images-k8s-v18-v1.18.20-containerd-overlay2-arm64.tar.lz4 ...
	I1201 19:02:51.647968  292692 download.go:107] Downloading: https://storage.googleapis.com/minikube-preloaded-volume-tarballs/v18/v1.18.20/preloaded-images-k8s-v18-v1.18.20-containerd-overlay2-arm64.tar.lz4?checksum=md5:9e505be2989b8c051b1372c317471064 -> /home/jenkins/minikube-integration/17703-252966/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.18.20-containerd-overlay2-arm64.tar.lz4
	I1201 19:03:09.352192  292692 preload.go:249] saving checksum for preloaded-images-k8s-v18-v1.18.20-containerd-overlay2-arm64.tar.lz4 ...
	I1201 19:03:09.352300  292692 preload.go:256] verifying checksum of /home/jenkins/minikube-integration/17703-252966/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.18.20-containerd-overlay2-arm64.tar.lz4 ...
	I1201 19:03:10.548691  292692 cache.go:59] Finished verifying existence of preloaded tar for  v1.18.20 on containerd
	I1201 19:03:10.549087  292692 profile.go:148] Saving config to /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/ingress-addon-legacy-853196/config.json ...
	I1201 19:03:10.549122  292692 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/ingress-addon-legacy-853196/config.json: {Name:mkf3e474409b5e90a1aa4c29f556befb30a81225 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
	I1201 19:03:10.549304  292692 cache.go:194] Successfully downloaded all kic artifacts
	I1201 19:03:10.549353  292692 start.go:365] acquiring machines lock for ingress-addon-legacy-853196: {Name:mkd1ffc7ac002e36deee7b055d221bdd9dfda393 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
	I1201 19:03:10.549413  292692 start.go:369] acquired machines lock for "ingress-addon-legacy-853196" in 45.242µs
	I1201 19:03:10.549434  292692 start.go:93] Provisioning new machine with config: &{Name:ingress-addon-legacy-853196 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.42-1701387262-17703@sha256:a5458414df1be5e58eff93b3e67e6ecaad7e51ab23139de15714f7345af15e2f Memory:4096 CPUs:2 DiskSize:20000 VMDriver: Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:0 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.18.20 ClusterName:ingress-addon-legacy-853196 Namespace:default APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI: NodeIP: NodePort:8443 NodeName:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.18.20 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 AutoPauseInterval:1m0s GPUs:} &{Name: IP: Port:8443 KubernetesVersion:v1.18.20 ContainerRuntime:containerd ControlPlane:true Worker:true}
	I1201 19:03:10.549503  292692 start.go:125] createHost starting for "" (driver="docker")
	I1201 19:03:10.552226  292692 out.go:204] * Creating docker container (CPUs=2, Memory=4096MB) ...
	I1201 19:03:10.552456  292692 start.go:159] libmachine.API.Create for "ingress-addon-legacy-853196" (driver="docker")
	I1201 19:03:10.552503  292692 client.go:168] LocalClient.Create starting
	I1201 19:03:10.552575  292692 main.go:141] libmachine: Reading certificate data from /home/jenkins/minikube-integration/17703-252966/.minikube/certs/ca.pem
	I1201 19:03:10.552613  292692 main.go:141] libmachine: Decoding PEM data...
	I1201 19:03:10.552632  292692 main.go:141] libmachine: Parsing certificate...
	I1201 19:03:10.552693  292692 main.go:141] libmachine: Reading certificate data from /home/jenkins/minikube-integration/17703-252966/.minikube/certs/cert.pem
	I1201 19:03:10.552717  292692 main.go:141] libmachine: Decoding PEM data...
	I1201 19:03:10.552731  292692 main.go:141] libmachine: Parsing certificate...
	I1201 19:03:10.553104  292692 cli_runner.go:164] Run: docker network inspect ingress-addon-legacy-853196 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
	W1201 19:03:10.570466  292692 cli_runner.go:211] docker network inspect ingress-addon-legacy-853196 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}" returned with exit code 1
	I1201 19:03:10.570553  292692 network_create.go:281] running [docker network inspect ingress-addon-legacy-853196] to gather additional debugging logs...
	I1201 19:03:10.570576  292692 cli_runner.go:164] Run: docker network inspect ingress-addon-legacy-853196
	W1201 19:03:10.587545  292692 cli_runner.go:211] docker network inspect ingress-addon-legacy-853196 returned with exit code 1
	I1201 19:03:10.587583  292692 network_create.go:284] error running [docker network inspect ingress-addon-legacy-853196]: docker network inspect ingress-addon-legacy-853196: exit status 1
	stdout:
	[]
	
	stderr:
	Error response from daemon: network ingress-addon-legacy-853196 not found
	I1201 19:03:10.587602  292692 network_create.go:286] output of [docker network inspect ingress-addon-legacy-853196]: -- stdout --
	[]
	
	-- /stdout --
	** stderr ** 
	Error response from daemon: network ingress-addon-legacy-853196 not found
	
	** /stderr **
	I1201 19:03:10.587719  292692 cli_runner.go:164] Run: docker network inspect bridge --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
	I1201 19:03:10.606054  292692 network.go:209] using free private subnet 192.168.49.0/24: &{IP:192.168.49.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.49.0/24 Gateway:192.168.49.1 ClientMin:192.168.49.2 ClientMax:192.168.49.254 Broadcast:192.168.49.255 IsPrivate:true Interface:{IfaceName: IfaceIPv4: IfaceMTU:0 IfaceMAC:} reservation:0x400200e570}
	I1201 19:03:10.606097  292692 network_create.go:124] attempt to create docker network ingress-addon-legacy-853196 192.168.49.0/24 with gateway 192.168.49.1 and MTU of 1500 ...
	I1201 19:03:10.606156  292692 cli_runner.go:164] Run: docker network create --driver=bridge --subnet=192.168.49.0/24 --gateway=192.168.49.1 -o --ip-masq -o --icc -o com.docker.network.driver.mtu=1500 --label=created_by.minikube.sigs.k8s.io=true --label=name.minikube.sigs.k8s.io=ingress-addon-legacy-853196 ingress-addon-legacy-853196
	I1201 19:03:10.683576  292692 network_create.go:108] docker network ingress-addon-legacy-853196 192.168.49.0/24 created
	I1201 19:03:10.683611  292692 kic.go:121] calculated static IP "192.168.49.2" for the "ingress-addon-legacy-853196" container
	I1201 19:03:10.683684  292692 cli_runner.go:164] Run: docker ps -a --format {{.Names}}
	I1201 19:03:10.700943  292692 cli_runner.go:164] Run: docker volume create ingress-addon-legacy-853196 --label name.minikube.sigs.k8s.io=ingress-addon-legacy-853196 --label created_by.minikube.sigs.k8s.io=true
	I1201 19:03:10.719747  292692 oci.go:103] Successfully created a docker volume ingress-addon-legacy-853196
	I1201 19:03:10.719858  292692 cli_runner.go:164] Run: docker run --rm --name ingress-addon-legacy-853196-preload-sidecar --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=ingress-addon-legacy-853196 --entrypoint /usr/bin/test -v ingress-addon-legacy-853196:/var gcr.io/k8s-minikube/kicbase-builds:v0.0.42-1701387262-17703@sha256:a5458414df1be5e58eff93b3e67e6ecaad7e51ab23139de15714f7345af15e2f -d /var/lib
	I1201 19:03:12.251696  292692 cli_runner.go:217] Completed: docker run --rm --name ingress-addon-legacy-853196-preload-sidecar --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=ingress-addon-legacy-853196 --entrypoint /usr/bin/test -v ingress-addon-legacy-853196:/var gcr.io/k8s-minikube/kicbase-builds:v0.0.42-1701387262-17703@sha256:a5458414df1be5e58eff93b3e67e6ecaad7e51ab23139de15714f7345af15e2f -d /var/lib: (1.531787576s)
	I1201 19:03:12.251728  292692 oci.go:107] Successfully prepared a docker volume ingress-addon-legacy-853196
	I1201 19:03:12.251755  292692 preload.go:132] Checking if preload exists for k8s version v1.18.20 and runtime containerd
	I1201 19:03:12.251775  292692 kic.go:194] Starting extracting preloaded images to volume ...
	I1201 19:03:12.251857  292692 cli_runner.go:164] Run: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/17703-252966/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.18.20-containerd-overlay2-arm64.tar.lz4:/preloaded.tar:ro -v ingress-addon-legacy-853196:/extractDir gcr.io/k8s-minikube/kicbase-builds:v0.0.42-1701387262-17703@sha256:a5458414df1be5e58eff93b3e67e6ecaad7e51ab23139de15714f7345af15e2f -I lz4 -xf /preloaded.tar -C /extractDir
	I1201 19:03:17.223032  292692 cli_runner.go:217] Completed: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/17703-252966/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.18.20-containerd-overlay2-arm64.tar.lz4:/preloaded.tar:ro -v ingress-addon-legacy-853196:/extractDir gcr.io/k8s-minikube/kicbase-builds:v0.0.42-1701387262-17703@sha256:a5458414df1be5e58eff93b3e67e6ecaad7e51ab23139de15714f7345af15e2f -I lz4 -xf /preloaded.tar -C /extractDir: (4.97112663s)
	I1201 19:03:17.223064  292692 kic.go:203] duration metric: took 4.971286 seconds to extract preloaded images to volume
	W1201 19:03:17.223211  292692 cgroups_linux.go:77] Your kernel does not support swap limit capabilities or the cgroup is not mounted.
	I1201 19:03:17.223346  292692 cli_runner.go:164] Run: docker info --format "'{{json .SecurityOptions}}'"
	I1201 19:03:17.291256  292692 cli_runner.go:164] Run: docker run -d -t --privileged --security-opt seccomp=unconfined --tmpfs /tmp --tmpfs /run -v /lib/modules:/lib/modules:ro --hostname ingress-addon-legacy-853196 --name ingress-addon-legacy-853196 --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=ingress-addon-legacy-853196 --label role.minikube.sigs.k8s.io= --label mode.minikube.sigs.k8s.io=ingress-addon-legacy-853196 --network ingress-addon-legacy-853196 --ip 192.168.49.2 --volume ingress-addon-legacy-853196:/var --security-opt apparmor=unconfined --memory=4096mb --cpus=2 -e container=docker --expose 8443 --publish=127.0.0.1::8443 --publish=127.0.0.1::22 --publish=127.0.0.1::2376 --publish=127.0.0.1::5000 --publish=127.0.0.1::32443 gcr.io/k8s-minikube/kicbase-builds:v0.0.42-1701387262-17703@sha256:a5458414df1be5e58eff93b3e67e6ecaad7e51ab23139de15714f7345af15e2f
	I1201 19:03:17.664412  292692 cli_runner.go:164] Run: docker container inspect ingress-addon-legacy-853196 --format={{.State.Running}}
	I1201 19:03:17.700818  292692 cli_runner.go:164] Run: docker container inspect ingress-addon-legacy-853196 --format={{.State.Status}}
	I1201 19:03:17.725896  292692 cli_runner.go:164] Run: docker exec ingress-addon-legacy-853196 stat /var/lib/dpkg/alternatives/iptables
	I1201 19:03:17.812534  292692 oci.go:144] the created container "ingress-addon-legacy-853196" has a running status.
	I1201 19:03:17.812563  292692 kic.go:225] Creating ssh key for kic: /home/jenkins/minikube-integration/17703-252966/.minikube/machines/ingress-addon-legacy-853196/id_rsa...
	I1201 19:03:18.441873  292692 vm_assets.go:163] NewFileAsset: /home/jenkins/minikube-integration/17703-252966/.minikube/machines/ingress-addon-legacy-853196/id_rsa.pub -> /home/docker/.ssh/authorized_keys
	I1201 19:03:18.441923  292692 kic_runner.go:191] docker (temp): /home/jenkins/minikube-integration/17703-252966/.minikube/machines/ingress-addon-legacy-853196/id_rsa.pub --> /home/docker/.ssh/authorized_keys (381 bytes)
	I1201 19:03:18.474581  292692 cli_runner.go:164] Run: docker container inspect ingress-addon-legacy-853196 --format={{.State.Status}}
	I1201 19:03:18.499816  292692 kic_runner.go:93] Run: chown docker:docker /home/docker/.ssh/authorized_keys
	I1201 19:03:18.499877  292692 kic_runner.go:114] Args: [docker exec --privileged ingress-addon-legacy-853196 chown docker:docker /home/docker/.ssh/authorized_keys]
	I1201 19:03:18.612851  292692 cli_runner.go:164] Run: docker container inspect ingress-addon-legacy-853196 --format={{.State.Status}}
	I1201 19:03:18.634589  292692 machine.go:88] provisioning docker machine ...
	I1201 19:03:18.634626  292692 ubuntu.go:169] provisioning hostname "ingress-addon-legacy-853196"
	I1201 19:03:18.634695  292692 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ingress-addon-legacy-853196
	I1201 19:03:18.662872  292692 main.go:141] libmachine: Using SSH client type: native
	I1201 19:03:18.663309  292692 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x3be600] 0x3c0d70 <nil>  [] 0s} 127.0.0.1 33103 <nil> <nil>}
	I1201 19:03:18.663323  292692 main.go:141] libmachine: About to run SSH command:
	sudo hostname ingress-addon-legacy-853196 && echo "ingress-addon-legacy-853196" | sudo tee /etc/hostname
	I1201 19:03:18.839771  292692 main.go:141] libmachine: SSH cmd err, output: <nil>: ingress-addon-legacy-853196
	
	I1201 19:03:18.840019  292692 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ingress-addon-legacy-853196
	I1201 19:03:18.871861  292692 main.go:141] libmachine: Using SSH client type: native
	I1201 19:03:18.872274  292692 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x3be600] 0x3c0d70 <nil>  [] 0s} 127.0.0.1 33103 <nil> <nil>}
	I1201 19:03:18.872300  292692 main.go:141] libmachine: About to run SSH command:
	
			if ! grep -xq '.*\singress-addon-legacy-853196' /etc/hosts; then
				if grep -xq '127.0.1.1\s.*' /etc/hosts; then
					sudo sed -i 's/^127.0.1.1\s.*/127.0.1.1 ingress-addon-legacy-853196/g' /etc/hosts;
				else 
					echo '127.0.1.1 ingress-addon-legacy-853196' | sudo tee -a /etc/hosts; 
				fi
			fi
	I1201 19:03:19.026066  292692 main.go:141] libmachine: SSH cmd err, output: <nil>: 
	I1201 19:03:19.026144  292692 ubuntu.go:175] set auth options {CertDir:/home/jenkins/minikube-integration/17703-252966/.minikube CaCertPath:/home/jenkins/minikube-integration/17703-252966/.minikube/certs/ca.pem CaPrivateKeyPath:/home/jenkins/minikube-integration/17703-252966/.minikube/certs/ca-key.pem CaCertRemotePath:/etc/docker/ca.pem ServerCertPath:/home/jenkins/minikube-integration/17703-252966/.minikube/machines/server.pem ServerKeyPath:/home/jenkins/minikube-integration/17703-252966/.minikube/machines/server-key.pem ClientKeyPath:/home/jenkins/minikube-integration/17703-252966/.minikube/certs/key.pem ServerCertRemotePath:/etc/docker/server.pem ServerKeyRemotePath:/etc/docker/server-key.pem ClientCertPath:/home/jenkins/minikube-integration/17703-252966/.minikube/certs/cert.pem ServerCertSANs:[] StorePath:/home/jenkins/minikube-integration/17703-252966/.minikube}
	I1201 19:03:19.026179  292692 ubuntu.go:177] setting up certificates
	I1201 19:03:19.026218  292692 provision.go:83] configureAuth start
	I1201 19:03:19.026303  292692 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" ingress-addon-legacy-853196
	I1201 19:03:19.047809  292692 provision.go:138] copyHostCerts
	I1201 19:03:19.047847  292692 vm_assets.go:163] NewFileAsset: /home/jenkins/minikube-integration/17703-252966/.minikube/certs/ca.pem -> /home/jenkins/minikube-integration/17703-252966/.minikube/ca.pem
	I1201 19:03:19.047934  292692 exec_runner.go:144] found /home/jenkins/minikube-integration/17703-252966/.minikube/ca.pem, removing ...
	I1201 19:03:19.047947  292692 exec_runner.go:203] rm: /home/jenkins/minikube-integration/17703-252966/.minikube/ca.pem
	I1201 19:03:19.048030  292692 exec_runner.go:151] cp: /home/jenkins/minikube-integration/17703-252966/.minikube/certs/ca.pem --> /home/jenkins/minikube-integration/17703-252966/.minikube/ca.pem (1078 bytes)
	I1201 19:03:19.048146  292692 vm_assets.go:163] NewFileAsset: /home/jenkins/minikube-integration/17703-252966/.minikube/certs/cert.pem -> /home/jenkins/minikube-integration/17703-252966/.minikube/cert.pem
	I1201 19:03:19.048168  292692 exec_runner.go:144] found /home/jenkins/minikube-integration/17703-252966/.minikube/cert.pem, removing ...
	I1201 19:03:19.048173  292692 exec_runner.go:203] rm: /home/jenkins/minikube-integration/17703-252966/.minikube/cert.pem
	I1201 19:03:19.048211  292692 exec_runner.go:151] cp: /home/jenkins/minikube-integration/17703-252966/.minikube/certs/cert.pem --> /home/jenkins/minikube-integration/17703-252966/.minikube/cert.pem (1123 bytes)
	I1201 19:03:19.048262  292692 vm_assets.go:163] NewFileAsset: /home/jenkins/minikube-integration/17703-252966/.minikube/certs/key.pem -> /home/jenkins/minikube-integration/17703-252966/.minikube/key.pem
	I1201 19:03:19.048296  292692 exec_runner.go:144] found /home/jenkins/minikube-integration/17703-252966/.minikube/key.pem, removing ...
	I1201 19:03:19.048300  292692 exec_runner.go:203] rm: /home/jenkins/minikube-integration/17703-252966/.minikube/key.pem
	I1201 19:03:19.048330  292692 exec_runner.go:151] cp: /home/jenkins/minikube-integration/17703-252966/.minikube/certs/key.pem --> /home/jenkins/minikube-integration/17703-252966/.minikube/key.pem (1679 bytes)
	I1201 19:03:19.048373  292692 provision.go:112] generating server cert: /home/jenkins/minikube-integration/17703-252966/.minikube/machines/server.pem ca-key=/home/jenkins/minikube-integration/17703-252966/.minikube/certs/ca.pem private-key=/home/jenkins/minikube-integration/17703-252966/.minikube/certs/ca-key.pem org=jenkins.ingress-addon-legacy-853196 san=[192.168.49.2 127.0.0.1 localhost 127.0.0.1 minikube ingress-addon-legacy-853196]
	I1201 19:03:19.353761  292692 provision.go:172] copyRemoteCerts
	I1201 19:03:19.353834  292692 ssh_runner.go:195] Run: sudo mkdir -p /etc/docker /etc/docker /etc/docker
	I1201 19:03:19.353876  292692 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ingress-addon-legacy-853196
	I1201 19:03:19.371592  292692 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33103 SSHKeyPath:/home/jenkins/minikube-integration/17703-252966/.minikube/machines/ingress-addon-legacy-853196/id_rsa Username:docker}
	I1201 19:03:19.475120  292692 vm_assets.go:163] NewFileAsset: /home/jenkins/minikube-integration/17703-252966/.minikube/certs/ca.pem -> /etc/docker/ca.pem
	I1201 19:03:19.475187  292692 ssh_runner.go:362] scp /home/jenkins/minikube-integration/17703-252966/.minikube/certs/ca.pem --> /etc/docker/ca.pem (1078 bytes)
	I1201 19:03:19.503698  292692 vm_assets.go:163] NewFileAsset: /home/jenkins/minikube-integration/17703-252966/.minikube/machines/server.pem -> /etc/docker/server.pem
	I1201 19:03:19.503761  292692 ssh_runner.go:362] scp /home/jenkins/minikube-integration/17703-252966/.minikube/machines/server.pem --> /etc/docker/server.pem (1253 bytes)
	I1201 19:03:19.533569  292692 vm_assets.go:163] NewFileAsset: /home/jenkins/minikube-integration/17703-252966/.minikube/machines/server-key.pem -> /etc/docker/server-key.pem
	I1201 19:03:19.533634  292692 ssh_runner.go:362] scp /home/jenkins/minikube-integration/17703-252966/.minikube/machines/server-key.pem --> /etc/docker/server-key.pem (1679 bytes)
	I1201 19:03:19.562660  292692 provision.go:86] duration metric: configureAuth took 536.411181ms
	I1201 19:03:19.562738  292692 ubuntu.go:193] setting minikube options for container-runtime
	I1201 19:03:19.562938  292692 config.go:182] Loaded profile config "ingress-addon-legacy-853196": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.18.20
	I1201 19:03:19.562953  292692 machine.go:91] provisioned docker machine in 928.344576ms
	I1201 19:03:19.562962  292692 client.go:171] LocalClient.Create took 9.010451635s
	I1201 19:03:19.562986  292692 start.go:167] duration metric: libmachine.API.Create for "ingress-addon-legacy-853196" took 9.010528352s
	I1201 19:03:19.562998  292692 start.go:300] post-start starting for "ingress-addon-legacy-853196" (driver="docker")
	I1201 19:03:19.563007  292692 start.go:329] creating required directories: [/etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs]
	I1201 19:03:19.563062  292692 ssh_runner.go:195] Run: sudo mkdir -p /etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs
	I1201 19:03:19.563106  292692 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ingress-addon-legacy-853196
	I1201 19:03:19.581303  292692 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33103 SSHKeyPath:/home/jenkins/minikube-integration/17703-252966/.minikube/machines/ingress-addon-legacy-853196/id_rsa Username:docker}
	I1201 19:03:19.688101  292692 ssh_runner.go:195] Run: cat /etc/os-release
	I1201 19:03:19.692208  292692 main.go:141] libmachine: Couldn't set key VERSION_CODENAME, no corresponding struct field found
	I1201 19:03:19.692249  292692 main.go:141] libmachine: Couldn't set key PRIVACY_POLICY_URL, no corresponding struct field found
	I1201 19:03:19.692267  292692 main.go:141] libmachine: Couldn't set key UBUNTU_CODENAME, no corresponding struct field found
	I1201 19:03:19.692284  292692 info.go:137] Remote host: Ubuntu 22.04.3 LTS
	I1201 19:03:19.692299  292692 filesync.go:126] Scanning /home/jenkins/minikube-integration/17703-252966/.minikube/addons for local assets ...
	I1201 19:03:19.692366  292692 filesync.go:126] Scanning /home/jenkins/minikube-integration/17703-252966/.minikube/files for local assets ...
	I1201 19:03:19.692455  292692 filesync.go:149] local asset: /home/jenkins/minikube-integration/17703-252966/.minikube/files/etc/ssl/certs/2583012.pem -> 2583012.pem in /etc/ssl/certs
	I1201 19:03:19.692488  292692 vm_assets.go:163] NewFileAsset: /home/jenkins/minikube-integration/17703-252966/.minikube/files/etc/ssl/certs/2583012.pem -> /etc/ssl/certs/2583012.pem
	I1201 19:03:19.692601  292692 ssh_runner.go:195] Run: sudo mkdir -p /etc/ssl/certs
	I1201 19:03:19.703267  292692 ssh_runner.go:362] scp /home/jenkins/minikube-integration/17703-252966/.minikube/files/etc/ssl/certs/2583012.pem --> /etc/ssl/certs/2583012.pem (1708 bytes)
	I1201 19:03:19.731978  292692 start.go:303] post-start completed in 168.964807ms
	I1201 19:03:19.732368  292692 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" ingress-addon-legacy-853196
	I1201 19:03:19.750523  292692 profile.go:148] Saving config to /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/ingress-addon-legacy-853196/config.json ...
	I1201 19:03:19.750811  292692 ssh_runner.go:195] Run: sh -c "df -h /var | awk 'NR==2{print $5}'"
	I1201 19:03:19.750865  292692 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ingress-addon-legacy-853196
	I1201 19:03:19.768630  292692 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33103 SSHKeyPath:/home/jenkins/minikube-integration/17703-252966/.minikube/machines/ingress-addon-legacy-853196/id_rsa Username:docker}
	I1201 19:03:19.870517  292692 ssh_runner.go:195] Run: sh -c "df -BG /var | awk 'NR==2{print $4}'"
	I1201 19:03:19.876701  292692 start.go:128] duration metric: createHost completed in 9.327181855s
	I1201 19:03:19.876725  292692 start.go:83] releasing machines lock for "ingress-addon-legacy-853196", held for 9.327300663s
	I1201 19:03:19.876815  292692 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" ingress-addon-legacy-853196
	I1201 19:03:19.895986  292692 ssh_runner.go:195] Run: cat /version.json
	I1201 19:03:19.896046  292692 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ingress-addon-legacy-853196
	I1201 19:03:19.895994  292692 ssh_runner.go:195] Run: curl -sS -m 2 https://registry.k8s.io/
	I1201 19:03:19.896348  292692 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ingress-addon-legacy-853196
	I1201 19:03:19.915786  292692 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33103 SSHKeyPath:/home/jenkins/minikube-integration/17703-252966/.minikube/machines/ingress-addon-legacy-853196/id_rsa Username:docker}
	I1201 19:03:19.930051  292692 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33103 SSHKeyPath:/home/jenkins/minikube-integration/17703-252966/.minikube/machines/ingress-addon-legacy-853196/id_rsa Username:docker}
	I1201 19:03:20.024221  292692 ssh_runner.go:195] Run: systemctl --version
	I1201 19:03:20.170390  292692 ssh_runner.go:195] Run: sh -c "stat /etc/cni/net.d/*loopback.conf*"
	I1201 19:03:20.177657  292692 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f -name *loopback.conf* -not -name *.mk_disabled -exec sh -c "grep -q loopback {} && ( grep -q name {} || sudo sed -i '/"type": "loopback"/i \ \ \ \ "name": "loopback",' {} ) && sudo sed -i 's|"cniVersion": ".*"|"cniVersion": "1.0.0"|g' {}" ;
	I1201 19:03:20.210570  292692 cni.go:230] loopback cni configuration patched: "/etc/cni/net.d/*loopback.conf*" found
	I1201 19:03:20.210679  292692 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f ( ( -name *bridge* -or -name *podman* ) -and -not -name *.mk_disabled ) -printf "%!p(MISSING), " -exec sh -c "sudo mv {} {}.mk_disabled" ;
	I1201 19:03:20.246478  292692 cni.go:262] disabled [/etc/cni/net.d/87-podman-bridge.conflist, /etc/cni/net.d/100-crio-bridge.conf] bridge cni config(s)
	I1201 19:03:20.246506  292692 start.go:475] detecting cgroup driver to use...
	I1201 19:03:20.246538  292692 detect.go:196] detected "cgroupfs" cgroup driver on host os
	I1201 19:03:20.246591  292692 ssh_runner.go:195] Run: sudo systemctl stop -f crio
	I1201 19:03:20.261404  292692 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service crio
	I1201 19:03:20.275137  292692 docker.go:203] disabling cri-docker service (if available) ...
	I1201 19:03:20.275222  292692 ssh_runner.go:195] Run: sudo systemctl stop -f cri-docker.socket
	I1201 19:03:20.291952  292692 ssh_runner.go:195] Run: sudo systemctl stop -f cri-docker.service
	I1201 19:03:20.309538  292692 ssh_runner.go:195] Run: sudo systemctl disable cri-docker.socket
	I1201 19:03:20.426216  292692 ssh_runner.go:195] Run: sudo systemctl mask cri-docker.service
	I1201 19:03:20.527891  292692 docker.go:219] disabling docker service ...
	I1201 19:03:20.527976  292692 ssh_runner.go:195] Run: sudo systemctl stop -f docker.socket
	I1201 19:03:20.549812  292692 ssh_runner.go:195] Run: sudo systemctl stop -f docker.service
	I1201 19:03:20.564455  292692 ssh_runner.go:195] Run: sudo systemctl disable docker.socket
	I1201 19:03:20.666050  292692 ssh_runner.go:195] Run: sudo systemctl mask docker.service
	I1201 19:03:20.768040  292692 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service docker
	I1201 19:03:20.784660  292692 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %!s(MISSING) "runtime-endpoint: unix:///run/containerd/containerd.sock
	" | sudo tee /etc/crictl.yaml"
	I1201 19:03:20.805792  292692 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)sandbox_image = .*$|\1sandbox_image = "registry.k8s.io/pause:3.2"|' /etc/containerd/config.toml"
	I1201 19:03:20.818443  292692 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)restrict_oom_score_adj = .*$|\1restrict_oom_score_adj = false|' /etc/containerd/config.toml"
	I1201 19:03:20.831156  292692 containerd.go:145] configuring containerd to use "cgroupfs" as cgroup driver...
	I1201 19:03:20.831338  292692 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)SystemdCgroup = .*$|\1SystemdCgroup = false|g' /etc/containerd/config.toml"
	I1201 19:03:20.844623  292692 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runtime.v1.linux"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
	I1201 19:03:20.857023  292692 ssh_runner.go:195] Run: sh -c "sudo sed -i '/systemd_cgroup/d' /etc/containerd/config.toml"
	I1201 19:03:20.870198  292692 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runc.v1"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
	I1201 19:03:20.882309  292692 ssh_runner.go:195] Run: sh -c "sudo rm -rf /etc/cni/net.mk"
	I1201 19:03:20.894497  292692 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)conf_dir = .*$|\1conf_dir = "/etc/cni/net.d"|g' /etc/containerd/config.toml"
	I1201 19:03:20.907183  292692 ssh_runner.go:195] Run: sudo sysctl net.bridge.bridge-nf-call-iptables
	I1201 19:03:20.917843  292692 ssh_runner.go:195] Run: sudo sh -c "echo 1 > /proc/sys/net/ipv4/ip_forward"
	I1201 19:03:20.928306  292692 ssh_runner.go:195] Run: sudo systemctl daemon-reload
	I1201 19:03:21.024699  292692 ssh_runner.go:195] Run: sudo systemctl restart containerd
	I1201 19:03:21.160386  292692 start.go:522] Will wait 60s for socket path /run/containerd/containerd.sock
	I1201 19:03:21.160549  292692 ssh_runner.go:195] Run: stat /run/containerd/containerd.sock
	I1201 19:03:21.165438  292692 start.go:543] Will wait 60s for crictl version
	I1201 19:03:21.165513  292692 ssh_runner.go:195] Run: which crictl
	I1201 19:03:21.170122  292692 ssh_runner.go:195] Run: sudo /usr/bin/crictl version
	I1201 19:03:21.215733  292692 start.go:559] Version:  0.1.0
	RuntimeName:  containerd
	RuntimeVersion:  1.6.25
	RuntimeApiVersion:  v1
	I1201 19:03:21.215872  292692 ssh_runner.go:195] Run: containerd --version
	I1201 19:03:21.245301  292692 ssh_runner.go:195] Run: containerd --version
	I1201 19:03:21.271732  292692 out.go:177] * Preparing Kubernetes v1.18.20 on containerd 1.6.25 ...
	I1201 19:03:21.273719  292692 cli_runner.go:164] Run: docker network inspect ingress-addon-legacy-853196 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
	I1201 19:03:21.290782  292692 ssh_runner.go:195] Run: grep 192.168.49.1	host.minikube.internal$ /etc/hosts
	I1201 19:03:21.295415  292692 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\thost.minikube.internal$' "/etc/hosts"; echo "192.168.49.1	host.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
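	(The one-liner above updates /etc/hosts in place: it filters out any existing host.minikube.internal entry, appends the mapping for the docker network gateway, and copies the temp file back over /etc/hosts. Given the gateway shown in the log, the resulting entry is simply:

	    192.168.49.1	host.minikube.internal
	)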
	I1201 19:03:21.308902  292692 preload.go:132] Checking if preload exists for k8s version v1.18.20 and runtime containerd
	I1201 19:03:21.308975  292692 ssh_runner.go:195] Run: sudo crictl images --output json
	I1201 19:03:21.349744  292692 containerd.go:600] couldn't find preloaded image for "registry.k8s.io/kube-apiserver:v1.18.20". assuming images are not preloaded.
	I1201 19:03:21.349818  292692 ssh_runner.go:195] Run: which lz4
	I1201 19:03:21.354470  292692 vm_assets.go:163] NewFileAsset: /home/jenkins/minikube-integration/17703-252966/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.18.20-containerd-overlay2-arm64.tar.lz4 -> /preloaded.tar.lz4
	I1201 19:03:21.354578  292692 ssh_runner.go:195] Run: stat -c "%!s(MISSING) %!y(MISSING)" /preloaded.tar.lz4
	I1201 19:03:21.359154  292692 ssh_runner.go:352] existence check for /preloaded.tar.lz4: stat -c "%!s(MISSING) %!y(MISSING)" /preloaded.tar.lz4: Process exited with status 1
	stdout:
	
	stderr:
	stat: cannot statx '/preloaded.tar.lz4': No such file or directory
	I1201 19:03:21.359194  292692 ssh_runner.go:362] scp /home/jenkins/minikube-integration/17703-252966/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.18.20-containerd-overlay2-arm64.tar.lz4 --> /preloaded.tar.lz4 (489149349 bytes)
	I1201 19:03:23.691832  292692 containerd.go:547] Took 2.337292 seconds to copy over tarball
	I1201 19:03:23.691916  292692 ssh_runner.go:195] Run: sudo tar -I lz4 -C /var -xf /preloaded.tar.lz4
	I1201 19:03:26.409829  292692 ssh_runner.go:235] Completed: sudo tar -I lz4 -C /var -xf /preloaded.tar.lz4: (2.717880515s)
	I1201 19:03:26.409857  292692 containerd.go:554] Took 2.718002 seconds to extract the tarball
	I1201 19:03:26.409867  292692 ssh_runner.go:146] rm: /preloaded.tar.lz4
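	(The preloaded image tarball is copied from the host cache into the node, unpacked into /var with lz4, and then removed. A minimal manual equivalent, assuming the same cached tarball and using the profile's kic container name from this run as the target:

	    # copy the cached preload tarball into the node and extract it (sketch)
	    docker cp /home/jenkins/minikube-integration/17703-252966/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.18.20-containerd-overlay2-arm64.tar.lz4 ingress-addon-legacy-853196:/preloaded.tar.lz4
	    docker exec ingress-addon-legacy-853196 sudo tar -I lz4 -C /var -xf /preloaded.tar.lz4
	    docker exec ingress-addon-legacy-853196 sudo rm /preloaded.tar.lz4
	)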
	I1201 19:03:26.550439  292692 ssh_runner.go:195] Run: sudo systemctl daemon-reload
	I1201 19:03:26.649326  292692 ssh_runner.go:195] Run: sudo systemctl restart containerd
	I1201 19:03:26.794453  292692 ssh_runner.go:195] Run: sudo crictl images --output json
	I1201 19:03:26.837917  292692 containerd.go:600] couldn't find preloaded image for "registry.k8s.io/kube-apiserver:v1.18.20". assuming images are not preloaded.
	I1201 19:03:26.837944  292692 cache_images.go:88] LoadImages start: [registry.k8s.io/kube-apiserver:v1.18.20 registry.k8s.io/kube-controller-manager:v1.18.20 registry.k8s.io/kube-scheduler:v1.18.20 registry.k8s.io/kube-proxy:v1.18.20 registry.k8s.io/pause:3.2 registry.k8s.io/etcd:3.4.3-0 registry.k8s.io/coredns:1.6.7 gcr.io/k8s-minikube/storage-provisioner:v5]
	I1201 19:03:26.838026  292692 image.go:134] retrieving image: gcr.io/k8s-minikube/storage-provisioner:v5
	I1201 19:03:26.838238  292692 image.go:134] retrieving image: registry.k8s.io/kube-apiserver:v1.18.20
	I1201 19:03:26.838314  292692 image.go:134] retrieving image: registry.k8s.io/kube-controller-manager:v1.18.20
	I1201 19:03:26.838415  292692 image.go:134] retrieving image: registry.k8s.io/kube-scheduler:v1.18.20
	I1201 19:03:26.838493  292692 image.go:134] retrieving image: registry.k8s.io/kube-proxy:v1.18.20
	I1201 19:03:26.838561  292692 image.go:134] retrieving image: registry.k8s.io/pause:3.2
	I1201 19:03:26.838625  292692 image.go:134] retrieving image: registry.k8s.io/etcd:3.4.3-0
	I1201 19:03:26.838714  292692 image.go:134] retrieving image: registry.k8s.io/coredns:1.6.7
	I1201 19:03:26.840358  292692 image.go:177] daemon lookup for registry.k8s.io/etcd:3.4.3-0: Error response from daemon: No such image: registry.k8s.io/etcd:3.4.3-0
	I1201 19:03:26.840382  292692 image.go:177] daemon lookup for registry.k8s.io/kube-scheduler:v1.18.20: Error response from daemon: No such image: registry.k8s.io/kube-scheduler:v1.18.20
	I1201 19:03:26.840437  292692 image.go:177] daemon lookup for registry.k8s.io/kube-apiserver:v1.18.20: Error response from daemon: No such image: registry.k8s.io/kube-apiserver:v1.18.20
	I1201 19:03:26.840495  292692 image.go:177] daemon lookup for registry.k8s.io/kube-controller-manager:v1.18.20: Error response from daemon: No such image: registry.k8s.io/kube-controller-manager:v1.18.20
	I1201 19:03:26.840659  292692 image.go:177] daemon lookup for registry.k8s.io/pause:3.2: Error response from daemon: No such image: registry.k8s.io/pause:3.2
	I1201 19:03:26.840754  292692 image.go:177] daemon lookup for gcr.io/k8s-minikube/storage-provisioner:v5: Error response from daemon: No such image: gcr.io/k8s-minikube/storage-provisioner:v5
	I1201 19:03:26.840781  292692 image.go:177] daemon lookup for registry.k8s.io/kube-proxy:v1.18.20: Error response from daemon: No such image: registry.k8s.io/kube-proxy:v1.18.20
	I1201 19:03:26.840823  292692 image.go:177] daemon lookup for registry.k8s.io/coredns:1.6.7: Error response from daemon: No such image: registry.k8s.io/coredns:1.6.7
	W1201 19:03:27.200566  292692 image.go:265] image registry.k8s.io/kube-scheduler:v1.18.20 arch mismatch: want arm64 got amd64. fixing
	I1201 19:03:27.200706  292692 containerd.go:251] Checking existence of image with name "registry.k8s.io/kube-scheduler:v1.18.20" and sha "177548d745cb87f773d02f41d453af2f2a1479dbe3c32e749cf6d8145c005e79"
	I1201 19:03:27.200784  292692 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images check
	I1201 19:03:27.216806  292692 containerd.go:251] Checking existence of image with name "registry.k8s.io/pause:3.2" and sha "2a060e2e7101d419352bf82c613158587400be743482d9a537ec4a9d1b4eb93c"
	I1201 19:03:27.216945  292692 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images check
	W1201 19:03:27.223938  292692 image.go:265] image registry.k8s.io/kube-apiserver:v1.18.20 arch mismatch: want arm64 got amd64. fixing
	I1201 19:03:27.224133  292692 containerd.go:251] Checking existence of image with name "registry.k8s.io/kube-apiserver:v1.18.20" and sha "d353007847ec85700463981309a5846c8d9c93fbcd1323104266212926d68257"
	I1201 19:03:27.224214  292692 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images check
	W1201 19:03:27.228606  292692 image.go:265] image registry.k8s.io/coredns:1.6.7 arch mismatch: want arm64 got amd64. fixing
	I1201 19:03:27.228811  292692 containerd.go:251] Checking existence of image with name "registry.k8s.io/coredns:1.6.7" and sha "ff3af22d8878afc6985d3fec3e066d00ef431aa166c3a01ac58f1990adc92a2c"
	I1201 19:03:27.228891  292692 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images check
	W1201 19:03:27.231547  292692 image.go:265] image registry.k8s.io/etcd:3.4.3-0 arch mismatch: want arm64 got amd64. fixing
	I1201 19:03:27.231796  292692 containerd.go:251] Checking existence of image with name "registry.k8s.io/etcd:3.4.3-0" and sha "29dd247b2572efbe28fcaea3fef1c5d72593da59f7350e3f6d2e6618983f9c03"
	I1201 19:03:27.231903  292692 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images check
	W1201 19:03:27.255513  292692 image.go:265] image registry.k8s.io/kube-proxy:v1.18.20 arch mismatch: want arm64 got amd64. fixing
	I1201 19:03:27.255817  292692 containerd.go:251] Checking existence of image with name "registry.k8s.io/kube-proxy:v1.18.20" and sha "b11cdc97ac6ac4ef2b3b0662edbe16597084b17cbc8e3d61fcaf4ef827a7ed18"
	I1201 19:03:27.255908  292692 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images check
	W1201 19:03:27.259309  292692 image.go:265] image registry.k8s.io/kube-controller-manager:v1.18.20 arch mismatch: want arm64 got amd64. fixing
	I1201 19:03:27.259497  292692 containerd.go:251] Checking existence of image with name "registry.k8s.io/kube-controller-manager:v1.18.20" and sha "297c79afbdb81ceb4cf857e0c54a0de7b6ce7ebe01e6cab68fc8baf342be3ea7"
	I1201 19:03:27.259579  292692 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images check
	W1201 19:03:27.482210  292692 image.go:265] image gcr.io/k8s-minikube/storage-provisioner:v5 arch mismatch: want arm64 got amd64. fixing
	I1201 19:03:27.482412  292692 containerd.go:251] Checking existence of image with name "gcr.io/k8s-minikube/storage-provisioner:v5" and sha "66749159455b3f08c8318fe0233122f54d0f5889f9c5fdfb73c3fd9d99895b51"
	I1201 19:03:27.482503  292692 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images check
	I1201 19:03:27.583411  292692 cache_images.go:116] "registry.k8s.io/kube-scheduler:v1.18.20" needs transfer: "registry.k8s.io/kube-scheduler:v1.18.20" does not exist at hash "177548d745cb87f773d02f41d453af2f2a1479dbe3c32e749cf6d8145c005e79" in container runtime
	I1201 19:03:27.583524  292692 cri.go:218] Removing image: registry.k8s.io/kube-scheduler:v1.18.20
	I1201 19:03:27.583606  292692 ssh_runner.go:195] Run: which crictl
	I1201 19:03:27.826644  292692 cache_images.go:116] "registry.k8s.io/pause:3.2" needs transfer: "registry.k8s.io/pause:3.2" does not exist at hash "2a060e2e7101d419352bf82c613158587400be743482d9a537ec4a9d1b4eb93c" in container runtime
	I1201 19:03:27.826739  292692 cri.go:218] Removing image: registry.k8s.io/pause:3.2
	I1201 19:03:27.826822  292692 ssh_runner.go:195] Run: which crictl
	I1201 19:03:27.878591  292692 cache_images.go:116] "registry.k8s.io/kube-apiserver:v1.18.20" needs transfer: "registry.k8s.io/kube-apiserver:v1.18.20" does not exist at hash "d353007847ec85700463981309a5846c8d9c93fbcd1323104266212926d68257" in container runtime
	I1201 19:03:27.878769  292692 cri.go:218] Removing image: registry.k8s.io/kube-apiserver:v1.18.20
	I1201 19:03:27.878849  292692 ssh_runner.go:195] Run: which crictl
	I1201 19:03:27.879279  292692 cache_images.go:116] "registry.k8s.io/coredns:1.6.7" needs transfer: "registry.k8s.io/coredns:1.6.7" does not exist at hash "ff3af22d8878afc6985d3fec3e066d00ef431aa166c3a01ac58f1990adc92a2c" in container runtime
	I1201 19:03:27.879318  292692 cri.go:218] Removing image: registry.k8s.io/coredns:1.6.7
	I1201 19:03:27.879372  292692 ssh_runner.go:195] Run: which crictl
	I1201 19:03:27.981610  292692 cache_images.go:116] "registry.k8s.io/etcd:3.4.3-0" needs transfer: "registry.k8s.io/etcd:3.4.3-0" does not exist at hash "29dd247b2572efbe28fcaea3fef1c5d72593da59f7350e3f6d2e6618983f9c03" in container runtime
	I1201 19:03:27.981702  292692 cri.go:218] Removing image: registry.k8s.io/etcd:3.4.3-0
	I1201 19:03:27.981780  292692 ssh_runner.go:195] Run: which crictl
	I1201 19:03:28.002715  292692 cache_images.go:116] "registry.k8s.io/kube-proxy:v1.18.20" needs transfer: "registry.k8s.io/kube-proxy:v1.18.20" does not exist at hash "b11cdc97ac6ac4ef2b3b0662edbe16597084b17cbc8e3d61fcaf4ef827a7ed18" in container runtime
	I1201 19:03:28.002811  292692 cri.go:218] Removing image: registry.k8s.io/kube-proxy:v1.18.20
	I1201 19:03:28.002890  292692 ssh_runner.go:195] Run: which crictl
	I1201 19:03:28.003475  292692 cache_images.go:116] "registry.k8s.io/kube-controller-manager:v1.18.20" needs transfer: "registry.k8s.io/kube-controller-manager:v1.18.20" does not exist at hash "297c79afbdb81ceb4cf857e0c54a0de7b6ce7ebe01e6cab68fc8baf342be3ea7" in container runtime
	I1201 19:03:28.003518  292692 cri.go:218] Removing image: registry.k8s.io/kube-controller-manager:v1.18.20
	I1201 19:03:28.003584  292692 ssh_runner.go:195] Run: which crictl
	I1201 19:03:28.070186  292692 cache_images.go:116] "gcr.io/k8s-minikube/storage-provisioner:v5" needs transfer: "gcr.io/k8s-minikube/storage-provisioner:v5" does not exist at hash "66749159455b3f08c8318fe0233122f54d0f5889f9c5fdfb73c3fd9d99895b51" in container runtime
	I1201 19:03:28.070230  292692 cri.go:218] Removing image: gcr.io/k8s-minikube/storage-provisioner:v5
	I1201 19:03:28.070274  292692 ssh_runner.go:195] Run: which crictl
	I1201 19:03:28.070309  292692 ssh_runner.go:195] Run: sudo /usr/bin/crictl rmi registry.k8s.io/kube-scheduler:v1.18.20
	I1201 19:03:28.070367  292692 ssh_runner.go:195] Run: sudo /usr/bin/crictl rmi registry.k8s.io/pause:3.2
	I1201 19:03:28.070426  292692 ssh_runner.go:195] Run: sudo /usr/bin/crictl rmi registry.k8s.io/coredns:1.6.7
	I1201 19:03:28.070481  292692 ssh_runner.go:195] Run: sudo /usr/bin/crictl rmi registry.k8s.io/kube-apiserver:v1.18.20
	I1201 19:03:28.070561  292692 ssh_runner.go:195] Run: sudo /usr/bin/crictl rmi registry.k8s.io/etcd:3.4.3-0
	I1201 19:03:28.070606  292692 ssh_runner.go:195] Run: sudo /usr/bin/crictl rmi registry.k8s.io/kube-proxy:v1.18.20
	I1201 19:03:28.070581  292692 ssh_runner.go:195] Run: sudo /usr/bin/crictl rmi registry.k8s.io/kube-controller-manager:v1.18.20
	I1201 19:03:28.260035  292692 cache_images.go:286] Loading image from: /home/jenkins/minikube-integration/17703-252966/.minikube/cache/images/arm64/registry.k8s.io/kube-controller-manager_v1.18.20
	I1201 19:03:28.260094  292692 cache_images.go:286] Loading image from: /home/jenkins/minikube-integration/17703-252966/.minikube/cache/images/arm64/registry.k8s.io/kube-apiserver_v1.18.20
	I1201 19:03:28.260127  292692 cache_images.go:286] Loading image from: /home/jenkins/minikube-integration/17703-252966/.minikube/cache/images/arm64/registry.k8s.io/etcd_3.4.3-0
	I1201 19:03:28.260156  292692 cache_images.go:286] Loading image from: /home/jenkins/minikube-integration/17703-252966/.minikube/cache/images/arm64/registry.k8s.io/kube-proxy_v1.18.20
	I1201 19:03:28.260185  292692 cache_images.go:286] Loading image from: /home/jenkins/minikube-integration/17703-252966/.minikube/cache/images/arm64/registry.k8s.io/coredns_1.6.7
	I1201 19:03:28.260233  292692 cache_images.go:286] Loading image from: /home/jenkins/minikube-integration/17703-252966/.minikube/cache/images/arm64/registry.k8s.io/kube-scheduler_v1.18.20
	I1201 19:03:28.260283  292692 ssh_runner.go:195] Run: sudo /usr/bin/crictl rmi gcr.io/k8s-minikube/storage-provisioner:v5
	I1201 19:03:28.260363  292692 cache_images.go:286] Loading image from: /home/jenkins/minikube-integration/17703-252966/.minikube/cache/images/arm64/registry.k8s.io/pause_3.2
	I1201 19:03:28.313157  292692 cache_images.go:286] Loading image from: /home/jenkins/minikube-integration/17703-252966/.minikube/cache/images/arm64/gcr.io/k8s-minikube/storage-provisioner_v5
	I1201 19:03:28.313229  292692 cache_images.go:92] LoadImages completed in 1.475272966s
	W1201 19:03:28.313302  292692 out.go:239] X Unable to load cached images: loading cached images: stat /home/jenkins/minikube-integration/17703-252966/.minikube/cache/images/arm64/registry.k8s.io/kube-controller-manager_v1.18.20: no such file or directory
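	(This warning appears benign for this run: the per-architecture cache under .minikube/cache/images/arm64/ is empty, so the control-plane images are pulled later during kubeadm's preflight step instead of being side-loaded. To inspect what is actually present in containerd's k8s.io namespace on the node, the CRI listing command used above can be run directly, and `ctr images ls` is the standard containerd-client equivalent:

	    sudo crictl images --output json
	    sudo ctr -n=k8s.io images ls
	)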
	I1201 19:03:28.313396  292692 ssh_runner.go:195] Run: sudo crictl info
	I1201 19:03:28.355838  292692 cni.go:84] Creating CNI manager for ""
	I1201 19:03:28.355911  292692 cni.go:143] "docker" driver + "containerd" runtime found, recommending kindnet
	I1201 19:03:28.355954  292692 kubeadm.go:87] Using pod CIDR: 10.244.0.0/16
	I1201 19:03:28.356002  292692 kubeadm.go:176] kubeadm options: {CertDir:/var/lib/minikube/certs ServiceCIDR:10.96.0.0/12 PodSubnet:10.244.0.0/16 AdvertiseAddress:192.168.49.2 APIServerPort:8443 KubernetesVersion:v1.18.20 EtcdDataDir:/var/lib/minikube/etcd EtcdExtraArgs:map[] ClusterName:ingress-addon-legacy-853196 NodeName:ingress-addon-legacy-853196 DNSDomain:cluster.local CRISocket:/run/containerd/containerd.sock ImageRepository: ComponentOptions:[{Component:apiServer ExtraArgs:map[enable-admission-plugins:NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota] Pairs:map[certSANs:["127.0.0.1", "localhost", "192.168.49.2"]]} {Component:controllerManager ExtraArgs:map[allocate-node-cidrs:true leader-elect:false] Pairs:map[]} {Component:scheduler ExtraArgs:map[leader-elect:false] Pairs:map[]}] FeatureArgs:map[] NodeIP:192.168.49.2 CgroupDriver:cgroupfs ClientCAFile:/var/lib/minikube/certs/ca.crt StaticPodPath:/etc/kubernetes/manifests ControlPlaneAddress:control-plane.minikube.internal KubeProxyOptions:map[] ResolvConfSearchRegression:false KubeletConfigOpts:map[hairpinMode:hairpin-veth runtimeRequestTimeout:15m] PrependCriSocketUnix:false}
	I1201 19:03:28.356198  292692 kubeadm.go:181] kubeadm config:
	apiVersion: kubeadm.k8s.io/v1beta2
	kind: InitConfiguration
	localAPIEndpoint:
	  advertiseAddress: 192.168.49.2
	  bindPort: 8443
	bootstrapTokens:
	  - groups:
	      - system:bootstrappers:kubeadm:default-node-token
	    ttl: 24h0m0s
	    usages:
	      - signing
	      - authentication
	nodeRegistration:
	  criSocket: /run/containerd/containerd.sock
	  name: "ingress-addon-legacy-853196"
	  kubeletExtraArgs:
	    node-ip: 192.168.49.2
	  taints: []
	---
	apiVersion: kubeadm.k8s.io/v1beta2
	kind: ClusterConfiguration
	apiServer:
	  certSANs: ["127.0.0.1", "localhost", "192.168.49.2"]
	  extraArgs:
	    enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
	controllerManager:
	  extraArgs:
	    allocate-node-cidrs: "true"
	    leader-elect: "false"
	scheduler:
	  extraArgs:
	    leader-elect: "false"
	certificatesDir: /var/lib/minikube/certs
	clusterName: mk
	controlPlaneEndpoint: control-plane.minikube.internal:8443
	dns:
	  type: CoreDNS
	etcd:
	  local:
	    dataDir: /var/lib/minikube/etcd
	    extraArgs:
	      proxy-refresh-interval: "70000"
	kubernetesVersion: v1.18.20
	networking:
	  dnsDomain: cluster.local
	  podSubnet: "10.244.0.0/16"
	  serviceSubnet: 10.96.0.0/12
	---
	apiVersion: kubelet.config.k8s.io/v1beta1
	kind: KubeletConfiguration
	authentication:
	  x509:
	    clientCAFile: /var/lib/minikube/certs/ca.crt
	cgroupDriver: cgroupfs
	hairpinMode: hairpin-veth
	runtimeRequestTimeout: 15m
	clusterDomain: "cluster.local"
	# disable disk resource management by default
	imageGCHighThresholdPercent: 100
	evictionHard:
	  nodefs.available: "0%!"(MISSING)
	  nodefs.inodesFree: "0%!"(MISSING)
	  imagefs.available: "0%!"(MISSING)
	failSwapOn: false
	staticPodPath: /etc/kubernetes/manifests
	---
	apiVersion: kubeproxy.config.k8s.io/v1alpha1
	kind: KubeProxyConfiguration
	clusterCIDR: "10.244.0.0/16"
	metricsBindAddress: 0.0.0.0:10249
	conntrack:
	  maxPerCore: 0
	# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_established"
	  tcpEstablishedTimeout: 0s
	# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_close"
	  tcpCloseWaitTimeout: 0s
	
	I1201 19:03:28.356329  292692 kubeadm.go:976] kubelet [Unit]
	Wants=containerd.service
	
	[Service]
	ExecStart=
	ExecStart=/var/lib/minikube/binaries/v1.18.20/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --config=/var/lib/kubelet/config.yaml --container-runtime=remote --container-runtime-endpoint=unix:///run/containerd/containerd.sock --hostname-override=ingress-addon-legacy-853196 --kubeconfig=/etc/kubernetes/kubelet.conf --network-plugin=cni --node-ip=192.168.49.2
	
	[Install]
	 config:
	{KubernetesVersion:v1.18.20 ClusterName:ingress-addon-legacy-853196 Namespace:default APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI: NodeIP: NodePort:8443 NodeName:}
	I1201 19:03:28.356430  292692 ssh_runner.go:195] Run: sudo ls /var/lib/minikube/binaries/v1.18.20
	I1201 19:03:28.366945  292692 binaries.go:44] Found k8s binaries, skipping transfer
	I1201 19:03:28.367015  292692 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/kubelet.service.d /lib/systemd/system /var/tmp/minikube
	I1201 19:03:28.377533  292692 ssh_runner.go:362] scp memory --> /etc/systemd/system/kubelet.service.d/10-kubeadm.conf (448 bytes)
	I1201 19:03:28.399015  292692 ssh_runner.go:362] scp memory --> /lib/systemd/system/kubelet.service (353 bytes)
	I1201 19:03:28.421013  292692 ssh_runner.go:362] scp memory --> /var/tmp/minikube/kubeadm.yaml.new (2131 bytes)
	I1201 19:03:28.441964  292692 ssh_runner.go:195] Run: grep 192.168.49.2	control-plane.minikube.internal$ /etc/hosts
	I1201 19:03:28.446708  292692 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\tcontrol-plane.minikube.internal$' "/etc/hosts"; echo "192.168.49.2	control-plane.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
	I1201 19:03:28.460224  292692 certs.go:56] Setting up /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/ingress-addon-legacy-853196 for IP: 192.168.49.2
	I1201 19:03:28.460257  292692 certs.go:190] acquiring lock for shared ca certs: {Name:mk799b1e63d23a413d1b6e34a0169dabbea1b951 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
	I1201 19:03:28.460408  292692 certs.go:199] skipping minikubeCA CA generation: /home/jenkins/minikube-integration/17703-252966/.minikube/ca.key
	I1201 19:03:28.460488  292692 certs.go:199] skipping proxyClientCA CA generation: /home/jenkins/minikube-integration/17703-252966/.minikube/proxy-client-ca.key
	I1201 19:03:28.460540  292692 certs.go:319] generating minikube-user signed cert: /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/ingress-addon-legacy-853196/client.key
	I1201 19:03:28.460555  292692 crypto.go:68] Generating cert /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/ingress-addon-legacy-853196/client.crt with IP's: []
	I1201 19:03:28.736512  292692 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/ingress-addon-legacy-853196/client.crt ...
	I1201 19:03:28.736544  292692 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/ingress-addon-legacy-853196/client.crt: {Name:mkabd3b8cd78086d27966995aac13a8b3c3937b9 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
	I1201 19:03:28.737145  292692 crypto.go:164] Writing key to /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/ingress-addon-legacy-853196/client.key ...
	I1201 19:03:28.737164  292692 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/ingress-addon-legacy-853196/client.key: {Name:mke957297c190031114216a8398ad03f6f753ecf Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
	I1201 19:03:28.737627  292692 certs.go:319] generating minikube signed cert: /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/ingress-addon-legacy-853196/apiserver.key.dd3b5fb2
	I1201 19:03:28.737648  292692 crypto.go:68] Generating cert /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/ingress-addon-legacy-853196/apiserver.crt.dd3b5fb2 with IP's: [192.168.49.2 10.96.0.1 127.0.0.1 10.0.0.1]
	I1201 19:03:30.003606  292692 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/ingress-addon-legacy-853196/apiserver.crt.dd3b5fb2 ...
	I1201 19:03:30.003640  292692 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/ingress-addon-legacy-853196/apiserver.crt.dd3b5fb2: {Name:mk3d5a1a54d283fba02c48f1436956748a5d1817 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
	I1201 19:03:30.003831  292692 crypto.go:164] Writing key to /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/ingress-addon-legacy-853196/apiserver.key.dd3b5fb2 ...
	I1201 19:03:30.003846  292692 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/ingress-addon-legacy-853196/apiserver.key.dd3b5fb2: {Name:mk5e995298af883d290114eda3dccb88c189c9c1 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
	I1201 19:03:30.004452  292692 certs.go:337] copying /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/ingress-addon-legacy-853196/apiserver.crt.dd3b5fb2 -> /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/ingress-addon-legacy-853196/apiserver.crt
	I1201 19:03:30.004554  292692 certs.go:341] copying /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/ingress-addon-legacy-853196/apiserver.key.dd3b5fb2 -> /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/ingress-addon-legacy-853196/apiserver.key
	I1201 19:03:30.004614  292692 certs.go:319] generating aggregator signed cert: /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/ingress-addon-legacy-853196/proxy-client.key
	I1201 19:03:30.004632  292692 crypto.go:68] Generating cert /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/ingress-addon-legacy-853196/proxy-client.crt with IP's: []
	I1201 19:03:30.410608  292692 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/ingress-addon-legacy-853196/proxy-client.crt ...
	I1201 19:03:30.410643  292692 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/ingress-addon-legacy-853196/proxy-client.crt: {Name:mk2f114fa08eb736af9c930be61aeb101aa98cab Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
	I1201 19:03:30.410834  292692 crypto.go:164] Writing key to /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/ingress-addon-legacy-853196/proxy-client.key ...
	I1201 19:03:30.410848  292692 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/ingress-addon-legacy-853196/proxy-client.key: {Name:mke1372f33a9895ace368025a522d4eb6a0b0887 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
	I1201 19:03:30.410945  292692 vm_assets.go:163] NewFileAsset: /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/ingress-addon-legacy-853196/apiserver.crt -> /var/lib/minikube/certs/apiserver.crt
	I1201 19:03:30.410970  292692 vm_assets.go:163] NewFileAsset: /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/ingress-addon-legacy-853196/apiserver.key -> /var/lib/minikube/certs/apiserver.key
	I1201 19:03:30.410990  292692 vm_assets.go:163] NewFileAsset: /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/ingress-addon-legacy-853196/proxy-client.crt -> /var/lib/minikube/certs/proxy-client.crt
	I1201 19:03:30.411013  292692 vm_assets.go:163] NewFileAsset: /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/ingress-addon-legacy-853196/proxy-client.key -> /var/lib/minikube/certs/proxy-client.key
	I1201 19:03:30.411033  292692 vm_assets.go:163] NewFileAsset: /home/jenkins/minikube-integration/17703-252966/.minikube/ca.crt -> /var/lib/minikube/certs/ca.crt
	I1201 19:03:30.411049  292692 vm_assets.go:163] NewFileAsset: /home/jenkins/minikube-integration/17703-252966/.minikube/ca.key -> /var/lib/minikube/certs/ca.key
	I1201 19:03:30.411065  292692 vm_assets.go:163] NewFileAsset: /home/jenkins/minikube-integration/17703-252966/.minikube/proxy-client-ca.crt -> /var/lib/minikube/certs/proxy-client-ca.crt
	I1201 19:03:30.411080  292692 vm_assets.go:163] NewFileAsset: /home/jenkins/minikube-integration/17703-252966/.minikube/proxy-client-ca.key -> /var/lib/minikube/certs/proxy-client-ca.key
	I1201 19:03:30.411133  292692 certs.go:437] found cert: /home/jenkins/minikube-integration/17703-252966/.minikube/certs/home/jenkins/minikube-integration/17703-252966/.minikube/certs/258301.pem (1338 bytes)
	W1201 19:03:30.411171  292692 certs.go:433] ignoring /home/jenkins/minikube-integration/17703-252966/.minikube/certs/home/jenkins/minikube-integration/17703-252966/.minikube/certs/258301_empty.pem, impossibly tiny 0 bytes
	I1201 19:03:30.411184  292692 certs.go:437] found cert: /home/jenkins/minikube-integration/17703-252966/.minikube/certs/home/jenkins/minikube-integration/17703-252966/.minikube/certs/ca-key.pem (1675 bytes)
	I1201 19:03:30.411211  292692 certs.go:437] found cert: /home/jenkins/minikube-integration/17703-252966/.minikube/certs/home/jenkins/minikube-integration/17703-252966/.minikube/certs/ca.pem (1078 bytes)
	I1201 19:03:30.411243  292692 certs.go:437] found cert: /home/jenkins/minikube-integration/17703-252966/.minikube/certs/home/jenkins/minikube-integration/17703-252966/.minikube/certs/cert.pem (1123 bytes)
	I1201 19:03:30.411269  292692 certs.go:437] found cert: /home/jenkins/minikube-integration/17703-252966/.minikube/certs/home/jenkins/minikube-integration/17703-252966/.minikube/certs/key.pem (1679 bytes)
	I1201 19:03:30.411318  292692 certs.go:437] found cert: /home/jenkins/minikube-integration/17703-252966/.minikube/files/etc/ssl/certs/home/jenkins/minikube-integration/17703-252966/.minikube/files/etc/ssl/certs/2583012.pem (1708 bytes)
	I1201 19:03:30.411355  292692 vm_assets.go:163] NewFileAsset: /home/jenkins/minikube-integration/17703-252966/.minikube/files/etc/ssl/certs/2583012.pem -> /usr/share/ca-certificates/2583012.pem
	I1201 19:03:30.411373  292692 vm_assets.go:163] NewFileAsset: /home/jenkins/minikube-integration/17703-252966/.minikube/ca.crt -> /usr/share/ca-certificates/minikubeCA.pem
	I1201 19:03:30.411387  292692 vm_assets.go:163] NewFileAsset: /home/jenkins/minikube-integration/17703-252966/.minikube/certs/258301.pem -> /usr/share/ca-certificates/258301.pem
	I1201 19:03:30.411945  292692 ssh_runner.go:362] scp /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/ingress-addon-legacy-853196/apiserver.crt --> /var/lib/minikube/certs/apiserver.crt (1399 bytes)
	I1201 19:03:30.440739  292692 ssh_runner.go:362] scp /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/ingress-addon-legacy-853196/apiserver.key --> /var/lib/minikube/certs/apiserver.key (1675 bytes)
	I1201 19:03:30.470249  292692 ssh_runner.go:362] scp /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/ingress-addon-legacy-853196/proxy-client.crt --> /var/lib/minikube/certs/proxy-client.crt (1147 bytes)
	I1201 19:03:30.499049  292692 ssh_runner.go:362] scp /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/ingress-addon-legacy-853196/proxy-client.key --> /var/lib/minikube/certs/proxy-client.key (1675 bytes)
	I1201 19:03:30.527513  292692 ssh_runner.go:362] scp /home/jenkins/minikube-integration/17703-252966/.minikube/ca.crt --> /var/lib/minikube/certs/ca.crt (1111 bytes)
	I1201 19:03:30.555902  292692 ssh_runner.go:362] scp /home/jenkins/minikube-integration/17703-252966/.minikube/ca.key --> /var/lib/minikube/certs/ca.key (1679 bytes)
	I1201 19:03:30.584384  292692 ssh_runner.go:362] scp /home/jenkins/minikube-integration/17703-252966/.minikube/proxy-client-ca.crt --> /var/lib/minikube/certs/proxy-client-ca.crt (1119 bytes)
	I1201 19:03:30.612581  292692 ssh_runner.go:362] scp /home/jenkins/minikube-integration/17703-252966/.minikube/proxy-client-ca.key --> /var/lib/minikube/certs/proxy-client-ca.key (1675 bytes)
	I1201 19:03:30.641267  292692 ssh_runner.go:362] scp /home/jenkins/minikube-integration/17703-252966/.minikube/files/etc/ssl/certs/2583012.pem --> /usr/share/ca-certificates/2583012.pem (1708 bytes)
	I1201 19:03:30.669764  292692 ssh_runner.go:362] scp /home/jenkins/minikube-integration/17703-252966/.minikube/ca.crt --> /usr/share/ca-certificates/minikubeCA.pem (1111 bytes)
	I1201 19:03:30.698283  292692 ssh_runner.go:362] scp /home/jenkins/minikube-integration/17703-252966/.minikube/certs/258301.pem --> /usr/share/ca-certificates/258301.pem (1338 bytes)
	I1201 19:03:30.726831  292692 ssh_runner.go:362] scp memory --> /var/lib/minikube/kubeconfig (738 bytes)
	I1201 19:03:30.748389  292692 ssh_runner.go:195] Run: openssl version
	I1201 19:03:30.755668  292692 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/2583012.pem && ln -fs /usr/share/ca-certificates/2583012.pem /etc/ssl/certs/2583012.pem"
	I1201 19:03:30.767673  292692 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/2583012.pem
	I1201 19:03:30.772569  292692 certs.go:480] hashing: -rw-r--r-- 1 root root 1708 Dec  1 18:58 /usr/share/ca-certificates/2583012.pem
	I1201 19:03:30.772635  292692 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/2583012.pem
	I1201 19:03:30.781553  292692 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/3ec20f2e.0 || ln -fs /etc/ssl/certs/2583012.pem /etc/ssl/certs/3ec20f2e.0"
	I1201 19:03:30.793293  292692 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/minikubeCA.pem && ln -fs /usr/share/ca-certificates/minikubeCA.pem /etc/ssl/certs/minikubeCA.pem"
	I1201 19:03:30.805105  292692 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/minikubeCA.pem
	I1201 19:03:30.809777  292692 certs.go:480] hashing: -rw-r--r-- 1 root root 1111 Dec  1 18:52 /usr/share/ca-certificates/minikubeCA.pem
	I1201 19:03:30.809887  292692 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem
	I1201 19:03:30.818472  292692 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/b5213941.0 || ln -fs /etc/ssl/certs/minikubeCA.pem /etc/ssl/certs/b5213941.0"
	I1201 19:03:30.830698  292692 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/258301.pem && ln -fs /usr/share/ca-certificates/258301.pem /etc/ssl/certs/258301.pem"
	I1201 19:03:30.842507  292692 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/258301.pem
	I1201 19:03:30.847386  292692 certs.go:480] hashing: -rw-r--r-- 1 root root 1338 Dec  1 18:58 /usr/share/ca-certificates/258301.pem
	I1201 19:03:30.847506  292692 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/258301.pem
	I1201 19:03:30.856103  292692 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/51391683.0 || ln -fs /etc/ssl/certs/258301.pem /etc/ssl/certs/51391683.0"
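	(The openssl/ln pairs above follow the standard OpenSSL subject-hash convention: each CA copied under /usr/share/ca-certificates gets a symlink named <subject-hash>.0 in /etc/ssl/certs so TLS clients can locate it. For one of the certificates from this run, the equivalent manual steps would look roughly like:

	    HASH=$(openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem)
	    sudo ln -fs /usr/share/ca-certificates/minikubeCA.pem "/etc/ssl/certs/${HASH}.0"   # b5213941.0 in the log above
	)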
	I1201 19:03:30.867866  292692 ssh_runner.go:195] Run: ls /var/lib/minikube/certs/etcd
	I1201 19:03:30.872546  292692 certs.go:353] certs directory doesn't exist, likely first start: ls /var/lib/minikube/certs/etcd: Process exited with status 2
	stdout:
	
	stderr:
	ls: cannot access '/var/lib/minikube/certs/etcd': No such file or directory
	I1201 19:03:30.872598  292692 kubeadm.go:404] StartCluster: {Name:ingress-addon-legacy-853196 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.42-1701387262-17703@sha256:a5458414df1be5e58eff93b3e67e6ecaad7e51ab23139de15714f7345af15e2f Memory:4096 CPUs:2 DiskSize:20000 VMDriver: Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:0 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.18.20 ClusterName:ingress-addon-legacy-853196 Namespace:default APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI: NodeIP: NodePort:8443 NodeName:} Nodes:[{Name: IP:192.168.49.2 Port:8443 KubernetesVersion:v1.18.20 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 AutoPauseInterval:1m0s GPUs:}
	I1201 19:03:30.872688  292692 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:paused Name: Namespaces:[kube-system]}
	I1201 19:03:30.872746  292692 ssh_runner.go:195] Run: sudo -s eval "crictl ps -a --quiet --label io.kubernetes.pod.namespace=kube-system"
	I1201 19:03:30.918455  292692 cri.go:89] found id: ""
	I1201 19:03:30.918525  292692 ssh_runner.go:195] Run: sudo ls /var/lib/kubelet/kubeadm-flags.env /var/lib/kubelet/config.yaml /var/lib/minikube/etcd
	I1201 19:03:30.929507  292692 ssh_runner.go:195] Run: sudo cp /var/tmp/minikube/kubeadm.yaml.new /var/tmp/minikube/kubeadm.yaml
	I1201 19:03:30.940501  292692 kubeadm.go:226] ignoring SystemVerification for kubeadm because of docker driver
	I1201 19:03:30.940601  292692 ssh_runner.go:195] Run: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf
	I1201 19:03:30.951603  292692 kubeadm.go:152] config check failed, skipping stale config cleanup: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf: Process exited with status 2
	stdout:
	
	stderr:
	ls: cannot access '/etc/kubernetes/admin.conf': No such file or directory
	ls: cannot access '/etc/kubernetes/kubelet.conf': No such file or directory
	ls: cannot access '/etc/kubernetes/controller-manager.conf': No such file or directory
	ls: cannot access '/etc/kubernetes/scheduler.conf': No such file or directory
	I1201 19:03:30.951696  292692 ssh_runner.go:286] Start: /bin/bash -c "sudo env PATH="/var/lib/minikube/binaries/v1.18.20:$PATH" kubeadm init --config /var/tmp/minikube/kubeadm.yaml  --ignore-preflight-errors=DirAvailable--etc-kubernetes-manifests,DirAvailable--var-lib-minikube,DirAvailable--var-lib-minikube-etcd,FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml,FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml,FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml,FileAvailable--etc-kubernetes-manifests-etcd.yaml,Port-10250,Swap,NumCPU,SystemVerification,FileContent--proc-sys-net-bridge-bridge-nf-call-iptables"
	I1201 19:03:31.010454  292692 kubeadm.go:322] [init] Using Kubernetes version: v1.18.20
	I1201 19:03:31.010535  292692 kubeadm.go:322] [preflight] Running pre-flight checks
	I1201 19:03:31.068339  292692 kubeadm.go:322] [preflight] The system verification failed. Printing the output from the verification:
	I1201 19:03:31.068493  292692 kubeadm.go:322] KERNEL_VERSION: 5.15.0-1050-aws
	I1201 19:03:31.068558  292692 kubeadm.go:322] OS: Linux
	I1201 19:03:31.068620  292692 kubeadm.go:322] CGROUPS_CPU: enabled
	I1201 19:03:31.068684  292692 kubeadm.go:322] CGROUPS_CPUACCT: enabled
	I1201 19:03:31.068763  292692 kubeadm.go:322] CGROUPS_CPUSET: enabled
	I1201 19:03:31.068834  292692 kubeadm.go:322] CGROUPS_DEVICES: enabled
	I1201 19:03:31.068915  292692 kubeadm.go:322] CGROUPS_FREEZER: enabled
	I1201 19:03:31.068991  292692 kubeadm.go:322] CGROUPS_MEMORY: enabled
	I1201 19:03:31.178207  292692 kubeadm.go:322] [preflight] Pulling images required for setting up a Kubernetes cluster
	I1201 19:03:31.178372  292692 kubeadm.go:322] [preflight] This might take a minute or two, depending on the speed of your internet connection
	I1201 19:03:31.178526  292692 kubeadm.go:322] [preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
	I1201 19:03:31.424788  292692 kubeadm.go:322] [kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
	I1201 19:03:31.426704  292692 kubeadm.go:322] [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
	I1201 19:03:31.427035  292692 kubeadm.go:322] [kubelet-start] Starting the kubelet
	I1201 19:03:31.546062  292692 kubeadm.go:322] [certs] Using certificateDir folder "/var/lib/minikube/certs"
	I1201 19:03:31.548288  292692 out.go:204]   - Generating certificates and keys ...
	I1201 19:03:31.548498  292692 kubeadm.go:322] [certs] Using existing ca certificate authority
	I1201 19:03:31.548567  292692 kubeadm.go:322] [certs] Using existing apiserver certificate and key on disk
	I1201 19:03:32.434614  292692 kubeadm.go:322] [certs] Generating "apiserver-kubelet-client" certificate and key
	I1201 19:03:33.351902  292692 kubeadm.go:322] [certs] Generating "front-proxy-ca" certificate and key
	I1201 19:03:33.731137  292692 kubeadm.go:322] [certs] Generating "front-proxy-client" certificate and key
	I1201 19:03:34.710365  292692 kubeadm.go:322] [certs] Generating "etcd/ca" certificate and key
	I1201 19:03:35.440571  292692 kubeadm.go:322] [certs] Generating "etcd/server" certificate and key
	I1201 19:03:35.440990  292692 kubeadm.go:322] [certs] etcd/server serving cert is signed for DNS names [ingress-addon-legacy-853196 localhost] and IPs [192.168.49.2 127.0.0.1 ::1]
	I1201 19:03:35.781337  292692 kubeadm.go:322] [certs] Generating "etcd/peer" certificate and key
	I1201 19:03:35.781763  292692 kubeadm.go:322] [certs] etcd/peer serving cert is signed for DNS names [ingress-addon-legacy-853196 localhost] and IPs [192.168.49.2 127.0.0.1 ::1]
	I1201 19:03:36.507620  292692 kubeadm.go:322] [certs] Generating "etcd/healthcheck-client" certificate and key
	I1201 19:03:37.043193  292692 kubeadm.go:322] [certs] Generating "apiserver-etcd-client" certificate and key
	I1201 19:03:37.735431  292692 kubeadm.go:322] [certs] Generating "sa" key and public key
	I1201 19:03:37.735842  292692 kubeadm.go:322] [kubeconfig] Using kubeconfig folder "/etc/kubernetes"
	I1201 19:03:37.897876  292692 kubeadm.go:322] [kubeconfig] Writing "admin.conf" kubeconfig file
	I1201 19:03:38.555787  292692 kubeadm.go:322] [kubeconfig] Writing "kubelet.conf" kubeconfig file
	I1201 19:03:38.921034  292692 kubeadm.go:322] [kubeconfig] Writing "controller-manager.conf" kubeconfig file
	I1201 19:03:39.084140  292692 kubeadm.go:322] [kubeconfig] Writing "scheduler.conf" kubeconfig file
	I1201 19:03:39.085223  292692 kubeadm.go:322] [control-plane] Using manifest folder "/etc/kubernetes/manifests"
	I1201 19:03:39.089436  292692 out.go:204]   - Booting up control plane ...
	I1201 19:03:39.089553  292692 kubeadm.go:322] [control-plane] Creating static Pod manifest for "kube-apiserver"
	I1201 19:03:39.093630  292692 kubeadm.go:322] [control-plane] Creating static Pod manifest for "kube-controller-manager"
	I1201 19:03:39.104040  292692 kubeadm.go:322] [control-plane] Creating static Pod manifest for "kube-scheduler"
	I1201 19:03:39.106972  292692 kubeadm.go:322] [etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
	I1201 19:03:39.117041  292692 kubeadm.go:322] [wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
	I1201 19:03:52.619583  292692 kubeadm.go:322] [apiclient] All control plane components are healthy after 13.502567 seconds
	I1201 19:03:52.619854  292692 kubeadm.go:322] [upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
	I1201 19:03:52.635389  292692 kubeadm.go:322] [kubelet] Creating a ConfigMap "kubelet-config-1.18" in namespace kube-system with the configuration for the kubelets in the cluster
	I1201 19:03:53.159867  292692 kubeadm.go:322] [upload-certs] Skipping phase. Please see --upload-certs
	I1201 19:03:53.160021  292692 kubeadm.go:322] [mark-control-plane] Marking the node ingress-addon-legacy-853196 as control-plane by adding the label "node-role.kubernetes.io/master=''"
	I1201 19:03:53.669730  292692 kubeadm.go:322] [bootstrap-token] Using token: 0bk153.r8wqasu92hrcrlf7
	I1201 19:03:53.671453  292692 out.go:204]   - Configuring RBAC rules ...
	I1201 19:03:53.671577  292692 kubeadm.go:322] [bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
	I1201 19:03:53.680302  292692 kubeadm.go:322] [bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to get nodes
	I1201 19:03:53.693871  292692 kubeadm.go:322] [bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
	I1201 19:03:53.702021  292692 kubeadm.go:322] [bootstrap-token] configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
	I1201 19:03:53.705359  292692 kubeadm.go:322] [bootstrap-token] configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
	I1201 19:03:53.711683  292692 kubeadm.go:322] [bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
	I1201 19:03:53.734744  292692 kubeadm.go:322] [kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
	I1201 19:03:54.059958  292692 kubeadm.go:322] [addons] Applied essential addon: CoreDNS
	I1201 19:03:54.113209  292692 kubeadm.go:322] [addons] Applied essential addon: kube-proxy
	I1201 19:03:54.115691  292692 kubeadm.go:322] 
	I1201 19:03:54.115763  292692 kubeadm.go:322] Your Kubernetes control-plane has initialized successfully!
	I1201 19:03:54.115770  292692 kubeadm.go:322] 
	I1201 19:03:54.115842  292692 kubeadm.go:322] To start using your cluster, you need to run the following as a regular user:
	I1201 19:03:54.115848  292692 kubeadm.go:322] 
	I1201 19:03:54.115872  292692 kubeadm.go:322]   mkdir -p $HOME/.kube
	I1201 19:03:54.116688  292692 kubeadm.go:322]   sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
	I1201 19:03:54.116745  292692 kubeadm.go:322]   sudo chown $(id -u):$(id -g) $HOME/.kube/config
	I1201 19:03:54.116751  292692 kubeadm.go:322] 
	I1201 19:03:54.116799  292692 kubeadm.go:322] You should now deploy a pod network to the cluster.
	I1201 19:03:54.116870  292692 kubeadm.go:322] Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
	I1201 19:03:54.116944  292692 kubeadm.go:322]   https://kubernetes.io/docs/concepts/cluster-administration/addons/
	I1201 19:03:54.116950  292692 kubeadm.go:322] 
	I1201 19:03:54.117359  292692 kubeadm.go:322] You can now join any number of control-plane nodes by copying certificate authorities
	I1201 19:03:54.117439  292692 kubeadm.go:322] and service account keys on each node and then running the following as root:
	I1201 19:03:54.117445  292692 kubeadm.go:322] 
	I1201 19:03:54.118119  292692 kubeadm.go:322]   kubeadm join control-plane.minikube.internal:8443 --token 0bk153.r8wqasu92hrcrlf7 \
	I1201 19:03:54.118227  292692 kubeadm.go:322]     --discovery-token-ca-cert-hash sha256:6cba72ab59f3057936b959d729bb32b422b624e6e2da6be2a011dda16967004c \
	I1201 19:03:54.118528  292692 kubeadm.go:322]     --control-plane 
	I1201 19:03:54.118542  292692 kubeadm.go:322] 
	I1201 19:03:54.118917  292692 kubeadm.go:322] Then you can join any number of worker nodes by running the following on each as root:
	I1201 19:03:54.118928  292692 kubeadm.go:322] 
	I1201 19:03:54.119283  292692 kubeadm.go:322] kubeadm join control-plane.minikube.internal:8443 --token 0bk153.r8wqasu92hrcrlf7 \
	I1201 19:03:54.119631  292692 kubeadm.go:322]     --discovery-token-ca-cert-hash sha256:6cba72ab59f3057936b959d729bb32b422b624e6e2da6be2a011dda16967004c 
	I1201 19:03:54.125373  292692 kubeadm.go:322] W1201 19:03:31.009187    1090 configset.go:202] WARNING: kubeadm cannot validate component configs for API groups [kubelet.config.k8s.io kubeproxy.config.k8s.io]
	I1201 19:03:54.125722  292692 kubeadm.go:322] 	[WARNING SystemVerification]: failed to parse kernel config: unable to load kernel module: "configs", output: "modprobe: FATAL: Module configs not found in directory /lib/modules/5.15.0-1050-aws\n", err: exit status 1
	I1201 19:03:54.125949  292692 kubeadm.go:322] 	[WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'
	I1201 19:03:54.126093  292692 kubeadm.go:322] W1201 19:03:39.100707    1090 manifests.go:225] the default kube-apiserver authorization-mode is "Node,RBAC"; using "Node,RBAC"
	I1201 19:03:54.126225  292692 kubeadm.go:322] W1201 19:03:39.104508    1090 manifests.go:225] the default kube-apiserver authorization-mode is "Node,RBAC"; using "Node,RBAC"
	I1201 19:03:54.127165  292692 cni.go:84] Creating CNI manager for ""
	I1201 19:03:54.127185  292692 cni.go:143] "docker" driver + "containerd" runtime found, recommending kindnet
	I1201 19:03:54.129392  292692 out.go:177] * Configuring CNI (Container Networking Interface) ...
	I1201 19:03:54.131359  292692 ssh_runner.go:195] Run: stat /opt/cni/bin/portmap
	I1201 19:03:54.137128  292692 cni.go:182] applying CNI manifest using /var/lib/minikube/binaries/v1.18.20/kubectl ...
	I1201 19:03:54.137146  292692 ssh_runner.go:362] scp memory --> /var/tmp/minikube/cni.yaml (2438 bytes)
	I1201 19:03:54.170053  292692 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.18.20/kubectl apply --kubeconfig=/var/lib/minikube/kubeconfig -f /var/tmp/minikube/cni.yaml
	I1201 19:03:54.630573  292692 ssh_runner.go:195] Run: /bin/bash -c "cat /proc/$(pgrep kube-apiserver)/oom_adj"
	I1201 19:03:54.630694  292692 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.18.20/kubectl create clusterrolebinding minikube-rbac --clusterrole=cluster-admin --serviceaccount=kube-system:default --kubeconfig=/var/lib/minikube/kubeconfig
	I1201 19:03:54.630756  292692 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.18.20/kubectl label nodes minikube.k8s.io/version=v1.32.0 minikube.k8s.io/commit=a7798054841a94294fc1e610bab097fa7942f774 minikube.k8s.io/name=ingress-addon-legacy-853196 minikube.k8s.io/updated_at=2023_12_01T19_03_54_0700 minikube.k8s.io/primary=true --all --overwrite --kubeconfig=/var/lib/minikube/kubeconfig
	I1201 19:03:54.803938  292692 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.18.20/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I1201 19:03:54.804001  292692 ops.go:34] apiserver oom_adj: -16
	I1201 19:03:54.904857  292692 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.18.20/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I1201 19:03:55.532380  292692 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.18.20/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I1201 19:03:56.032781  292692 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.18.20/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I1201 19:03:56.532266  292692 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.18.20/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I1201 19:03:57.031879  292692 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.18.20/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I1201 19:03:57.532536  292692 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.18.20/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I1201 19:03:58.032641  292692 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.18.20/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I1201 19:03:58.532739  292692 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.18.20/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I1201 19:03:59.032073  292692 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.18.20/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I1201 19:03:59.532523  292692 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.18.20/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I1201 19:04:00.032897  292692 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.18.20/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I1201 19:04:00.532399  292692 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.18.20/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I1201 19:04:01.032515  292692 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.18.20/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I1201 19:04:01.532235  292692 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.18.20/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I1201 19:04:02.031850  292692 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.18.20/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I1201 19:04:02.532750  292692 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.18.20/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I1201 19:04:03.032063  292692 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.18.20/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I1201 19:04:03.531849  292692 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.18.20/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I1201 19:04:04.032627  292692 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.18.20/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I1201 19:04:04.531818  292692 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.18.20/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I1201 19:04:05.031865  292692 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.18.20/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I1201 19:04:05.532592  292692 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.18.20/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I1201 19:04:06.031794  292692 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.18.20/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I1201 19:04:06.531886  292692 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.18.20/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I1201 19:04:07.032426  292692 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.18.20/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I1201 19:04:07.532524  292692 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.18.20/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I1201 19:04:08.032694  292692 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.18.20/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I1201 19:04:08.531971  292692 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.18.20/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I1201 19:04:09.031825  292692 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.18.20/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I1201 19:04:09.532523  292692 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.18.20/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I1201 19:04:09.654515  292692 kubeadm.go:1088] duration metric: took 15.023863914s to wait for elevateKubeSystemPrivileges.
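	(The long run of identical `kubectl get sa default` calls above is minikube polling, roughly twice per second, until the cluster's default service account exists; that is what the elevateKubeSystemPrivileges step reports waiting for, and it took about 15s here. A rough bash equivalent of that wait, using the same kubectl binary and kubeconfig paths from the log:

	    # poll until the default service account has been created (sketch)
	    until sudo /var/lib/minikube/binaries/v1.18.20/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig >/dev/null 2>&1; do
	      sleep 0.5
	    done
	)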
	I1201 19:04:09.654551  292692 kubeadm.go:406] StartCluster complete in 38.781956491s
	I1201 19:04:09.654568  292692 settings.go:142] acquiring lock: {Name:mk509c4de5b63e24c154062001ac3a5a349afe54 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
	I1201 19:04:09.654635  292692 settings.go:150] Updating kubeconfig:  /home/jenkins/minikube-integration/17703-252966/kubeconfig
	I1201 19:04:09.655316  292692 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/17703-252966/kubeconfig: {Name:mk1b3fc1b8f9b6d7245434b6dbdc3c3d1a4130cc Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
	I1201 19:04:09.655565  292692 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.18.20/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml"
	I1201 19:04:09.655862  292692 config.go:182] Loaded profile config "ingress-addon-legacy-853196": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.18.20
	I1201 19:04:09.655988  292692 addons.go:499] enable addons start: toEnable=map[ambassador:false auto-pause:false cloud-spanner:false csi-hostpath-driver:false dashboard:false default-storageclass:true efk:false freshpod:false gcp-auth:false gvisor:false headlamp:false helm-tiller:false inaccel:false ingress:false ingress-dns:false inspektor-gadget:false istio:false istio-provisioner:false kong:false kubeflow:false kubevirt:false logviewer:false metallb:false metrics-server:false nvidia-device-plugin:false nvidia-driver-installer:false nvidia-gpu-device-plugin:false olm:false pod-security-policy:false portainer:false registry:false registry-aliases:false registry-creds:false storage-provisioner:true storage-provisioner-gluster:false storage-provisioner-rancher:false volumesnapshots:false]
	I1201 19:04:09.656069  292692 addons.go:69] Setting storage-provisioner=true in profile "ingress-addon-legacy-853196"
	I1201 19:04:09.656085  292692 addons.go:231] Setting addon storage-provisioner=true in "ingress-addon-legacy-853196"
	I1201 19:04:09.656141  292692 host.go:66] Checking if "ingress-addon-legacy-853196" exists ...
	I1201 19:04:09.656104  292692 kapi.go:59] client config for ingress-addon-legacy-853196: &rest.Config{Host:"https://192.168.49.2:8443", APIPath:"", ContentConfig:rest.ContentConfig{AcceptContentTypes:"", ContentType:"", GroupVersion:(*schema.GroupVersion)(nil), NegotiatedSerializer:runtime.NegotiatedSerializer(nil)}, Username:"", Password:"", BearerToken:"", BearerTokenFile:"", Impersonate:rest.ImpersonationConfig{UserName:"", UID:"", Groups:[]string(nil), Extra:map[string][]string(nil)}, AuthProvider:<nil>, AuthConfigPersister:rest.AuthProviderConfigPersister(nil), ExecProvider:<nil>, TLSClientConfig:rest.sanitizedTLSClientConfig{Insecure:false, ServerName:"", CertFile:"/home/jenkins/minikube-integration/17703-252966/.minikube/profiles/ingress-addon-legacy-853196/client.crt", KeyFile:"/home/jenkins/minikube-integration/17703-252966/.minikube/profiles/ingress-addon-legacy-853196/client.key", CAFile:"/home/jenkins/minikube-integration/17703-252966/.minikube/ca.crt", CertData:[]uint8(nil), KeyData:[]uint8(nil), CAData:[]uint8(nil), NextProtos:[]string(nil)}, UserAgent:"", DisableCompression:false, Transport:http.RoundTripper(nil), WrapTransport:(transport.WrapperFunc)(0x16c6350), QPS:0, Burst:0, RateLimiter:flowcontrol.RateLimiter(nil), WarningHandler:rest.WarningHandler(nil), Timeout:0, Dial:(func(context.Context, string, string) (net.Conn, error))(nil), Proxy:(func(*http.Request) (*url.URL, error))(nil)}
	I1201 19:04:09.656636  292692 cli_runner.go:164] Run: docker container inspect ingress-addon-legacy-853196 --format={{.State.Status}}
	I1201 19:04:09.657298  292692 cert_rotation.go:137] Starting client certificate rotation controller
	I1201 19:04:09.657707  292692 addons.go:69] Setting default-storageclass=true in profile "ingress-addon-legacy-853196"
	I1201 19:04:09.657732  292692 addons_storage_classes.go:33] enableOrDisableStorageClasses default-storageclass=true on "ingress-addon-legacy-853196"
	I1201 19:04:09.658014  292692 cli_runner.go:164] Run: docker container inspect ingress-addon-legacy-853196 --format={{.State.Status}}
	I1201 19:04:09.705116  292692 kapi.go:59] client config for ingress-addon-legacy-853196: &rest.Config{Host:"https://192.168.49.2:8443", APIPath:"", ContentConfig:rest.ContentConfig{AcceptContentTypes:"", ContentType:"", GroupVersion:(*schema.GroupVersion)(nil), NegotiatedSerializer:runtime.NegotiatedSerializer(nil)}, Username:"", Password:"", BearerToken:"", BearerTokenFile:"", Impersonate:rest.ImpersonationConfig{UserName:"", UID:"", Groups:[]string(nil), Extra:map[string][]string(nil)}, AuthProvider:<nil>, AuthConfigPersister:rest.AuthProviderConfigPersister(nil), ExecProvider:<nil>, TLSClientConfig:rest.sanitizedTLSClientConfig{Insecure:false, ServerName:"", CertFile:"/home/jenkins/minikube-integration/17703-252966/.minikube/profiles/ingress-addon-legacy-853196/client.crt", KeyFile:"/home/jenkins/minikube-integration/17703-252966/.minikube/profiles/ingress-addon-legacy-853196/client.key", CAFile:"/home/jenkins/minikube-integration/17703-252966/.minikube/ca.crt", CertData:[]uint8(nil), KeyData:[]uint8(nil), CAData:[]uint8(nil), NextProtos:[]string(nil)}, UserAgent:"", DisableCompression:false, Transport:http.RoundTripper(nil), WrapTransport:(transport.WrapperFunc)(0x16c6350), QPS:0, Burst:0, RateLimiter:flowcontrol.RateLimiter(nil), WarningHandler:rest.WarningHandler(nil), Timeout:0, Dial:(func(context.Context, string, string) (net.Conn, error))(nil), Proxy:(func(*http.Request) (*url.URL, error))(nil)}
	I1201 19:04:09.705381  292692 addons.go:231] Setting addon default-storageclass=true in "ingress-addon-legacy-853196"
	I1201 19:04:09.705415  292692 host.go:66] Checking if "ingress-addon-legacy-853196" exists ...
	I1201 19:04:09.705892  292692 cli_runner.go:164] Run: docker container inspect ingress-addon-legacy-853196 --format={{.State.Status}}
	I1201 19:04:09.725344  292692 out.go:177]   - Using image gcr.io/k8s-minikube/storage-provisioner:v5
	I1201 19:04:09.727272  292692 addons.go:423] installing /etc/kubernetes/addons/storage-provisioner.yaml
	I1201 19:04:09.727292  292692 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/storage-provisioner.yaml (2676 bytes)
	I1201 19:04:09.727354  292692 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ingress-addon-legacy-853196
	I1201 19:04:09.749594  292692 kapi.go:248] "coredns" deployment in "kube-system" namespace and "ingress-addon-legacy-853196" context rescaled to 1 replicas
	I1201 19:04:09.749633  292692 start.go:223] Will wait 6m0s for node &{Name: IP:192.168.49.2 Port:8443 KubernetesVersion:v1.18.20 ContainerRuntime:containerd ControlPlane:true Worker:true}
	I1201 19:04:09.754713  292692 out.go:177] * Verifying Kubernetes components...
	I1201 19:04:09.757520  292692 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet
	I1201 19:04:09.755704  292692 addons.go:423] installing /etc/kubernetes/addons/storageclass.yaml
	I1201 19:04:09.757621  292692 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/storageclass.yaml (271 bytes)
	I1201 19:04:09.757668  292692 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ingress-addon-legacy-853196
	I1201 19:04:09.775435  292692 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33103 SSHKeyPath:/home/jenkins/minikube-integration/17703-252966/.minikube/machines/ingress-addon-legacy-853196/id_rsa Username:docker}
	I1201 19:04:09.804584  292692 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33103 SSHKeyPath:/home/jenkins/minikube-integration/17703-252966/.minikube/machines/ingress-addon-legacy-853196/id_rsa Username:docker}
	I1201 19:04:09.962724  292692 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.18.20/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml | sed -e '/^        forward . \/etc\/resolv.conf.*/i \        hosts {\n           192.168.49.1 host.minikube.internal\n           fallthrough\n        }' -e '/^        errors *$/i \        log' | sudo /var/lib/minikube/binaries/v1.18.20/kubectl --kubeconfig=/var/lib/minikube/kubeconfig replace -f -"
	I1201 19:04:09.963580  292692 kapi.go:59] client config for ingress-addon-legacy-853196: &rest.Config{Host:"https://192.168.49.2:8443", APIPath:"", ContentConfig:rest.ContentConfig{AcceptContentTypes:"", ContentType:"", GroupVersion:(*schema.GroupVersion)(nil), NegotiatedSerializer:runtime.NegotiatedSerializer(nil)}, Username:"", Password:"", BearerToken:"", BearerTokenFile:"", Impersonate:rest.ImpersonationConfig{UserName:"", UID:"", Groups:[]string(nil), Extra:map[string][]string(nil)}, AuthProvider:<nil>, AuthConfigPersister:rest.AuthProviderConfigPersister(nil), ExecProvider:<nil>, TLSClientConfig:rest.sanitizedTLSClientConfig{Insecure:false, ServerName:"", CertFile:"/home/jenkins/minikube-integration/17703-252966/.minikube/profiles/ingress-addon-legacy-853196/client.crt", KeyFile:"/home/jenkins/minikube-integration/17703-252966/.minikube/profiles/ingress-addon-legacy-853196/client.key", CAFile:"/home/jenkins/minikube-integration/17703-252966/.minikube/ca.crt", CertData:[]uint8(nil), KeyData:[]uint8(nil), CAData:[]uint8(nil), NextProtos:[]string(nil)}, UserAgent:"", DisableCompression:false, Transport:http.RoundTripper(nil), WrapTransport:(transport.WrapperFunc)(0x16c6350), QPS:0, Burst:0, RateLimiter:flowcontrol.RateLimiter(nil), WarningHandler:rest.WarningHandler(nil), Timeout:0, Dial:(func(context.Context, string, string) (net.Conn, error))(nil), Proxy:(func(*http.Request) (*url.URL, error))(nil)}
	I1201 19:04:09.963935  292692 node_ready.go:35] waiting up to 6m0s for node "ingress-addon-legacy-853196" to be "Ready" ...
	I1201 19:04:09.968021  292692 node_ready.go:49] node "ingress-addon-legacy-853196" has status "Ready":"True"
	I1201 19:04:09.968089  292692 node_ready.go:38] duration metric: took 4.105854ms waiting for node "ingress-addon-legacy-853196" to be "Ready" ...
	I1201 19:04:09.968115  292692 pod_ready.go:35] extra waiting up to 6m0s for all system-critical pods including labels [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] to be "Ready" ...
	I1201 19:04:09.977807  292692 pod_ready.go:78] waiting up to 6m0s for pod "coredns-66bff467f8-v75d7" in "kube-system" namespace to be "Ready" ...
	I1201 19:04:10.055868  292692 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.18.20/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml
	I1201 19:04:10.078609  292692 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.18.20/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml
	I1201 19:04:10.498543  292692 start.go:929] {"host.minikube.internal": 192.168.49.1} host record injected into CoreDNS's ConfigMap
	I1201 19:04:10.661127  292692 out.go:177] * Enabled addons: storage-provisioner, default-storageclass
	I1201 19:04:10.662989  292692 addons.go:502] enable addons completed in 1.006989786s: enabled=[storage-provisioner default-storageclass]
	I1201 19:04:12.006626  292692 pod_ready.go:102] pod "coredns-66bff467f8-v75d7" in "kube-system" namespace has status "Ready":"False"
	I1201 19:04:14.505428  292692 pod_ready.go:102] pod "coredns-66bff467f8-v75d7" in "kube-system" namespace has status "Ready":"False"
	I1201 19:04:17.003922  292692 pod_ready.go:102] pod "coredns-66bff467f8-v75d7" in "kube-system" namespace has status "Ready":"False"
	I1201 19:04:19.004221  292692 pod_ready.go:102] pod "coredns-66bff467f8-v75d7" in "kube-system" namespace has status "Ready":"False"
	I1201 19:04:21.505019  292692 pod_ready.go:102] pod "coredns-66bff467f8-v75d7" in "kube-system" namespace has status "Ready":"False"
	I1201 19:04:24.004341  292692 pod_ready.go:102] pod "coredns-66bff467f8-v75d7" in "kube-system" namespace has status "Ready":"False"
	I1201 19:04:24.504288  292692 pod_ready.go:92] pod "coredns-66bff467f8-v75d7" in "kube-system" namespace has status "Ready":"True"
	I1201 19:04:24.504318  292692 pod_ready.go:81] duration metric: took 14.526447793s waiting for pod "coredns-66bff467f8-v75d7" in "kube-system" namespace to be "Ready" ...
	I1201 19:04:24.504331  292692 pod_ready.go:78] waiting up to 6m0s for pod "etcd-ingress-addon-legacy-853196" in "kube-system" namespace to be "Ready" ...
	I1201 19:04:24.509181  292692 pod_ready.go:92] pod "etcd-ingress-addon-legacy-853196" in "kube-system" namespace has status "Ready":"True"
	I1201 19:04:24.509208  292692 pod_ready.go:81] duration metric: took 4.868125ms waiting for pod "etcd-ingress-addon-legacy-853196" in "kube-system" namespace to be "Ready" ...
	I1201 19:04:24.509222  292692 pod_ready.go:78] waiting up to 6m0s for pod "kube-apiserver-ingress-addon-legacy-853196" in "kube-system" namespace to be "Ready" ...
	I1201 19:04:24.514705  292692 pod_ready.go:92] pod "kube-apiserver-ingress-addon-legacy-853196" in "kube-system" namespace has status "Ready":"True"
	I1201 19:04:24.514732  292692 pod_ready.go:81] duration metric: took 5.501945ms waiting for pod "kube-apiserver-ingress-addon-legacy-853196" in "kube-system" namespace to be "Ready" ...
	I1201 19:04:24.514745  292692 pod_ready.go:78] waiting up to 6m0s for pod "kube-controller-manager-ingress-addon-legacy-853196" in "kube-system" namespace to be "Ready" ...
	I1201 19:04:24.519713  292692 pod_ready.go:92] pod "kube-controller-manager-ingress-addon-legacy-853196" in "kube-system" namespace has status "Ready":"True"
	I1201 19:04:24.519752  292692 pod_ready.go:81] duration metric: took 4.999553ms waiting for pod "kube-controller-manager-ingress-addon-legacy-853196" in "kube-system" namespace to be "Ready" ...
	I1201 19:04:24.519766  292692 pod_ready.go:78] waiting up to 6m0s for pod "kube-proxy-9bcw8" in "kube-system" namespace to be "Ready" ...
	I1201 19:04:24.525086  292692 pod_ready.go:92] pod "kube-proxy-9bcw8" in "kube-system" namespace has status "Ready":"True"
	I1201 19:04:24.525112  292692 pod_ready.go:81] duration metric: took 5.337189ms waiting for pod "kube-proxy-9bcw8" in "kube-system" namespace to be "Ready" ...
	I1201 19:04:24.525123  292692 pod_ready.go:78] waiting up to 6m0s for pod "kube-scheduler-ingress-addon-legacy-853196" in "kube-system" namespace to be "Ready" ...
	I1201 19:04:24.699507  292692 request.go:629] Waited for 174.296655ms due to client-side throttling, not priority and fairness, request: GET:https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods/kube-scheduler-ingress-addon-legacy-853196
	I1201 19:04:24.899702  292692 request.go:629] Waited for 197.336919ms due to client-side throttling, not priority and fairness, request: GET:https://192.168.49.2:8443/api/v1/nodes/ingress-addon-legacy-853196
	I1201 19:04:24.902550  292692 pod_ready.go:92] pod "kube-scheduler-ingress-addon-legacy-853196" in "kube-system" namespace has status "Ready":"True"
	I1201 19:04:24.902574  292692 pod_ready.go:81] duration metric: took 377.443823ms waiting for pod "kube-scheduler-ingress-addon-legacy-853196" in "kube-system" namespace to be "Ready" ...
	I1201 19:04:24.902585  292692 pod_ready.go:38] duration metric: took 14.934425922s for extra waiting for all system-critical and pods with labels [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] to be "Ready" ...
	I1201 19:04:24.902618  292692 api_server.go:52] waiting for apiserver process to appear ...
	I1201 19:04:24.902698  292692 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
	I1201 19:04:24.916053  292692 api_server.go:72] duration metric: took 15.166269957s to wait for apiserver process to appear ...
	I1201 19:04:24.916122  292692 api_server.go:88] waiting for apiserver healthz status ...
	I1201 19:04:24.916145  292692 api_server.go:253] Checking apiserver healthz at https://192.168.49.2:8443/healthz ...
	I1201 19:04:24.924989  292692 api_server.go:279] https://192.168.49.2:8443/healthz returned 200:
	ok
	I1201 19:04:24.925833  292692 api_server.go:141] control plane version: v1.18.20
	I1201 19:04:24.925857  292692 api_server.go:131] duration metric: took 9.722507ms to wait for apiserver health ...
	I1201 19:04:24.925878  292692 system_pods.go:43] waiting for kube-system pods to appear ...
	I1201 19:04:25.099229  292692 request.go:629] Waited for 173.278795ms due to client-side throttling, not priority and fairness, request: GET:https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods
	I1201 19:04:25.105946  292692 system_pods.go:59] 8 kube-system pods found
	I1201 19:04:25.105988  292692 system_pods.go:61] "coredns-66bff467f8-v75d7" [99dbf341-d75f-491d-ac34-95f532376e5e] Running
	I1201 19:04:25.105996  292692 system_pods.go:61] "etcd-ingress-addon-legacy-853196" [7ec7c9cc-d6ee-402c-976b-a180135dbaef] Running
	I1201 19:04:25.106001  292692 system_pods.go:61] "kindnet-76n95" [e6b6fe21-dcb0-48a2-b38d-ed18bd56ff22] Running
	I1201 19:04:25.106007  292692 system_pods.go:61] "kube-apiserver-ingress-addon-legacy-853196" [62f7fbce-40eb-4bb6-b193-3f2da5583d69] Running
	I1201 19:04:25.106013  292692 system_pods.go:61] "kube-controller-manager-ingress-addon-legacy-853196" [6d2b9457-a94c-4694-b4c1-f9bbd5ad75d1] Running
	I1201 19:04:25.106019  292692 system_pods.go:61] "kube-proxy-9bcw8" [e6d2e090-e2a7-4b60-9b3a-3a31cf73179a] Running
	I1201 19:04:25.106024  292692 system_pods.go:61] "kube-scheduler-ingress-addon-legacy-853196" [99d33909-e806-4a59-a7f7-372ac5feaa14] Running
	I1201 19:04:25.106029  292692 system_pods.go:61] "storage-provisioner" [ca7753a4-e0ae-493d-9406-0afc73e9ab9a] Running
	I1201 19:04:25.106042  292692 system_pods.go:74] duration metric: took 180.153584ms to wait for pod list to return data ...
	I1201 19:04:25.106054  292692 default_sa.go:34] waiting for default service account to be created ...
	I1201 19:04:25.299468  292692 request.go:629] Waited for 193.320602ms due to client-side throttling, not priority and fairness, request: GET:https://192.168.49.2:8443/api/v1/namespaces/default/serviceaccounts
	I1201 19:04:25.301993  292692 default_sa.go:45] found service account: "default"
	I1201 19:04:25.302026  292692 default_sa.go:55] duration metric: took 195.959158ms for default service account to be created ...
	I1201 19:04:25.302036  292692 system_pods.go:116] waiting for k8s-apps to be running ...
	I1201 19:04:25.499472  292692 request.go:629] Waited for 197.352531ms due to client-side throttling, not priority and fairness, request: GET:https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods
	I1201 19:04:25.505858  292692 system_pods.go:86] 8 kube-system pods found
	I1201 19:04:25.505889  292692 system_pods.go:89] "coredns-66bff467f8-v75d7" [99dbf341-d75f-491d-ac34-95f532376e5e] Running
	I1201 19:04:25.505896  292692 system_pods.go:89] "etcd-ingress-addon-legacy-853196" [7ec7c9cc-d6ee-402c-976b-a180135dbaef] Running
	I1201 19:04:25.505902  292692 system_pods.go:89] "kindnet-76n95" [e6b6fe21-dcb0-48a2-b38d-ed18bd56ff22] Running
	I1201 19:04:25.505912  292692 system_pods.go:89] "kube-apiserver-ingress-addon-legacy-853196" [62f7fbce-40eb-4bb6-b193-3f2da5583d69] Running
	I1201 19:04:25.505917  292692 system_pods.go:89] "kube-controller-manager-ingress-addon-legacy-853196" [6d2b9457-a94c-4694-b4c1-f9bbd5ad75d1] Running
	I1201 19:04:25.505922  292692 system_pods.go:89] "kube-proxy-9bcw8" [e6d2e090-e2a7-4b60-9b3a-3a31cf73179a] Running
	I1201 19:04:25.505927  292692 system_pods.go:89] "kube-scheduler-ingress-addon-legacy-853196" [99d33909-e806-4a59-a7f7-372ac5feaa14] Running
	I1201 19:04:25.505931  292692 system_pods.go:89] "storage-provisioner" [ca7753a4-e0ae-493d-9406-0afc73e9ab9a] Running
	I1201 19:04:25.505946  292692 system_pods.go:126] duration metric: took 203.880175ms to wait for k8s-apps to be running ...
	I1201 19:04:25.505956  292692 system_svc.go:44] waiting for kubelet service to be running ....
	I1201 19:04:25.506012  292692 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet
	I1201 19:04:25.519584  292692 system_svc.go:56] duration metric: took 13.617421ms WaitForService to wait for kubelet.
	I1201 19:04:25.519610  292692 kubeadm.go:581] duration metric: took 15.769835496s to wait for : map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] ...
	I1201 19:04:25.519631  292692 node_conditions.go:102] verifying NodePressure condition ...
	I1201 19:04:25.700023  292692 request.go:629] Waited for 180.307553ms due to client-side throttling, not priority and fairness, request: GET:https://192.168.49.2:8443/api/v1/nodes
	I1201 19:04:25.703111  292692 node_conditions.go:122] node storage ephemeral capacity is 203034800Ki
	I1201 19:04:25.703152  292692 node_conditions.go:123] node cpu capacity is 2
	I1201 19:04:25.703163  292692 node_conditions.go:105] duration metric: took 183.52731ms to run NodePressure ...
	I1201 19:04:25.703194  292692 start.go:228] waiting for startup goroutines ...
	I1201 19:04:25.703208  292692 start.go:233] waiting for cluster config update ...
	I1201 19:04:25.703219  292692 start.go:242] writing updated cluster config ...
	I1201 19:04:25.703508  292692 ssh_runner.go:195] Run: rm -f paused
	I1201 19:04:25.766774  292692 start.go:600] kubectl: 1.28.4, cluster: 1.18.20 (minor skew: 10)
	I1201 19:04:25.768977  292692 out.go:177] 
	W1201 19:04:25.770998  292692 out.go:239] ! /usr/local/bin/kubectl is version 1.28.4, which may have incompatibilities with Kubernetes 1.18.20.
	I1201 19:04:25.773369  292692 out.go:177]   - Want kubectl v1.18.20? Try 'minikube kubectl -- get pods -A'
	I1201 19:04:25.775201  292692 out.go:177] * Done! kubectl is now configured to use "ingress-addon-legacy-853196" cluster and "default" namespace by default
	
	* 
	* ==> container status <==
	* CONTAINER           IMAGE               CREATED              STATE               NAME                      ATTEMPT             POD ID              POD
	867e7eb6262c2       dd1b12fcb6097       12 seconds ago       Exited              hello-world-app           2                   d1c55c8de9f89       hello-world-app-5f5d8b66bb-wd4qx
	035bd22986455       f09fc93534f6a       35 seconds ago       Running             nginx                     0                   c03c8f0fde007       nginx
	884b75382ca47       d7f0cba3aa5bf       51 seconds ago       Exited              controller                0                   c41cb1602a2ac       ingress-nginx-controller-7fcf777cb7-r9scq
	b064b318ae43f       a883f7fc35610       57 seconds ago       Exited              patch                     0                   20eba5b0c841c       ingress-nginx-admission-patch-5ttvl
	10a1f17380cc9       a883f7fc35610       58 seconds ago       Exited              create                    0                   4c0d24fc00f5c       ingress-nginx-admission-create-c27g5
	be91c9d930d8c       6e17ba78cf3eb       About a minute ago   Running             coredns                   0                   6c1b9587f9e3b       coredns-66bff467f8-v75d7
	335972d2dfa89       ba04bb24b9575       About a minute ago   Running             storage-provisioner       0                   69b4129c3c0b7       storage-provisioner
	eafdbb78b6d79       04b4eaa3d3db8       About a minute ago   Running             kindnet-cni               0                   81267229825ed       kindnet-76n95
	7d6571b43bec6       565297bc6f7d4       About a minute ago   Running             kube-proxy                0                   85a09ff3e5199       kube-proxy-9bcw8
	fda32a8985773       68a4fac29a865       About a minute ago   Running             kube-controller-manager   0                   378ecc1bf3cb9       kube-controller-manager-ingress-addon-legacy-853196
	e00c151208d7e       ab707b0a0ea33       About a minute ago   Running             etcd                      0                   a52052eb1209f       etcd-ingress-addon-legacy-853196
	bbc87b9ea593b       095f37015706d       About a minute ago   Running             kube-scheduler            0                   041d608629dce       kube-scheduler-ingress-addon-legacy-853196
	9b83c9b3a0836       2694cf044d665       About a minute ago   Running             kube-apiserver            0                   09b1f59a2ecb8       kube-apiserver-ingress-addon-legacy-853196
	
	* 
	* ==> containerd <==
	* Dec 01 19:05:16 ingress-addon-legacy-853196 containerd[823]: time="2023-12-01T19:05:16.108423221Z" level=info msg="RemoveContainer for \"ed8e97fcb706ff7143e5b0d990df6de6070cb79b7252f7f9040fe0d364b61aa3\" returns successfully"
	Dec 01 19:05:18 ingress-addon-legacy-853196 containerd[823]: time="2023-12-01T19:05:18.689166036Z" level=info msg="StopContainer for \"884b75382ca47b517ee0bf0d39246324aa3cf8039c8e1822d946aabba94db6f7\" with timeout 2 (s)"
	Dec 01 19:05:18 ingress-addon-legacy-853196 containerd[823]: time="2023-12-01T19:05:18.689905045Z" level=info msg="Stop container \"884b75382ca47b517ee0bf0d39246324aa3cf8039c8e1822d946aabba94db6f7\" with signal terminated"
	Dec 01 19:05:18 ingress-addon-legacy-853196 containerd[823]: time="2023-12-01T19:05:18.701400956Z" level=info msg="StopContainer for \"884b75382ca47b517ee0bf0d39246324aa3cf8039c8e1822d946aabba94db6f7\" with timeout 2 (s)"
	Dec 01 19:05:18 ingress-addon-legacy-853196 containerd[823]: time="2023-12-01T19:05:18.705764422Z" level=info msg="Skipping the sending of signal terminated to container \"884b75382ca47b517ee0bf0d39246324aa3cf8039c8e1822d946aabba94db6f7\" because a prior stop with timeout>0 request already sent the signal"
	Dec 01 19:05:20 ingress-addon-legacy-853196 containerd[823]: time="2023-12-01T19:05:20.706475944Z" level=info msg="Kill container \"884b75382ca47b517ee0bf0d39246324aa3cf8039c8e1822d946aabba94db6f7\""
	Dec 01 19:05:20 ingress-addon-legacy-853196 containerd[823]: time="2023-12-01T19:05:20.706475985Z" level=info msg="Kill container \"884b75382ca47b517ee0bf0d39246324aa3cf8039c8e1822d946aabba94db6f7\""
	Dec 01 19:05:20 ingress-addon-legacy-853196 containerd[823]: time="2023-12-01T19:05:20.783098645Z" level=info msg="shim disconnected" id=884b75382ca47b517ee0bf0d39246324aa3cf8039c8e1822d946aabba94db6f7
	Dec 01 19:05:20 ingress-addon-legacy-853196 containerd[823]: time="2023-12-01T19:05:20.783170603Z" level=warning msg="cleaning up after shim disconnected" id=884b75382ca47b517ee0bf0d39246324aa3cf8039c8e1822d946aabba94db6f7 namespace=k8s.io
	Dec 01 19:05:20 ingress-addon-legacy-853196 containerd[823]: time="2023-12-01T19:05:20.783183345Z" level=info msg="cleaning up dead shim"
	Dec 01 19:05:20 ingress-addon-legacy-853196 containerd[823]: time="2023-12-01T19:05:20.801392982Z" level=warning msg="cleanup warnings time=\"2023-12-01T19:05:20Z\" level=info msg=\"starting signal loop\" namespace=k8s.io pid=4614 runtime=io.containerd.runc.v2\n"
	Dec 01 19:05:20 ingress-addon-legacy-853196 containerd[823]: time="2023-12-01T19:05:20.804774744Z" level=info msg="StopContainer for \"884b75382ca47b517ee0bf0d39246324aa3cf8039c8e1822d946aabba94db6f7\" returns successfully"
	Dec 01 19:05:20 ingress-addon-legacy-853196 containerd[823]: time="2023-12-01T19:05:20.804916280Z" level=info msg="StopContainer for \"884b75382ca47b517ee0bf0d39246324aa3cf8039c8e1822d946aabba94db6f7\" returns successfully"
	Dec 01 19:05:20 ingress-addon-legacy-853196 containerd[823]: time="2023-12-01T19:05:20.805459821Z" level=info msg="StopPodSandbox for \"c41cb1602a2acbdb68115c94aa665e9f3ab24b9545a74a3bd35b93d223119b65\""
	Dec 01 19:05:20 ingress-addon-legacy-853196 containerd[823]: time="2023-12-01T19:05:20.805536809Z" level=info msg="Container to stop \"884b75382ca47b517ee0bf0d39246324aa3cf8039c8e1822d946aabba94db6f7\" must be in running or unknown state, current state \"CONTAINER_EXITED\""
	Dec 01 19:05:20 ingress-addon-legacy-853196 containerd[823]: time="2023-12-01T19:05:20.805823812Z" level=info msg="StopPodSandbox for \"c41cb1602a2acbdb68115c94aa665e9f3ab24b9545a74a3bd35b93d223119b65\""
	Dec 01 19:05:20 ingress-addon-legacy-853196 containerd[823]: time="2023-12-01T19:05:20.805954255Z" level=info msg="Container to stop \"884b75382ca47b517ee0bf0d39246324aa3cf8039c8e1822d946aabba94db6f7\" must be in running or unknown state, current state \"CONTAINER_EXITED\""
	Dec 01 19:05:20 ingress-addon-legacy-853196 containerd[823]: time="2023-12-01T19:05:20.842764031Z" level=info msg="shim disconnected" id=c41cb1602a2acbdb68115c94aa665e9f3ab24b9545a74a3bd35b93d223119b65
	Dec 01 19:05:20 ingress-addon-legacy-853196 containerd[823]: time="2023-12-01T19:05:20.843062521Z" level=warning msg="cleaning up after shim disconnected" id=c41cb1602a2acbdb68115c94aa665e9f3ab24b9545a74a3bd35b93d223119b65 namespace=k8s.io
	Dec 01 19:05:20 ingress-addon-legacy-853196 containerd[823]: time="2023-12-01T19:05:20.843075493Z" level=info msg="cleaning up dead shim"
	Dec 01 19:05:20 ingress-addon-legacy-853196 containerd[823]: time="2023-12-01T19:05:20.853676443Z" level=warning msg="cleanup warnings time=\"2023-12-01T19:05:20Z\" level=info msg=\"starting signal loop\" namespace=k8s.io pid=4651 runtime=io.containerd.runc.v2\n"
	Dec 01 19:05:20 ingress-addon-legacy-853196 containerd[823]: time="2023-12-01T19:05:20.911879614Z" level=info msg="TearDown network for sandbox \"c41cb1602a2acbdb68115c94aa665e9f3ab24b9545a74a3bd35b93d223119b65\" successfully"
	Dec 01 19:05:20 ingress-addon-legacy-853196 containerd[823]: time="2023-12-01T19:05:20.911931470Z" level=info msg="StopPodSandbox for \"c41cb1602a2acbdb68115c94aa665e9f3ab24b9545a74a3bd35b93d223119b65\" returns successfully"
	Dec 01 19:05:20 ingress-addon-legacy-853196 containerd[823]: time="2023-12-01T19:05:20.917261938Z" level=info msg="TearDown network for sandbox \"c41cb1602a2acbdb68115c94aa665e9f3ab24b9545a74a3bd35b93d223119b65\" successfully"
	Dec 01 19:05:20 ingress-addon-legacy-853196 containerd[823]: time="2023-12-01T19:05:20.917448331Z" level=info msg="StopPodSandbox for \"c41cb1602a2acbdb68115c94aa665e9f3ab24b9545a74a3bd35b93d223119b65\" returns successfully"
	
	* 
	* ==> coredns [be91c9d930d8caef1261036366c911b58e95a9df49367f5cd9e55e789965d67a] <==
	* [INFO] 10.244.0.5:35045 - 43341 "AAAA IN hello-world-app.default.svc.cluster.local.svc.cluster.local. udp 77 false 512" NXDOMAIN qr,aa,rd 170 0.000069332s
	[INFO] 10.244.0.5:47104 - 63257 "A IN hello-world-app.default.svc.cluster.local. udp 59 false 512" NOERROR qr,aa,rd 116 0.00010911s
	[INFO] 10.244.0.5:35045 - 20095 "A IN hello-world-app.default.svc.cluster.local.cluster.local. udp 73 false 512" NXDOMAIN qr,aa,rd 166 0.00007519s
	[INFO] 10.244.0.5:37592 - 4112 "A IN hello-world-app.default.svc.cluster.local.ingress-nginx.svc.cluster.local. udp 91 false 512" NXDOMAIN qr,aa,rd 184 0.000061094s
	[INFO] 10.244.0.5:35045 - 11203 "AAAA IN hello-world-app.default.svc.cluster.local.cluster.local. udp 73 false 512" NXDOMAIN qr,aa,rd 166 0.000065418s
	[INFO] 10.244.0.5:37592 - 47093 "AAAA IN hello-world-app.default.svc.cluster.local.ingress-nginx.svc.cluster.local. udp 91 false 512" NXDOMAIN qr,aa,rd 184 0.000045595s
	[INFO] 10.244.0.5:37592 - 5441 "A IN hello-world-app.default.svc.cluster.local.svc.cluster.local. udp 77 false 512" NXDOMAIN qr,aa,rd 170 0.000063261s
	[INFO] 10.244.0.5:37592 - 8858 "AAAA IN hello-world-app.default.svc.cluster.local.svc.cluster.local. udp 77 false 512" NXDOMAIN qr,aa,rd 170 0.00004365s
	[INFO] 10.244.0.5:37592 - 50357 "A IN hello-world-app.default.svc.cluster.local.cluster.local. udp 73 false 512" NXDOMAIN qr,aa,rd 166 0.000048828s
	[INFO] 10.244.0.5:35045 - 48056 "A IN hello-world-app.default.svc.cluster.local.us-east-2.compute.internal. udp 86 false 512" NXDOMAIN qr,rd,ra 86 0.00119826s
	[INFO] 10.244.0.5:37592 - 15248 "AAAA IN hello-world-app.default.svc.cluster.local.cluster.local. udp 73 false 512" NXDOMAIN qr,aa,rd 166 0.000038104s
	[INFO] 10.244.0.5:35045 - 46426 "AAAA IN hello-world-app.default.svc.cluster.local.us-east-2.compute.internal. udp 86 false 512" NXDOMAIN qr,rd,ra 86 0.001426424s
	[INFO] 10.244.0.5:37592 - 39074 "A IN hello-world-app.default.svc.cluster.local.us-east-2.compute.internal. udp 86 false 512" NXDOMAIN qr,rd,ra 86 0.001050184s
	[INFO] 10.244.0.5:35045 - 17717 "A IN hello-world-app.default.svc.cluster.local. udp 59 false 512" NOERROR qr,aa,rd 116 0.000141881s
	[INFO] 10.244.0.5:37592 - 64481 "AAAA IN hello-world-app.default.svc.cluster.local.us-east-2.compute.internal. udp 86 false 512" NXDOMAIN qr,rd,ra 86 0.00125288s
	[INFO] 10.244.0.5:37592 - 53843 "A IN hello-world-app.default.svc.cluster.local. udp 59 false 512" NOERROR qr,aa,rd 116 0.000041304s
	[INFO] 10.244.0.5:56997 - 967 "A IN hello-world-app.default.svc.cluster.local.ingress-nginx.svc.cluster.local. udp 91 false 512" NXDOMAIN qr,aa,rd 184 0.000091585s
	[INFO] 10.244.0.5:56997 - 58893 "AAAA IN hello-world-app.default.svc.cluster.local.ingress-nginx.svc.cluster.local. udp 91 false 512" NXDOMAIN qr,aa,rd 184 0.000051421s
	[INFO] 10.244.0.5:56997 - 45112 "A IN hello-world-app.default.svc.cluster.local.svc.cluster.local. udp 77 false 512" NXDOMAIN qr,aa,rd 170 0.000051281s
	[INFO] 10.244.0.5:56997 - 43083 "AAAA IN hello-world-app.default.svc.cluster.local.svc.cluster.local. udp 77 false 512" NXDOMAIN qr,aa,rd 170 0.000064853s
	[INFO] 10.244.0.5:56997 - 19164 "A IN hello-world-app.default.svc.cluster.local.cluster.local. udp 73 false 512" NXDOMAIN qr,aa,rd 166 0.000040951s
	[INFO] 10.244.0.5:56997 - 63092 "AAAA IN hello-world-app.default.svc.cluster.local.cluster.local. udp 73 false 512" NXDOMAIN qr,aa,rd 166 0.000045579s
	[INFO] 10.244.0.5:56997 - 26974 "A IN hello-world-app.default.svc.cluster.local.us-east-2.compute.internal. udp 86 false 512" NXDOMAIN qr,rd,ra 86 0.001377022s
	[INFO] 10.244.0.5:56997 - 8110 "AAAA IN hello-world-app.default.svc.cluster.local.us-east-2.compute.internal. udp 86 false 512" NXDOMAIN qr,rd,ra 86 0.000997762s
	[INFO] 10.244.0.5:56997 - 29848 "A IN hello-world-app.default.svc.cluster.local. udp 59 false 512" NOERROR qr,aa,rd 116 0.000068405s
	
	* 
	* ==> describe nodes <==
	* Name:               ingress-addon-legacy-853196
	Roles:              master
	Labels:             beta.kubernetes.io/arch=arm64
	                    beta.kubernetes.io/os=linux
	                    kubernetes.io/arch=arm64
	                    kubernetes.io/hostname=ingress-addon-legacy-853196
	                    kubernetes.io/os=linux
	                    minikube.k8s.io/commit=a7798054841a94294fc1e610bab097fa7942f774
	                    minikube.k8s.io/name=ingress-addon-legacy-853196
	                    minikube.k8s.io/primary=true
	                    minikube.k8s.io/updated_at=2023_12_01T19_03_54_0700
	                    minikube.k8s.io/version=v1.32.0
	                    node-role.kubernetes.io/master=
	Annotations:        kubeadm.alpha.kubernetes.io/cri-socket: /run/containerd/containerd.sock
	                    node.alpha.kubernetes.io/ttl: 0
	                    volumes.kubernetes.io/controller-managed-attach-detach: true
	CreationTimestamp:  Fri, 01 Dec 2023 19:03:51 +0000
	Taints:             <none>
	Unschedulable:      false
	Lease:
	  HolderIdentity:  ingress-addon-legacy-853196
	  AcquireTime:     <unset>
	  RenewTime:       Fri, 01 Dec 2023 19:05:17 +0000
	Conditions:
	  Type             Status  LastHeartbeatTime                 LastTransitionTime                Reason                       Message
	  ----             ------  -----------------                 ------------------                ------                       -------
	  MemoryPressure   False   Fri, 01 Dec 2023 19:04:57 +0000   Fri, 01 Dec 2023 19:03:43 +0000   KubeletHasSufficientMemory   kubelet has sufficient memory available
	  DiskPressure     False   Fri, 01 Dec 2023 19:04:57 +0000   Fri, 01 Dec 2023 19:03:43 +0000   KubeletHasNoDiskPressure     kubelet has no disk pressure
	  PIDPressure      False   Fri, 01 Dec 2023 19:04:57 +0000   Fri, 01 Dec 2023 19:03:43 +0000   KubeletHasSufficientPID      kubelet has sufficient PID available
	  Ready            True    Fri, 01 Dec 2023 19:04:57 +0000   Fri, 01 Dec 2023 19:04:07 +0000   KubeletReady                 kubelet is posting ready status
	Addresses:
	  InternalIP:  192.168.49.2
	  Hostname:    ingress-addon-legacy-853196
	Capacity:
	  cpu:                2
	  ephemeral-storage:  203034800Ki
	  hugepages-1Gi:      0
	  hugepages-2Mi:      0
	  hugepages-32Mi:     0
	  hugepages-64Ki:     0
	  memory:             8022500Ki
	  pods:               110
	Allocatable:
	  cpu:                2
	  ephemeral-storage:  203034800Ki
	  hugepages-1Gi:      0
	  hugepages-2Mi:      0
	  hugepages-32Mi:     0
	  hugepages-64Ki:     0
	  memory:             8022500Ki
	  pods:               110
	System Info:
	  Machine ID:                 57c17c4c80024f23a806f4917ed8e223
	  System UUID:                a05d5436-4171-4923-8a3b-3bea38fccc15
	  Boot ID:                    8abca68d-6ef7-4596-a2cf-01a2291ed738
	  Kernel Version:             5.15.0-1050-aws
	  OS Image:                   Ubuntu 22.04.3 LTS
	  Operating System:           linux
	  Architecture:               arm64
	  Container Runtime Version:  containerd://1.6.25
	  Kubelet Version:            v1.18.20
	  Kube-Proxy Version:         v1.18.20
	PodCIDR:                      10.244.0.0/24
	PodCIDRs:                     10.244.0.0/24
	Non-terminated Pods:          (10 in total)
	  Namespace                   Name                                                   CPU Requests  CPU Limits  Memory Requests  Memory Limits  AGE
	  ---------                   ----                                                   ------------  ----------  ---------------  -------------  ---
	  default                     hello-world-app-5f5d8b66bb-wd4qx                       0 (0%)        0 (0%)      0 (0%)           0 (0%)         28s
	  default                     nginx                                                  0 (0%)        0 (0%)      0 (0%)           0 (0%)         37s
	  kube-system                 coredns-66bff467f8-v75d7                               100m (5%)     0 (0%)      70Mi (0%)        170Mi (2%)     78s
	  kube-system                 etcd-ingress-addon-legacy-853196                       0 (0%)        0 (0%)      0 (0%)           0 (0%)         89s
	  kube-system                 kindnet-76n95                                          100m (5%)     100m (5%)   50Mi (0%)        50Mi (0%)      78s
	  kube-system                 kube-apiserver-ingress-addon-legacy-853196             250m (12%)    0 (0%)      0 (0%)           0 (0%)         88s
	  kube-system                 kube-controller-manager-ingress-addon-legacy-853196    200m (10%)    0 (0%)      0 (0%)           0 (0%)         89s
	  kube-system                 kube-proxy-9bcw8                                       0 (0%)        0 (0%)      0 (0%)           0 (0%)         78s
	  kube-system                 kube-scheduler-ingress-addon-legacy-853196             100m (5%)     0 (0%)      0 (0%)           0 (0%)         89s
	  kube-system                 storage-provisioner                                    0 (0%)        0 (0%)      0 (0%)           0 (0%)         76s
	Allocated resources:
	  (Total limits may be over 100 percent, i.e., overcommitted.)
	  Resource           Requests    Limits
	  --------           --------    ------
	  cpu                750m (37%)  100m (5%)
	  memory             120Mi (1%)  220Mi (2%)
	  ephemeral-storage  0 (0%)      0 (0%)
	  hugepages-1Gi      0 (0%)      0 (0%)
	  hugepages-2Mi      0 (0%)      0 (0%)
	  hugepages-32Mi     0 (0%)      0 (0%)
	  hugepages-64Ki     0 (0%)      0 (0%)
	Events:
	  Type    Reason                   Age                  From        Message
	  ----    ------                   ----                 ----        -------
	  Normal  NodeHasSufficientMemory  104s (x5 over 104s)  kubelet     Node ingress-addon-legacy-853196 status is now: NodeHasSufficientMemory
	  Normal  NodeHasNoDiskPressure    104s (x4 over 104s)  kubelet     Node ingress-addon-legacy-853196 status is now: NodeHasNoDiskPressure
	  Normal  NodeHasSufficientPID     104s (x4 over 104s)  kubelet     Node ingress-addon-legacy-853196 status is now: NodeHasSufficientPID
	  Normal  Starting                 89s                  kubelet     Starting kubelet.
	  Normal  NodeHasSufficientMemory  89s                  kubelet     Node ingress-addon-legacy-853196 status is now: NodeHasSufficientMemory
	  Normal  NodeHasNoDiskPressure    89s                  kubelet     Node ingress-addon-legacy-853196 status is now: NodeHasNoDiskPressure
	  Normal  NodeHasSufficientPID     89s                  kubelet     Node ingress-addon-legacy-853196 status is now: NodeHasSufficientPID
	  Normal  NodeAllocatableEnforced  89s                  kubelet     Updated Node Allocatable limit across pods
	  Normal  NodeReady                79s                  kubelet     Node ingress-addon-legacy-853196 status is now: NodeReady
	  Normal  Starting                 77s                  kube-proxy  Starting kube-proxy.
	
	* 
	* ==> dmesg <==
	* [  +0.000857] FS-Cache: N-cookie c=0000001e [p=00000015 fl=2 nc=0 na=1]
	[  +0.001069] FS-Cache: N-cookie d=00000000c0e2b83e{9p.inode} n=00000000fdaac598
	[  +0.001228] FS-Cache: N-key=[8] '963a5c0100000000'
	[  +0.002961] FS-Cache: Duplicate cookie detected
	[  +0.000767] FS-Cache: O-cookie c=00000018 [p=00000015 fl=226 nc=0 na=1]
	[  +0.001108] FS-Cache: O-cookie d=00000000c0e2b83e{9p.inode} n=000000009a4ad753
	[  +0.001187] FS-Cache: O-key=[8] '963a5c0100000000'
	[  +0.000796] FS-Cache: N-cookie c=0000001f [p=00000015 fl=2 nc=0 na=1]
	[  +0.001037] FS-Cache: N-cookie d=00000000c0e2b83e{9p.inode} n=00000000850f6b07
	[  +0.001177] FS-Cache: N-key=[8] '963a5c0100000000'
	[  +2.887436] FS-Cache: Duplicate cookie detected
	[  +0.003859] FS-Cache: O-cookie c=00000016 [p=00000015 fl=226 nc=0 na=1]
	[  +0.001036] FS-Cache: O-cookie d=00000000c0e2b83e{9p.inode} n=000000007191b30a
	[  +0.001089] FS-Cache: O-key=[8] '953a5c0100000000'
	[  +0.000745] FS-Cache: N-cookie c=00000021 [p=00000015 fl=2 nc=0 na=1]
	[  +0.001010] FS-Cache: N-cookie d=00000000c0e2b83e{9p.inode} n=00000000fdaac598
	[  +0.001177] FS-Cache: N-key=[8] '953a5c0100000000'
	[  +0.509610] FS-Cache: Duplicate cookie detected
	[  +0.000768] FS-Cache: O-cookie c=0000001b [p=00000015 fl=226 nc=0 na=1]
	[  +0.001086] FS-Cache: O-cookie d=00000000c0e2b83e{9p.inode} n=0000000010bf0fcb
	[  +0.001092] FS-Cache: O-key=[8] '9b3a5c0100000000'
	[  +0.000747] FS-Cache: N-cookie c=00000022 [p=00000015 fl=2 nc=0 na=1]
	[  +0.001044] FS-Cache: N-cookie d=00000000c0e2b83e{9p.inode} n=00000000ba7a3082
	[  +0.001145] FS-Cache: N-key=[8] '9b3a5c0100000000'
	[Dec 1 19:03] hrtimer: interrupt took 40883389 ns
	
	* 
	* ==> etcd [e00c151208d7eb679df9d2dacbf76cbbc6bba1264e67ad929f058baa3ab18dc0] <==
	* raft2023/12/01 19:03:45 INFO: aec36adc501070cc became follower at term 0
	raft2023/12/01 19:03:45 INFO: newRaft aec36adc501070cc [peers: [], term: 0, commit: 0, applied: 0, lastindex: 0, lastterm: 0]
	raft2023/12/01 19:03:45 INFO: aec36adc501070cc became follower at term 1
	raft2023/12/01 19:03:45 INFO: aec36adc501070cc switched to configuration voters=(12593026477526642892)
	2023-12-01 19:03:45.782400 W | auth: simple token is not cryptographically signed
	2023-12-01 19:03:45.786139 I | etcdserver: starting server... [version: 3.4.3, cluster version: to_be_decided]
	2023-12-01 19:03:45.790228 I | embed: ClientTLS: cert = /var/lib/minikube/certs/etcd/server.crt, key = /var/lib/minikube/certs/etcd/server.key, trusted-ca = /var/lib/minikube/certs/etcd/ca.crt, client-cert-auth = true, crl-file = 
	2023-12-01 19:03:45.790548 I | embed: listening for metrics on http://127.0.0.1:2381
	2023-12-01 19:03:45.790943 I | embed: listening for peers on 192.168.49.2:2380
	2023-12-01 19:03:45.791265 I | etcdserver: aec36adc501070cc as single-node; fast-forwarding 9 ticks (election ticks 10)
	raft2023/12/01 19:03:45 INFO: aec36adc501070cc switched to configuration voters=(12593026477526642892)
	2023-12-01 19:03:45.791790 I | etcdserver/membership: added member aec36adc501070cc [https://192.168.49.2:2380] to cluster fa54960ea34d58be
	raft2023/12/01 19:03:46 INFO: aec36adc501070cc is starting a new election at term 1
	raft2023/12/01 19:03:46 INFO: aec36adc501070cc became candidate at term 2
	raft2023/12/01 19:03:46 INFO: aec36adc501070cc received MsgVoteResp from aec36adc501070cc at term 2
	raft2023/12/01 19:03:46 INFO: aec36adc501070cc became leader at term 2
	raft2023/12/01 19:03:46 INFO: raft.node: aec36adc501070cc elected leader aec36adc501070cc at term 2
	2023-12-01 19:03:46.077582 I | etcdserver: published {Name:ingress-addon-legacy-853196 ClientURLs:[https://192.168.49.2:2379]} to cluster fa54960ea34d58be
	2023-12-01 19:03:46.077696 I | embed: ready to serve client requests
	2023-12-01 19:03:46.077894 I | embed: ready to serve client requests
	2023-12-01 19:03:46.079339 I | embed: serving client requests on 127.0.0.1:2379
	2023-12-01 19:03:46.079559 I | embed: serving client requests on 192.168.49.2:2379
	2023-12-01 19:03:46.079752 I | etcdserver: setting up the initial cluster version to 3.4
	2023-12-01 19:03:46.086041 N | etcdserver/membership: set the initial cluster version to 3.4
	2023-12-01 19:03:46.086130 I | etcdserver/api: enabled capabilities for version 3.4
	
	* 
	* ==> kernel <==
	*  19:05:26 up  1:47,  0 users,  load average: 1.01, 1.38, 1.59
	Linux ingress-addon-legacy-853196 5.15.0-1050-aws #55~20.04.1-Ubuntu SMP Mon Nov 6 12:18:16 UTC 2023 aarch64 aarch64 aarch64 GNU/Linux
	PRETTY_NAME="Ubuntu 22.04.3 LTS"
	
	* 
	* ==> kindnet [eafdbb78b6d794ba8121b1dbad79069e000e299e6d28363b6871c4320059614f] <==
	* I1201 19:04:12.007933       1 main.go:102] connected to apiserver: https://10.96.0.1:443
	I1201 19:04:12.008027       1 main.go:107] hostIP = 192.168.49.2
	podIP = 192.168.49.2
	I1201 19:04:12.008165       1 main.go:116] setting mtu 1500 for CNI 
	I1201 19:04:12.008184       1 main.go:146] kindnetd IP family: "ipv4"
	I1201 19:04:12.008202       1 main.go:150] noMask IPv4 subnets: [10.244.0.0/16]
	I1201 19:04:12.315083       1 main.go:223] Handling node with IPs: map[192.168.49.2:{}]
	I1201 19:04:12.315116       1 main.go:227] handling current node
	I1201 19:04:22.406060       1 main.go:223] Handling node with IPs: map[192.168.49.2:{}]
	I1201 19:04:22.406090       1 main.go:227] handling current node
	I1201 19:04:32.421619       1 main.go:223] Handling node with IPs: map[192.168.49.2:{}]
	I1201 19:04:32.421651       1 main.go:227] handling current node
	I1201 19:04:42.429666       1 main.go:223] Handling node with IPs: map[192.168.49.2:{}]
	I1201 19:04:42.429698       1 main.go:227] handling current node
	I1201 19:04:52.433015       1 main.go:223] Handling node with IPs: map[192.168.49.2:{}]
	I1201 19:04:52.433044       1 main.go:227] handling current node
	I1201 19:05:02.442440       1 main.go:223] Handling node with IPs: map[192.168.49.2:{}]
	I1201 19:05:02.442471       1 main.go:227] handling current node
	I1201 19:05:12.446102       1 main.go:223] Handling node with IPs: map[192.168.49.2:{}]
	I1201 19:05:12.446455       1 main.go:227] handling current node
	I1201 19:05:22.455471       1 main.go:223] Handling node with IPs: map[192.168.49.2:{}]
	I1201 19:05:22.455501       1 main.go:227] handling current node
	
	* 
	* ==> kube-apiserver [9b83c9b3a083628bbfcf82736fb116f2c62a5b1f243c616cb46fe9033306030e] <==
	* E1201 19:03:50.940798       1 controller.go:152] Unable to remove old endpoints from kubernetes service: StorageError: key not found, Code: 1, Key: /registry/masterleases/192.168.49.2, ResourceVersion: 0, AdditionalErrorMsg: 
	I1201 19:03:51.020916       1 cache.go:39] Caches are synced for APIServiceRegistrationController controller
	I1201 19:03:51.021203       1 shared_informer.go:230] Caches are synced for cluster_authentication_trust_controller 
	I1201 19:03:51.021330       1 cache.go:39] Caches are synced for AvailableConditionController controller
	I1201 19:03:51.021452       1 cache.go:39] Caches are synced for autoregister controller
	I1201 19:03:51.032163       1 shared_informer.go:230] Caches are synced for crd-autoregister 
	I1201 19:03:51.816719       1 controller.go:130] OpenAPI AggregationController: action for item : Nothing (removed from the queue).
	I1201 19:03:51.816809       1 controller.go:130] OpenAPI AggregationController: action for item k8s_internal_local_delegation_chain_0000000000: Nothing (removed from the queue).
	I1201 19:03:51.827771       1 storage_scheduling.go:134] created PriorityClass system-node-critical with value 2000001000
	I1201 19:03:51.832197       1 storage_scheduling.go:134] created PriorityClass system-cluster-critical with value 2000000000
	I1201 19:03:51.832222       1 storage_scheduling.go:143] all system priority classes are created successfully or already exist.
	I1201 19:03:52.343142       1 controller.go:609] quota admission added evaluator for: roles.rbac.authorization.k8s.io
	I1201 19:03:52.384644       1 controller.go:609] quota admission added evaluator for: rolebindings.rbac.authorization.k8s.io
	W1201 19:03:52.453674       1 lease.go:224] Resetting endpoints for master service "kubernetes" to [192.168.49.2]
	I1201 19:03:52.454852       1 controller.go:609] quota admission added evaluator for: endpoints
	I1201 19:03:52.458932       1 controller.go:609] quota admission added evaluator for: endpointslices.discovery.k8s.io
	I1201 19:03:53.268989       1 controller.go:609] quota admission added evaluator for: serviceaccounts
	I1201 19:03:54.043719       1 controller.go:609] quota admission added evaluator for: deployments.apps
	I1201 19:03:54.092913       1 controller.go:609] quota admission added evaluator for: daemonsets.apps
	I1201 19:03:57.527396       1 controller.go:609] quota admission added evaluator for: leases.coordination.k8s.io
	I1201 19:04:08.671460       1 controller.go:609] quota admission added evaluator for: replicasets.apps
	I1201 19:04:08.690129       1 controller.go:609] quota admission added evaluator for: controllerrevisions.apps
	I1201 19:04:26.711472       1 controller.go:609] quota admission added evaluator for: jobs.batch
	I1201 19:04:49.289139       1 controller.go:609] quota admission added evaluator for: ingresses.networking.k8s.io
	E1201 19:05:18.707285       1 authentication.go:53] Unable to authenticate the request due to an error: [invalid bearer token, Token has been invalidated]
	
	* 
	* ==> kube-controller-manager [fda32a8985773bbdac32e3e9c9fb3c90157638fb574d81ced96244f07ad1a085] <==
	* I1201 19:04:09.130791       1 disruption.go:339] Sending events to api server.
	I1201 19:04:09.193712       1 shared_informer.go:230] Caches are synced for attach detach 
	I1201 19:04:09.216677       1 shared_informer.go:230] Caches are synced for service account 
	I1201 19:04:09.224796       1 shared_informer.go:230] Caches are synced for namespace 
	I1201 19:04:09.273184       1 shared_informer.go:230] Caches are synced for resource quota 
	I1201 19:04:09.274005       1 shared_informer.go:230] Caches are synced for garbage collector 
	I1201 19:04:09.274111       1 garbagecollector.go:142] Garbage collector: all resource monitors have synced. Proceeding to collect garbage
	I1201 19:04:09.275671       1 shared_informer.go:230] Caches are synced for taint 
	I1201 19:04:09.275821       1 node_lifecycle_controller.go:1433] Initializing eviction metric for zone: 
	W1201 19:04:09.275886       1 node_lifecycle_controller.go:1048] Missing timestamp for Node ingress-addon-legacy-853196. Assuming now as a timestamp.
	I1201 19:04:09.275920       1 node_lifecycle_controller.go:1249] Controller detected that zone  is now in state Normal.
	I1201 19:04:09.276262       1 taint_manager.go:187] Starting NoExecuteTaintManager
	I1201 19:04:09.276741       1 event.go:278] Event(v1.ObjectReference{Kind:"Node", Namespace:"", Name:"ingress-addon-legacy-853196", UID:"6152397c-39f4-4f79-94c0-1eb49d692acc", APIVersion:"v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'RegisteredNode' Node ingress-addon-legacy-853196 event: Registered Node ingress-addon-legacy-853196 in Controller
	I1201 19:04:09.288576       1 shared_informer.go:230] Caches are synced for garbage collector 
	I1201 19:04:09.323354       1 shared_informer.go:230] Caches are synced for resource quota 
	I1201 19:04:09.749289       1 event.go:278] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"kube-system", Name:"coredns", UID:"60c46167-0a10-4fac-9d43-63793b5b8132", APIVersion:"apps/v1", ResourceVersion:"369", FieldPath:""}): type: 'Normal' reason: 'ScalingReplicaSet' Scaled down replica set coredns-66bff467f8 to 1
	I1201 19:04:09.792564       1 event.go:278] Event(v1.ObjectReference{Kind:"ReplicaSet", Namespace:"kube-system", Name:"coredns-66bff467f8", UID:"655f3348-30c5-406c-a288-8e962ab387d1", APIVersion:"apps/v1", ResourceVersion:"370", FieldPath:""}): type: 'Normal' reason: 'SuccessfulDelete' Deleted pod: coredns-66bff467f8-bs7wk
	I1201 19:04:26.704094       1 event.go:278] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"ingress-nginx", Name:"ingress-nginx-controller", UID:"75da375d-c0d4-4f07-9db4-f903caec7916", APIVersion:"apps/v1", ResourceVersion:"465", FieldPath:""}): type: 'Normal' reason: 'ScalingReplicaSet' Scaled up replica set ingress-nginx-controller-7fcf777cb7 to 1
	I1201 19:04:26.727152       1 event.go:278] Event(v1.ObjectReference{Kind:"ReplicaSet", Namespace:"ingress-nginx", Name:"ingress-nginx-controller-7fcf777cb7", UID:"68bd6a36-75e8-4112-b635-ba46179cae00", APIVersion:"apps/v1", ResourceVersion:"466", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: ingress-nginx-controller-7fcf777cb7-r9scq
	I1201 19:04:26.741843       1 event.go:278] Event(v1.ObjectReference{Kind:"Job", Namespace:"ingress-nginx", Name:"ingress-nginx-admission-create", UID:"19f8d16e-1f29-40b9-b123-18fc1955e3d2", APIVersion:"batch/v1", ResourceVersion:"470", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: ingress-nginx-admission-create-c27g5
	I1201 19:04:26.809832       1 event.go:278] Event(v1.ObjectReference{Kind:"Job", Namespace:"ingress-nginx", Name:"ingress-nginx-admission-patch", UID:"9bf2e8b6-175a-4604-a8c1-88bd93735714", APIVersion:"batch/v1", ResourceVersion:"484", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: ingress-nginx-admission-patch-5ttvl
	I1201 19:04:29.951332       1 event.go:278] Event(v1.ObjectReference{Kind:"Job", Namespace:"ingress-nginx", Name:"ingress-nginx-admission-patch", UID:"9bf2e8b6-175a-4604-a8c1-88bd93735714", APIVersion:"batch/v1", ResourceVersion:"490", FieldPath:""}): type: 'Normal' reason: 'Completed' Job completed
	I1201 19:04:29.972981       1 event.go:278] Event(v1.ObjectReference{Kind:"Job", Namespace:"ingress-nginx", Name:"ingress-nginx-admission-create", UID:"19f8d16e-1f29-40b9-b123-18fc1955e3d2", APIVersion:"batch/v1", ResourceVersion:"482", FieldPath:""}): type: 'Normal' reason: 'Completed' Job completed
	I1201 19:04:58.174198       1 event.go:278] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"default", Name:"hello-world-app", UID:"6b737cd9-2308-4816-9b63-830e97854f05", APIVersion:"apps/v1", ResourceVersion:"605", FieldPath:""}): type: 'Normal' reason: 'ScalingReplicaSet' Scaled up replica set hello-world-app-5f5d8b66bb to 1
	I1201 19:04:58.191963       1 event.go:278] Event(v1.ObjectReference{Kind:"ReplicaSet", Namespace:"default", Name:"hello-world-app-5f5d8b66bb", UID:"cedead01-9d31-4afe-983e-02e3ab61c7a6", APIVersion:"apps/v1", ResourceVersion:"606", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: hello-world-app-5f5d8b66bb-wd4qx
	
	* 
	* ==> kube-proxy [7d6571b43bec66131f168b595d25529e62290ef6d418ac56717d8b05381e4578] <==
	* W1201 19:04:09.426174       1 server_others.go:559] Unknown proxy mode "", assuming iptables proxy
	I1201 19:04:09.438554       1 node.go:136] Successfully retrieved node IP: 192.168.49.2
	I1201 19:04:09.438609       1 server_others.go:186] Using iptables Proxier.
	I1201 19:04:09.439304       1 server.go:583] Version: v1.18.20
	I1201 19:04:09.442338       1 config.go:315] Starting service config controller
	I1201 19:04:09.442521       1 shared_informer.go:223] Waiting for caches to sync for service config
	I1201 19:04:09.442722       1 config.go:133] Starting endpoints config controller
	I1201 19:04:09.442824       1 shared_informer.go:223] Waiting for caches to sync for endpoints config
	I1201 19:04:09.544421       1 shared_informer.go:230] Caches are synced for service config 
	I1201 19:04:09.544705       1 shared_informer.go:230] Caches are synced for endpoints config 
	
	* 
	* ==> kube-scheduler [bbc87b9ea593b5d42c79b5c8084f8381f0bed5bc7c165daee26a27de2f144005] <==
	* I1201 19:03:51.033498       1 registry.go:150] Registering EvenPodsSpread predicate and priority function
	I1201 19:03:51.035706       1 secure_serving.go:178] Serving securely on 127.0.0.1:10259
	I1201 19:03:51.036029       1 configmap_cafile_content.go:202] Starting client-ca::kube-system::extension-apiserver-authentication::client-ca-file
	I1201 19:03:51.036129       1 shared_informer.go:223] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file
	I1201 19:03:51.036224       1 tlsconfig.go:240] Starting DynamicServingCertificateController
	E1201 19:03:51.040102       1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumeclaims" in API group "" at the cluster scope
	E1201 19:03:51.040518       1 reflector.go:178] k8s.io/kubernetes/cmd/kube-scheduler/app/server.go:233: Failed to list *v1.Pod: pods is forbidden: User "system:kube-scheduler" cannot list resource "pods" in API group "" at the cluster scope
	E1201 19:03:51.040738       1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope
	E1201 19:03:51.041244       1 reflector.go:178] k8s.io/apiserver/pkg/server/dynamiccertificates/configmap_cafile_content.go:206: Failed to list *v1.ConfigMap: configmaps "extension-apiserver-authentication" is forbidden: User "system:kube-scheduler" cannot list resource "configmaps" in API group "" in the namespace "kube-system"
	E1201 19:03:51.043947       1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.Node: nodes is forbidden: User "system:kube-scheduler" cannot list resource "nodes" in API group "" at the cluster scope
	E1201 19:03:51.044239       1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1beta1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User "system:kube-scheduler" cannot list resource "poddisruptionbudgets" in API group "policy" at the cluster scope
	E1201 19:03:51.044437       1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.ReplicationController: replicationcontrollers is forbidden: User "system:kube-scheduler" cannot list resource "replicationcontrollers" in API group "" at the cluster scope
	E1201 19:03:51.044635       1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.ReplicaSet: replicasets.apps is forbidden: User "system:kube-scheduler" cannot list resource "replicasets" in API group "apps" at the cluster scope
	E1201 19:03:51.044803       1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.StatefulSet: statefulsets.apps is forbidden: User "system:kube-scheduler" cannot list resource "statefulsets" in API group "apps" at the cluster scope
	E1201 19:03:51.044967       1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumes" in API group "" at the cluster scope
	E1201 19:03:51.045133       1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.CSINode: csinodes.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csinodes" in API group "storage.k8s.io" at the cluster scope
	E1201 19:03:51.045299       1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.Service: services is forbidden: User "system:kube-scheduler" cannot list resource "services" in API group "" at the cluster scope
	E1201 19:03:51.889336       1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.CSINode: csinodes.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csinodes" in API group "storage.k8s.io" at the cluster scope
	E1201 19:03:51.936308       1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.ReplicaSet: replicasets.apps is forbidden: User "system:kube-scheduler" cannot list resource "replicasets" in API group "apps" at the cluster scope
	E1201 19:03:51.991020       1 reflector.go:178] k8s.io/kubernetes/cmd/kube-scheduler/app/server.go:233: Failed to list *v1.Pod: pods is forbidden: User "system:kube-scheduler" cannot list resource "pods" in API group "" at the cluster scope
	E1201 19:03:52.050897       1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumes" in API group "" at the cluster scope
	E1201 19:03:52.072432       1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.Node: nodes is forbidden: User "system:kube-scheduler" cannot list resource "nodes" in API group "" at the cluster scope
	I1201 19:03:52.536314       1 shared_informer.go:230] Caches are synced for client-ca::kube-system::extension-apiserver-authentication::client-ca-file 
	E1201 19:04:08.752444       1 factory.go:503] pod: kube-system/coredns-66bff467f8-bs7wk is already present in the active queue
	E1201 19:04:08.816979       1 factory.go:503] pod: kube-system/coredns-66bff467f8-v75d7 is already present in the active queue
	
	* 
	* ==> kubelet <==
	* Dec 01 19:05:02 ingress-addon-legacy-853196 kubelet[1647]: E1201 19:05:02.061069    1647 pod_workers.go:191] Error syncing pod 3698c895-313d-4080-8afd-6e664d82f226 ("hello-world-app-5f5d8b66bb-wd4qx_default(3698c895-313d-4080-8afd-6e664d82f226)"), skipping: failed to "StartContainer" for "hello-world-app" with CrashLoopBackOff: "back-off 10s restarting failed container=hello-world-app pod=hello-world-app-5f5d8b66bb-wd4qx_default(3698c895-313d-4080-8afd-6e664d82f226)"
	Dec 01 19:05:03 ingress-addon-legacy-853196 kubelet[1647]: I1201 19:05:03.064017    1647 topology_manager.go:221] [topologymanager] RemoveContainer - Container ID: c4113f826772bfac2cd4bbc0ef089d41418ad754426d96b733409574fb367445
	Dec 01 19:05:03 ingress-addon-legacy-853196 kubelet[1647]: E1201 19:05:03.064335    1647 pod_workers.go:191] Error syncing pod 3698c895-313d-4080-8afd-6e664d82f226 ("hello-world-app-5f5d8b66bb-wd4qx_default(3698c895-313d-4080-8afd-6e664d82f226)"), skipping: failed to "StartContainer" for "hello-world-app" with CrashLoopBackOff: "back-off 10s restarting failed container=hello-world-app pod=hello-world-app-5f5d8b66bb-wd4qx_default(3698c895-313d-4080-8afd-6e664d82f226)"
	Dec 01 19:05:09 ingress-addon-legacy-853196 kubelet[1647]: I1201 19:05:09.709762    1647 topology_manager.go:221] [topologymanager] RemoveContainer - Container ID: ed8e97fcb706ff7143e5b0d990df6de6070cb79b7252f7f9040fe0d364b61aa3
	Dec 01 19:05:09 ingress-addon-legacy-853196 kubelet[1647]: E1201 19:05:09.710614    1647 pod_workers.go:191] Error syncing pod f7be5420-c875-448d-9a55-a9fe5a3c2647 ("kube-ingress-dns-minikube_kube-system(f7be5420-c875-448d-9a55-a9fe5a3c2647)"), skipping: failed to "StartContainer" for "minikube-ingress-dns" with CrashLoopBackOff: "back-off 20s restarting failed container=minikube-ingress-dns pod=kube-ingress-dns-minikube_kube-system(f7be5420-c875-448d-9a55-a9fe5a3c2647)"
	Dec 01 19:05:13 ingress-addon-legacy-853196 kubelet[1647]: I1201 19:05:13.717392    1647 topology_manager.go:221] [topologymanager] RemoveContainer - Container ID: c4113f826772bfac2cd4bbc0ef089d41418ad754426d96b733409574fb367445
	Dec 01 19:05:14 ingress-addon-legacy-853196 kubelet[1647]: I1201 19:05:14.086837    1647 topology_manager.go:221] [topologymanager] RemoveContainer - Container ID: c4113f826772bfac2cd4bbc0ef089d41418ad754426d96b733409574fb367445
	Dec 01 19:05:14 ingress-addon-legacy-853196 kubelet[1647]: I1201 19:05:14.087202    1647 topology_manager.go:221] [topologymanager] RemoveContainer - Container ID: 867e7eb6262c2aa8af1d30f0b5ce281a887a30fd20525d060fe2de2257de3f2f
	Dec 01 19:05:14 ingress-addon-legacy-853196 kubelet[1647]: E1201 19:05:14.087469    1647 pod_workers.go:191] Error syncing pod 3698c895-313d-4080-8afd-6e664d82f226 ("hello-world-app-5f5d8b66bb-wd4qx_default(3698c895-313d-4080-8afd-6e664d82f226)"), skipping: failed to "StartContainer" for "hello-world-app" with CrashLoopBackOff: "back-off 20s restarting failed container=hello-world-app pod=hello-world-app-5f5d8b66bb-wd4qx_default(3698c895-313d-4080-8afd-6e664d82f226)"
	Dec 01 19:05:14 ingress-addon-legacy-853196 kubelet[1647]: I1201 19:05:14.262309    1647 reconciler.go:196] operationExecutor.UnmountVolume started for volume "minikube-ingress-dns-token-5ps28" (UniqueName: "kubernetes.io/secret/f7be5420-c875-448d-9a55-a9fe5a3c2647-minikube-ingress-dns-token-5ps28") pod "f7be5420-c875-448d-9a55-a9fe5a3c2647" (UID: "f7be5420-c875-448d-9a55-a9fe5a3c2647")
	Dec 01 19:05:14 ingress-addon-legacy-853196 kubelet[1647]: I1201 19:05:14.266588    1647 operation_generator.go:782] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f7be5420-c875-448d-9a55-a9fe5a3c2647-minikube-ingress-dns-token-5ps28" (OuterVolumeSpecName: "minikube-ingress-dns-token-5ps28") pod "f7be5420-c875-448d-9a55-a9fe5a3c2647" (UID: "f7be5420-c875-448d-9a55-a9fe5a3c2647"). InnerVolumeSpecName "minikube-ingress-dns-token-5ps28". PluginName "kubernetes.io/secret", VolumeGidValue ""
	Dec 01 19:05:14 ingress-addon-legacy-853196 kubelet[1647]: I1201 19:05:14.362750    1647 reconciler.go:319] Volume detached for volume "minikube-ingress-dns-token-5ps28" (UniqueName: "kubernetes.io/secret/f7be5420-c875-448d-9a55-a9fe5a3c2647-minikube-ingress-dns-token-5ps28") on node "ingress-addon-legacy-853196" DevicePath ""
	Dec 01 19:05:16 ingress-addon-legacy-853196 kubelet[1647]: I1201 19:05:16.101055    1647 topology_manager.go:221] [topologymanager] RemoveContainer - Container ID: ed8e97fcb706ff7143e5b0d990df6de6070cb79b7252f7f9040fe0d364b61aa3
	Dec 01 19:05:18 ingress-addon-legacy-853196 kubelet[1647]: E1201 19:05:18.698505    1647 event.go:260] Server rejected event '&v1.Event{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ObjectMeta:v1.ObjectMeta{Name:"ingress-nginx-controller-7fcf777cb7-r9scq.179cca9939c5d8b5", GenerateName:"", Namespace:"ingress-nginx", SelfLink:"", UID:"", ResourceVersion:"", Generation:0, CreationTimestamp:v1.Time{Time:time.Time{wall:0x0, ext:0, loc:(*time.Location)(nil)}}, DeletionTimestamp:(*v1.Time)(nil), DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string(nil), Annotations:map[string]string(nil), OwnerReferences:[]v1.OwnerReference(nil), Finalizers:[]string(nil), ClusterName:"", ManagedFields:[]v1.ManagedFieldsEntry(nil)}, InvolvedObject:v1.ObjectReference{Kind:"Pod", Namespace:"ingress-nginx", Name:"ingress-nginx-controller-7fcf777cb7-r9scq", UID:"b68d6b78-c587-4ed0-a1c7-5cee15f6d4fb", APIVersion:"v1", ResourceVersion:"472", FieldPath:"spec.containers{controller}"}, Reason:"Killing", Message:"Stoppi
ng container controller", Source:v1.EventSource{Component:"kubelet", Host:"ingress-addon-legacy-853196"}, FirstTimestamp:v1.Time{Time:time.Time{wall:0xc152a97ba8f70cb5, ext:84724089372, loc:(*time.Location)(0x6a0ef20)}}, LastTimestamp:v1.Time{Time:time.Time{wall:0xc152a97ba8f70cb5, ext:84724089372, loc:(*time.Location)(0x6a0ef20)}}, Count:1, Type:"Normal", EventTime:v1.MicroTime{Time:time.Time{wall:0x0, ext:0, loc:(*time.Location)(nil)}}, Series:(*v1.EventSeries)(nil), Action:"", Related:(*v1.ObjectReference)(nil), ReportingController:"", ReportingInstance:""}': 'events "ingress-nginx-controller-7fcf777cb7-r9scq.179cca9939c5d8b5" is forbidden: unable to create new content in namespace ingress-nginx because it is being terminated' (will not retry!)
	Dec 01 19:05:18 ingress-addon-legacy-853196 kubelet[1647]: E1201 19:05:18.712860    1647 event.go:260] Server rejected event '&v1.Event{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ObjectMeta:v1.ObjectMeta{Name:"ingress-nginx-controller-7fcf777cb7-r9scq.179cca9939c5d8b5", GenerateName:"", Namespace:"ingress-nginx", SelfLink:"", UID:"", ResourceVersion:"", Generation:0, CreationTimestamp:v1.Time{Time:time.Time{wall:0x0, ext:0, loc:(*time.Location)(nil)}}, DeletionTimestamp:(*v1.Time)(nil), DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string(nil), Annotations:map[string]string(nil), OwnerReferences:[]v1.OwnerReference(nil), Finalizers:[]string(nil), ClusterName:"", ManagedFields:[]v1.ManagedFieldsEntry(nil)}, InvolvedObject:v1.ObjectReference{Kind:"Pod", Namespace:"ingress-nginx", Name:"ingress-nginx-controller-7fcf777cb7-r9scq", UID:"b68d6b78-c587-4ed0-a1c7-5cee15f6d4fb", APIVersion:"v1", ResourceVersion:"472", FieldPath:"spec.containers{controller}"}, Reason:"Killing", Message:"Stoppi
ng container controller", Source:v1.EventSource{Component:"kubelet", Host:"ingress-addon-legacy-853196"}, FirstTimestamp:v1.Time{Time:time.Time{wall:0xc152a97ba8f70cb5, ext:84724089372, loc:(*time.Location)(0x6a0ef20)}}, LastTimestamp:v1.Time{Time:time.Time{wall:0xc152a97ba9c44252, ext:84737537977, loc:(*time.Location)(0x6a0ef20)}}, Count:2, Type:"Normal", EventTime:v1.MicroTime{Time:time.Time{wall:0x0, ext:0, loc:(*time.Location)(nil)}}, Series:(*v1.EventSeries)(nil), Action:"", Related:(*v1.ObjectReference)(nil), ReportingController:"", ReportingInstance:""}': 'events "ingress-nginx-controller-7fcf777cb7-r9scq.179cca9939c5d8b5" is forbidden: unable to create new content in namespace ingress-nginx because it is being terminated' (will not retry!)
	Dec 01 19:05:21 ingress-addon-legacy-853196 kubelet[1647]: W1201 19:05:21.113879    1647 pod_container_deletor.go:77] Container "c41cb1602a2acbdb68115c94aa665e9f3ab24b9545a74a3bd35b93d223119b65" not found in pod's containers
	Dec 01 19:05:22 ingress-addon-legacy-853196 kubelet[1647]: I1201 19:05:22.791371    1647 reconciler.go:196] operationExecutor.UnmountVolume started for volume "ingress-nginx-token-p2lxm" (UniqueName: "kubernetes.io/secret/b68d6b78-c587-4ed0-a1c7-5cee15f6d4fb-ingress-nginx-token-p2lxm") pod "b68d6b78-c587-4ed0-a1c7-5cee15f6d4fb" (UID: "b68d6b78-c587-4ed0-a1c7-5cee15f6d4fb")
	Dec 01 19:05:22 ingress-addon-legacy-853196 kubelet[1647]: I1201 19:05:22.791441    1647 reconciler.go:196] operationExecutor.UnmountVolume started for volume "webhook-cert" (UniqueName: "kubernetes.io/secret/b68d6b78-c587-4ed0-a1c7-5cee15f6d4fb-webhook-cert") pod "b68d6b78-c587-4ed0-a1c7-5cee15f6d4fb" (UID: "b68d6b78-c587-4ed0-a1c7-5cee15f6d4fb")
	Dec 01 19:05:22 ingress-addon-legacy-853196 kubelet[1647]: I1201 19:05:22.798607    1647 operation_generator.go:782] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b68d6b78-c587-4ed0-a1c7-5cee15f6d4fb-ingress-nginx-token-p2lxm" (OuterVolumeSpecName: "ingress-nginx-token-p2lxm") pod "b68d6b78-c587-4ed0-a1c7-5cee15f6d4fb" (UID: "b68d6b78-c587-4ed0-a1c7-5cee15f6d4fb"). InnerVolumeSpecName "ingress-nginx-token-p2lxm". PluginName "kubernetes.io/secret", VolumeGidValue ""
	Dec 01 19:05:22 ingress-addon-legacy-853196 kubelet[1647]: I1201 19:05:22.800174    1647 operation_generator.go:782] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b68d6b78-c587-4ed0-a1c7-5cee15f6d4fb-webhook-cert" (OuterVolumeSpecName: "webhook-cert") pod "b68d6b78-c587-4ed0-a1c7-5cee15f6d4fb" (UID: "b68d6b78-c587-4ed0-a1c7-5cee15f6d4fb"). InnerVolumeSpecName "webhook-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
	Dec 01 19:05:22 ingress-addon-legacy-853196 kubelet[1647]: I1201 19:05:22.891854    1647 reconciler.go:319] Volume detached for volume "ingress-nginx-token-p2lxm" (UniqueName: "kubernetes.io/secret/b68d6b78-c587-4ed0-a1c7-5cee15f6d4fb-ingress-nginx-token-p2lxm") on node "ingress-addon-legacy-853196" DevicePath ""
	Dec 01 19:05:22 ingress-addon-legacy-853196 kubelet[1647]: I1201 19:05:22.891920    1647 reconciler.go:319] Volume detached for volume "webhook-cert" (UniqueName: "kubernetes.io/secret/b68d6b78-c587-4ed0-a1c7-5cee15f6d4fb-webhook-cert") on node "ingress-addon-legacy-853196" DevicePath ""
	Dec 01 19:05:23 ingress-addon-legacy-853196 kubelet[1647]: W1201 19:05:23.715963    1647 kubelet_getters.go:297] Path "/var/lib/kubelet/pods/b68d6b78-c587-4ed0-a1c7-5cee15f6d4fb/volumes" does not exist
	Dec 01 19:05:25 ingress-addon-legacy-853196 kubelet[1647]: I1201 19:05:25.712973    1647 topology_manager.go:221] [topologymanager] RemoveContainer - Container ID: 867e7eb6262c2aa8af1d30f0b5ce281a887a30fd20525d060fe2de2257de3f2f
	Dec 01 19:05:25 ingress-addon-legacy-853196 kubelet[1647]: E1201 19:05:25.713261    1647 pod_workers.go:191] Error syncing pod 3698c895-313d-4080-8afd-6e664d82f226 ("hello-world-app-5f5d8b66bb-wd4qx_default(3698c895-313d-4080-8afd-6e664d82f226)"), skipping: failed to "StartContainer" for "hello-world-app" with CrashLoopBackOff: "back-off 20s restarting failed container=hello-world-app pod=hello-world-app-5f5d8b66bb-wd4qx_default(3698c895-313d-4080-8afd-6e664d82f226)"
	
	* 
	* ==> storage-provisioner [335972d2dfa898c9489758ca360dd10c30223e64b9d749753f67fa6d29f4fc4a] <==
	* I1201 19:04:13.426333       1 storage_provisioner.go:116] Initializing the minikube storage provisioner...
	I1201 19:04:13.438360       1 storage_provisioner.go:141] Storage provisioner initialized, now starting service!
	I1201 19:04:13.438449       1 leaderelection.go:243] attempting to acquire leader lease kube-system/k8s.io-minikube-hostpath...
	I1201 19:04:13.445921       1 leaderelection.go:253] successfully acquired lease kube-system/k8s.io-minikube-hostpath
	I1201 19:04:13.446445       1 event.go:282] Event(v1.ObjectReference{Kind:"Endpoints", Namespace:"kube-system", Name:"k8s.io-minikube-hostpath", UID:"0b49a4f8-63c7-41b3-84a2-f18329f67a71", APIVersion:"v1", ResourceVersion:"411", FieldPath:""}): type: 'Normal' reason: 'LeaderElection' ingress-addon-legacy-853196_1897a10b-e04c-4a38-bee1-1fbee4ef60b1 became leader
	I1201 19:04:13.446511       1 controller.go:835] Starting provisioner controller k8s.io/minikube-hostpath_ingress-addon-legacy-853196_1897a10b-e04c-4a38-bee1-1fbee4ef60b1!
	I1201 19:04:13.547627       1 controller.go:884] Started provisioner controller k8s.io/minikube-hostpath_ingress-addon-legacy-853196_1897a10b-e04c-4a38-bee1-1fbee4ef60b1!
	

                                                
                                                
-- /stdout --
helpers_test.go:254: (dbg) Run:  out/minikube-linux-arm64 status --format={{.APIServer}} -p ingress-addon-legacy-853196 -n ingress-addon-legacy-853196
helpers_test.go:261: (dbg) Run:  kubectl --context ingress-addon-legacy-853196 get po -o=jsonpath={.items[*].metadata.name} -A --field-selector=status.phase!=Running
helpers_test.go:285: <<< TestIngressAddonLegacy/serial/ValidateIngressAddons FAILED: end of post-mortem logs <<<
helpers_test.go:286: ---------------------/post-mortem---------------------------------
--- FAIL: TestIngressAddonLegacy/serial/ValidateIngressAddons (50.73s)

                                                
                                    

Test pass (269/314)

Order  Passed test  Duration (s)
3 TestDownloadOnly/v1.16.0/json-events 18.69
4 TestDownloadOnly/v1.16.0/preload-exists 0
8 TestDownloadOnly/v1.16.0/LogsDuration 0.09
10 TestDownloadOnly/v1.28.4/json-events 16.74
11 TestDownloadOnly/v1.28.4/preload-exists 0
15 TestDownloadOnly/v1.28.4/LogsDuration 0.1
17 TestDownloadOnly/v1.29.0-rc.1/json-events 17.5
18 TestDownloadOnly/v1.29.0-rc.1/preload-exists 0
22 TestDownloadOnly/v1.29.0-rc.1/LogsDuration 0.09
23 TestDownloadOnly/DeleteAll 0.25
24 TestDownloadOnly/DeleteAlwaysSucceeds 0.43
26 TestBinaryMirror 0.67
30 TestAddons/PreSetup/EnablingAddonOnNonExistingCluster 0.1
31 TestAddons/PreSetup/DisablingAddonOnNonExistingCluster 0.1
32 TestAddons/Setup 142.33
34 TestAddons/parallel/Registry 15.22
36 TestAddons/parallel/InspektorGadget 11.04
37 TestAddons/parallel/MetricsServer 6.18
40 TestAddons/parallel/CSI 59.99
41 TestAddons/parallel/Headlamp 12.65
42 TestAddons/parallel/CloudSpanner 5.71
43 TestAddons/parallel/LocalPath 57.17
44 TestAddons/parallel/NvidiaDevicePlugin 5.72
47 TestAddons/serial/GCPAuth/Namespaces 0.19
48 TestAddons/StoppedEnableDisable 12.65
49 TestCertOptions 37.56
50 TestCertExpiration 225.27
52 TestForceSystemdFlag 35.52
53 TestForceSystemdEnv 42.1
54 TestDockerEnvContainerd 49.07
59 TestErrorSpam/setup 33.3
60 TestErrorSpam/start 0.93
61 TestErrorSpam/status 1.14
62 TestErrorSpam/pause 1.91
63 TestErrorSpam/unpause 2.14
64 TestErrorSpam/stop 1.51
67 TestFunctional/serial/CopySyncFile 0
68 TestFunctional/serial/StartWithProxy 61.74
69 TestFunctional/serial/AuditLog 0
70 TestFunctional/serial/SoftStart 6.4
71 TestFunctional/serial/KubeContext 0.07
72 TestFunctional/serial/KubectlGetPods 0.09
75 TestFunctional/serial/CacheCmd/cache/add_remote 4.09
76 TestFunctional/serial/CacheCmd/cache/add_local 1.47
77 TestFunctional/serial/CacheCmd/cache/CacheDelete 0.08
78 TestFunctional/serial/CacheCmd/cache/list 0.07
79 TestFunctional/serial/CacheCmd/cache/verify_cache_inside_node 0.35
80 TestFunctional/serial/CacheCmd/cache/cache_reload 2.28
81 TestFunctional/serial/CacheCmd/cache/delete 0.16
82 TestFunctional/serial/MinikubeKubectlCmd 0.16
83 TestFunctional/serial/MinikubeKubectlCmdDirectly 0.16
86 TestFunctional/serial/LogsCmd 1.62
90 TestFunctional/parallel/ConfigCmd 0.6
91 TestFunctional/parallel/DashboardCmd 9.41
92 TestFunctional/parallel/DryRun 0.79
93 TestFunctional/parallel/InternationalLanguage 0.33
94 TestFunctional/parallel/StatusCmd 1.2
98 TestFunctional/parallel/ServiceCmdConnect 8.97
99 TestFunctional/parallel/AddonsCmd 0.2
100 TestFunctional/parallel/PersistentVolumeClaim 91.97
102 TestFunctional/parallel/SSHCmd 0.77
103 TestFunctional/parallel/CpCmd 1.64
105 TestFunctional/parallel/FileSync 0.43
106 TestFunctional/parallel/CertSync 2.47
110 TestFunctional/parallel/NodeLabels 0.14
112 TestFunctional/parallel/NonActiveRuntimeDisabled 0.91
114 TestFunctional/parallel/License 0.43
117 TestFunctional/parallel/TunnelCmd/serial/StartTunnel 0
121 TestFunctional/parallel/ServiceCmd/DeployApp 7.26
122 TestFunctional/parallel/ServiceCmd/List 0.42
123 TestFunctional/parallel/ServiceCmd/JSONOutput 0.41
124 TestFunctional/parallel/ServiceCmd/HTTPS 0.45
125 TestFunctional/parallel/ServiceCmd/Format 0.43
126 TestFunctional/parallel/ServiceCmd/URL 0.43
127 TestFunctional/parallel/ProfileCmd/profile_not_create 0.48
128 TestFunctional/parallel/ProfileCmd/profile_list 0.47
129 TestFunctional/parallel/ProfileCmd/profile_json_output 0.44
130 TestFunctional/parallel/MountCmd/any-port 7.75
134 TestFunctional/parallel/TunnelCmd/serial/DeleteTunnel 0.11
135 TestFunctional/parallel/MountCmd/specific-port 2.77
136 TestFunctional/parallel/MountCmd/VerifyCleanup 2.87
137 TestFunctional/parallel/Version/short 0.09
138 TestFunctional/parallel/Version/components 1.43
139 TestFunctional/parallel/ImageCommands/ImageListShort 0.29
140 TestFunctional/parallel/ImageCommands/ImageListTable 0.42
141 TestFunctional/parallel/ImageCommands/ImageListJson 0.41
142 TestFunctional/parallel/ImageCommands/ImageListYaml 0.35
143 TestFunctional/parallel/ImageCommands/ImageBuild 3.08
144 TestFunctional/parallel/ImageCommands/Setup 2
145 TestFunctional/parallel/UpdateContextCmd/no_changes 0.28
146 TestFunctional/parallel/UpdateContextCmd/no_minikube_cluster 0.28
147 TestFunctional/parallel/UpdateContextCmd/no_clusters 0.26
152 TestFunctional/parallel/ImageCommands/ImageRemove 0.53
154 TestFunctional/parallel/ImageCommands/ImageSaveDaemon 0.64
155 TestFunctional/delete_addon-resizer_images 0.08
156 TestFunctional/delete_my-image_image 0.02
157 TestFunctional/delete_minikube_cached_images 0.02
161 TestIngressAddonLegacy/StartLegacyK8sCluster 94.65
163 TestIngressAddonLegacy/serial/ValidateIngressAddonActivation 10.59
164 TestIngressAddonLegacy/serial/ValidateIngressDNSAddonActivation 0.7
168 TestJSONOutput/start/Command 85.36
169 TestJSONOutput/start/Audit 0
171 TestJSONOutput/start/parallel/DistinctCurrentSteps 0
172 TestJSONOutput/start/parallel/IncreasingCurrentSteps 0
174 TestJSONOutput/pause/Command 0.84
175 TestJSONOutput/pause/Audit 0
177 TestJSONOutput/pause/parallel/DistinctCurrentSteps 0
178 TestJSONOutput/pause/parallel/IncreasingCurrentSteps 0
180 TestJSONOutput/unpause/Command 0.77
181 TestJSONOutput/unpause/Audit 0
183 TestJSONOutput/unpause/parallel/DistinctCurrentSteps 0
184 TestJSONOutput/unpause/parallel/IncreasingCurrentSteps 0
186 TestJSONOutput/stop/Command 5.81
187 TestJSONOutput/stop/Audit 0
189 TestJSONOutput/stop/parallel/DistinctCurrentSteps 0
190 TestJSONOutput/stop/parallel/IncreasingCurrentSteps 0
191 TestErrorJSONOutput 0.27
193 TestKicCustomNetwork/create_custom_network 50.1
194 TestKicCustomNetwork/use_default_bridge_network 34.62
195 TestKicExistingNetwork 38.23
196 TestKicCustomSubnet 35.5
197 TestKicStaticIP 37.76
198 TestMainNoArgs 0.07
199 TestMinikubeProfile 66.2
202 TestMountStart/serial/StartWithMountFirst 9.65
203 TestMountStart/serial/VerifyMountFirst 0.3
204 TestMountStart/serial/StartWithMountSecond 7.67
205 TestMountStart/serial/VerifyMountSecond 0.31
206 TestMountStart/serial/DeleteFirst 1.71
207 TestMountStart/serial/VerifyMountPostDelete 0.3
208 TestMountStart/serial/Stop 1.24
209 TestMountStart/serial/RestartStopped 7.71
210 TestMountStart/serial/VerifyMountPostStop 0.42
213 TestMultiNode/serial/FreshStart2Nodes 105.92
214 TestMultiNode/serial/DeployApp2Nodes 4.96
215 TestMultiNode/serial/PingHostFrom2Pods 1.22
216 TestMultiNode/serial/AddNode 21.47
217 TestMultiNode/serial/MultiNodeLabels 0.1
218 TestMultiNode/serial/ProfileList 0.39
219 TestMultiNode/serial/CopyFile 11.9
220 TestMultiNode/serial/StopNode 2.53
221 TestMultiNode/serial/StartAfterStop 12.97
222 TestMultiNode/serial/RestartKeepsNodes 122.12
223 TestMultiNode/serial/DeleteNode 5.22
224 TestMultiNode/serial/StopMultiNode 24.47
225 TestMultiNode/serial/RestartMultiNode 81.81
226 TestMultiNode/serial/ValidateNameConflict 37.33
231 TestPreload 185.42
233 TestScheduledStopUnix 109.09
236 TestInsufficientStorage 10.55
237 TestRunningBinaryUpgrade 87.83
239 TestKubernetesUpgrade 140.63
240 TestMissingContainerUpgrade 172.51
242 TestPause/serial/Start 98.8
243 TestPause/serial/SecondStartNoReconfiguration 6.82
244 TestPause/serial/Pause 0.87
245 TestPause/serial/VerifyStatus 0.4
246 TestPause/serial/Unpause 1.17
247 TestPause/serial/PauseAgain 1.15
248 TestPause/serial/DeletePaused 5.21
249 TestPause/serial/VerifyDeletedResources 1.87
250 TestStoppedBinaryUpgrade/Setup 2.03
251 TestStoppedBinaryUpgrade/Upgrade 85.47
252 TestStoppedBinaryUpgrade/MinikubeLogs 1.63
261 TestNoKubernetes/serial/StartNoK8sWithVersion 0.1
262 TestNoKubernetes/serial/StartWithK8s 41.53
266 TestNoKubernetes/serial/StartWithStopK8s 19.66
271 TestNetworkPlugins/group/false 4.8
275 TestNoKubernetes/serial/Start 9.79
276 TestNoKubernetes/serial/VerifyK8sNotRunning 0.41
277 TestNoKubernetes/serial/ProfileList 1.21
278 TestNoKubernetes/serial/Stop 1.34
279 TestNoKubernetes/serial/StartNoArgs 7.86
280 TestNoKubernetes/serial/VerifyK8sNotRunningSecond 0.39
282 TestStartStop/group/old-k8s-version/serial/FirstStart 119.92
284 TestStartStop/group/default-k8s-diff-port/serial/FirstStart 93.35
285 TestStartStop/group/old-k8s-version/serial/DeployApp 8.64
286 TestStartStop/group/old-k8s-version/serial/EnableAddonWhileActive 1.18
287 TestStartStop/group/old-k8s-version/serial/Stop 12.51
288 TestStartStop/group/old-k8s-version/serial/EnableAddonAfterStop 0.28
289 TestStartStop/group/old-k8s-version/serial/SecondStart 652.2
290 TestStartStop/group/default-k8s-diff-port/serial/DeployApp 8.53
291 TestStartStop/group/default-k8s-diff-port/serial/EnableAddonWhileActive 1.3
292 TestStartStop/group/default-k8s-diff-port/serial/Stop 12.26
293 TestStartStop/group/default-k8s-diff-port/serial/EnableAddonAfterStop 0.25
294 TestStartStop/group/default-k8s-diff-port/serial/SecondStart 338.72
295 TestStartStop/group/default-k8s-diff-port/serial/UserAppExistsAfterStop 13.03
296 TestStartStop/group/default-k8s-diff-port/serial/AddonExistsAfterStop 5.12
297 TestStartStop/group/default-k8s-diff-port/serial/VerifyKubernetesImages 0.32
298 TestStartStop/group/default-k8s-diff-port/serial/Pause 3.64
300 TestStartStop/group/embed-certs/serial/FirstStart 59.57
301 TestStartStop/group/embed-certs/serial/DeployApp 8.48
302 TestStartStop/group/embed-certs/serial/EnableAddonWhileActive 1.25
303 TestStartStop/group/embed-certs/serial/Stop 12.27
304 TestStartStop/group/embed-certs/serial/EnableAddonAfterStop 0.24
305 TestStartStop/group/embed-certs/serial/SecondStart 334.54
306 TestStartStop/group/old-k8s-version/serial/UserAppExistsAfterStop 5.03
307 TestStartStop/group/old-k8s-version/serial/AddonExistsAfterStop 5.11
308 TestStartStop/group/old-k8s-version/serial/VerifyKubernetesImages 0.28
309 TestStartStop/group/old-k8s-version/serial/Pause 3.7
311 TestStartStop/group/no-preload/serial/FirstStart 70.59
312 TestStartStop/group/no-preload/serial/DeployApp 10.02
313 TestStartStop/group/no-preload/serial/EnableAddonWhileActive 1.23
314 TestStartStop/group/no-preload/serial/Stop 12.32
315 TestStartStop/group/no-preload/serial/EnableAddonAfterStop 0.25
316 TestStartStop/group/no-preload/serial/SecondStart 317.87
317 TestStartStop/group/embed-certs/serial/UserAppExistsAfterStop 15.03
318 TestStartStop/group/embed-certs/serial/AddonExistsAfterStop 5.11
319 TestStartStop/group/embed-certs/serial/VerifyKubernetesImages 0.33
320 TestStartStop/group/embed-certs/serial/Pause 3.63
322 TestStartStop/group/newest-cni/serial/FirstStart 48.12
323 TestStartStop/group/newest-cni/serial/DeployApp 0
324 TestStartStop/group/newest-cni/serial/EnableAddonWhileActive 1.24
325 TestStartStop/group/newest-cni/serial/Stop 1.31
326 TestStartStop/group/newest-cni/serial/EnableAddonAfterStop 0.23
327 TestStartStop/group/newest-cni/serial/SecondStart 30.34
328 TestStartStop/group/newest-cni/serial/UserAppExistsAfterStop 0
329 TestStartStop/group/newest-cni/serial/AddonExistsAfterStop 0
330 TestStartStop/group/newest-cni/serial/VerifyKubernetesImages 0.31
331 TestStartStop/group/newest-cni/serial/Pause 3.51
332 TestNetworkPlugins/group/auto/Start 61.89
333 TestNetworkPlugins/group/auto/KubeletFlags 0.53
334 TestNetworkPlugins/group/auto/NetCatPod 10.47
335 TestNetworkPlugins/group/auto/DNS 0.22
336 TestNetworkPlugins/group/auto/Localhost 0.22
337 TestNetworkPlugins/group/auto/HairPin 0.2
338 TestStartStop/group/no-preload/serial/UserAppExistsAfterStop 12.03
339 TestNetworkPlugins/group/flannel/Start 67.36
340 TestStartStop/group/no-preload/serial/AddonExistsAfterStop 5.15
341 TestStartStop/group/no-preload/serial/VerifyKubernetesImages 0.35
342 TestStartStop/group/no-preload/serial/Pause 5.02
343 TestNetworkPlugins/group/calico/Start 69.54
344 TestNetworkPlugins/group/flannel/ControllerPod 5.05
345 TestNetworkPlugins/group/flannel/KubeletFlags 0.38
346 TestNetworkPlugins/group/flannel/NetCatPod 11.43
347 TestNetworkPlugins/group/flannel/DNS 0.26
348 TestNetworkPlugins/group/flannel/Localhost 0.22
349 TestNetworkPlugins/group/flannel/HairPin 0.2
350 TestNetworkPlugins/group/calico/ControllerPod 5.05
351 TestNetworkPlugins/group/calico/KubeletFlags 0.45
352 TestNetworkPlugins/group/calico/NetCatPod 10.65
353 TestNetworkPlugins/group/calico/DNS 0.32
354 TestNetworkPlugins/group/calico/Localhost 0.3
355 TestNetworkPlugins/group/calico/HairPin 0.24
356 TestNetworkPlugins/group/custom-flannel/Start 71.22
357 TestNetworkPlugins/group/kindnet/Start 102.74
358 TestNetworkPlugins/group/custom-flannel/KubeletFlags 0.44
359 TestNetworkPlugins/group/custom-flannel/NetCatPod 11.49
360 TestNetworkPlugins/group/custom-flannel/DNS 0.21
361 TestNetworkPlugins/group/custom-flannel/Localhost 0.17
362 TestNetworkPlugins/group/custom-flannel/HairPin 0.19
363 TestNetworkPlugins/group/bridge/Start 84.91
364 TestNetworkPlugins/group/kindnet/ControllerPod 5.04
365 TestNetworkPlugins/group/kindnet/KubeletFlags 0.44
366 TestNetworkPlugins/group/kindnet/NetCatPod 10.65
367 TestNetworkPlugins/group/kindnet/DNS 0.22
368 TestNetworkPlugins/group/kindnet/Localhost 0.19
369 TestNetworkPlugins/group/kindnet/HairPin 0.2
370 TestNetworkPlugins/group/enable-default-cni/Start 83.46
371 TestNetworkPlugins/group/bridge/KubeletFlags 0.48
372 TestNetworkPlugins/group/bridge/NetCatPod 10.58
373 TestNetworkPlugins/group/bridge/DNS 0.25
374 TestNetworkPlugins/group/bridge/Localhost 0.19
375 TestNetworkPlugins/group/bridge/HairPin 0.25
376 TestNetworkPlugins/group/enable-default-cni/KubeletFlags 0.34
377 TestNetworkPlugins/group/enable-default-cni/NetCatPod 10.36
378 TestNetworkPlugins/group/enable-default-cni/DNS 0.2
379 TestNetworkPlugins/group/enable-default-cni/Localhost 0.17
380 TestNetworkPlugins/group/enable-default-cni/HairPin 0.19
TestDownloadOnly/v1.16.0/json-events (18.69s)

                                                
                                                
=== RUN   TestDownloadOnly/v1.16.0/json-events
aaa_download_only_test.go:69: (dbg) Run:  out/minikube-linux-arm64 start -o=json --download-only -p download-only-609102 --force --alsologtostderr --kubernetes-version=v1.16.0 --container-runtime=containerd --driver=docker  --container-runtime=containerd
aaa_download_only_test.go:69: (dbg) Done: out/minikube-linux-arm64 start -o=json --download-only -p download-only-609102 --force --alsologtostderr --kubernetes-version=v1.16.0 --container-runtime=containerd --driver=docker  --container-runtime=containerd: (18.685441782s)
--- PASS: TestDownloadOnly/v1.16.0/json-events (18.69s)

                                                
                                    
TestDownloadOnly/v1.16.0/preload-exists (0s)

                                                
                                                
=== RUN   TestDownloadOnly/v1.16.0/preload-exists
--- PASS: TestDownloadOnly/v1.16.0/preload-exists (0.00s)

                                                
                                    
TestDownloadOnly/v1.16.0/LogsDuration (0.09s)

                                                
                                                
=== RUN   TestDownloadOnly/v1.16.0/LogsDuration
aaa_download_only_test.go:172: (dbg) Run:  out/minikube-linux-arm64 logs -p download-only-609102
aaa_download_only_test.go:172: (dbg) Non-zero exit: out/minikube-linux-arm64 logs -p download-only-609102: exit status 85 (91.707947ms)

                                                
                                                
-- stdout --
	* 
	* ==> Audit <==
	* |---------|--------------------------------|----------------------|---------|---------|---------------------|----------|
	| Command |              Args              |       Profile        |  User   | Version |     Start Time      | End Time |
	|---------|--------------------------------|----------------------|---------|---------|---------------------|----------|
	| start   | -o=json --download-only        | download-only-609102 | jenkins | v1.32.0 | 01 Dec 23 18:51 UTC |          |
	|         | -p download-only-609102        |                      |         |         |                     |          |
	|         | --force --alsologtostderr      |                      |         |         |                     |          |
	|         | --kubernetes-version=v1.16.0   |                      |         |         |                     |          |
	|         | --container-runtime=containerd |                      |         |         |                     |          |
	|         | --driver=docker                |                      |         |         |                     |          |
	|         | --container-runtime=containerd |                      |         |         |                     |          |
	|---------|--------------------------------|----------------------|---------|---------|---------------------|----------|
	
	* 
	* ==> Last Start <==
	* Log file created at: 2023/12/01 18:51:06
	Running on machine: ip-172-31-31-251
	Binary: Built with gc go1.21.4 for linux/arm64
	Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg
	I1201 18:51:06.589043  258306 out.go:296] Setting OutFile to fd 1 ...
	I1201 18:51:06.589282  258306 out.go:343] TERM=,COLORTERM=, which probably does not support color
	I1201 18:51:06.589307  258306 out.go:309] Setting ErrFile to fd 2...
	I1201 18:51:06.589328  258306 out.go:343] TERM=,COLORTERM=, which probably does not support color
	I1201 18:51:06.589651  258306 root.go:338] Updating PATH: /home/jenkins/minikube-integration/17703-252966/.minikube/bin
	W1201 18:51:06.589878  258306 root.go:314] Error reading config file at /home/jenkins/minikube-integration/17703-252966/.minikube/config/config.json: open /home/jenkins/minikube-integration/17703-252966/.minikube/config/config.json: no such file or directory
	I1201 18:51:06.590365  258306 out.go:303] Setting JSON to true
	I1201 18:51:06.591320  258306 start.go:128] hostinfo: {"hostname":"ip-172-31-31-251","uptime":5613,"bootTime":1701451054,"procs":168,"os":"linux","platform":"ubuntu","platformFamily":"debian","platformVersion":"20.04","kernelVersion":"5.15.0-1050-aws","kernelArch":"aarch64","virtualizationSystem":"","virtualizationRole":"","hostId":"982e3628-3742-4b3e-bb63-ac1b07660ec7"}
	I1201 18:51:06.591430  258306 start.go:138] virtualization:  
	I1201 18:51:06.594572  258306 out.go:97] [download-only-609102] minikube v1.32.0 on Ubuntu 20.04 (arm64)
	I1201 18:51:06.602084  258306 out.go:169] MINIKUBE_LOCATION=17703
	W1201 18:51:06.600727  258306 preload.go:295] Failed to list preload files: open /home/jenkins/minikube-integration/17703-252966/.minikube/cache/preloaded-tarball: no such file or directory
	I1201 18:51:06.600793  258306 notify.go:220] Checking for updates...
	I1201 18:51:06.604424  258306 out.go:169] MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
	I1201 18:51:06.606680  258306 out.go:169] KUBECONFIG=/home/jenkins/minikube-integration/17703-252966/kubeconfig
	I1201 18:51:06.611765  258306 out.go:169] MINIKUBE_HOME=/home/jenkins/minikube-integration/17703-252966/.minikube
	I1201 18:51:06.613709  258306 out.go:169] MINIKUBE_BIN=out/minikube-linux-arm64
	W1201 18:51:06.617835  258306 out.go:272] minikube skips various validations when --force is supplied; this may lead to unexpected behavior
	I1201 18:51:06.618107  258306 driver.go:392] Setting default libvirt URI to qemu:///system
	I1201 18:51:06.643886  258306 docker.go:122] docker version: linux-24.0.7:Docker Engine - Community
	I1201 18:51:06.644005  258306 cli_runner.go:164] Run: docker system info --format "{{json .}}"
	I1201 18:51:06.719291  258306 info.go:266] docker info: {ID:EOU5:DNGX:XN6V:L2FZ:UXRM:5TWK:EVUR:KC2F:GT7Z:Y4O4:GB77:5PD3 Containers:0 ContainersRunning:0 ContainersPaused:0 ContainersStopped:0 Images:1 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local logentries splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:true BridgeNfIP6Tables:true Debug:false NFd:28 OomKillDisable:true NGoroutines:42 SystemTime:2023-12-01 18:51:06.709387263 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1050-aws OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Archi
tecture:aarch64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:2 MemTotal:8215040000 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ip-172-31-31-251 Labels:[] ExperimentalBuild:false ServerVersion:24.0.7 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:d8f198a4ed8892c764191ef7b3b06d8a2eeb5c7f Expected:d8f198a4ed8892c764191ef7b3b06d8a2eeb5c7f} RuncCommit:{ID:v1.1.10-0-g18a0cb0 Expected:v1.1.10-0-g18a0cb0} InitCommit:{ID:de40ad0 Expected:de40ad0} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:<nil>
ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.11.2] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.21.0]] Warnings:<nil>}}
	I1201 18:51:06.719402  258306 docker.go:295] overlay module found
	I1201 18:51:06.721761  258306 out.go:97] Using the docker driver based on user configuration
	I1201 18:51:06.721785  258306 start.go:298] selected driver: docker
	I1201 18:51:06.721795  258306 start.go:902] validating driver "docker" against <nil>
	I1201 18:51:06.721906  258306 cli_runner.go:164] Run: docker system info --format "{{json .}}"
	I1201 18:51:06.790853  258306 info.go:266] docker info: {ID:EOU5:DNGX:XN6V:L2FZ:UXRM:5TWK:EVUR:KC2F:GT7Z:Y4O4:GB77:5PD3 Containers:0 ContainersRunning:0 ContainersPaused:0 ContainersStopped:0 Images:1 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local logentries splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:true BridgeNfIP6Tables:true Debug:false NFd:28 OomKillDisable:true NGoroutines:42 SystemTime:2023-12-01 18:51:06.780852421 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1050-aws OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Archi
tecture:aarch64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:2 MemTotal:8215040000 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ip-172-31-31-251 Labels:[] ExperimentalBuild:false ServerVersion:24.0.7 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:d8f198a4ed8892c764191ef7b3b06d8a2eeb5c7f Expected:d8f198a4ed8892c764191ef7b3b06d8a2eeb5c7f} RuncCommit:{ID:v1.1.10-0-g18a0cb0 Expected:v1.1.10-0-g18a0cb0} InitCommit:{ID:de40ad0 Expected:de40ad0} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:<nil>
ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.11.2] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.21.0]] Warnings:<nil>}}
	I1201 18:51:06.791030  258306 start_flags.go:309] no existing cluster config was found, will generate one from the flags 
	I1201 18:51:06.791409  258306 start_flags.go:394] Using suggested 2200MB memory alloc based on sys=7834MB, container=7834MB
	I1201 18:51:06.791597  258306 start_flags.go:913] Wait components to verify : map[apiserver:true system_pods:true]
	I1201 18:51:06.793878  258306 out.go:169] Using Docker driver with root privileges
	I1201 18:51:06.795793  258306 cni.go:84] Creating CNI manager for ""
	I1201 18:51:06.795821  258306 cni.go:143] "docker" driver + "containerd" runtime found, recommending kindnet
	I1201 18:51:06.795833  258306 start_flags.go:318] Found "CNI" CNI - setting NetworkPlugin=cni
	I1201 18:51:06.795855  258306 start_flags.go:323] config:
	{Name:download-only-609102 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.42-1701387262-17703@sha256:a5458414df1be5e58eff93b3e67e6ecaad7e51ab23139de15714f7345af15e2f Memory:2200 CPUs:2 DiskSize:20000 VMDriver: Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:0 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.16.0 ClusterName:download-only-609102 Namespace:default APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRu
ntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI: NodeIP: NodePort:8443 NodeName:} Nodes:[] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 AutoPauseInterval:1m0s GPUs:}
	I1201 18:51:06.797722  258306 out.go:97] Starting control plane node download-only-609102 in cluster download-only-609102
	I1201 18:51:06.797753  258306 cache.go:121] Beginning downloading kic base image for docker with containerd
	I1201 18:51:06.799846  258306 out.go:97] Pulling base image ...
	I1201 18:51:06.799883  258306 preload.go:132] Checking if preload exists for k8s version v1.16.0 and runtime containerd
	I1201 18:51:06.799982  258306 image.go:79] Checking for gcr.io/k8s-minikube/kicbase-builds:v0.0.42-1701387262-17703@sha256:a5458414df1be5e58eff93b3e67e6ecaad7e51ab23139de15714f7345af15e2f in local docker daemon
	I1201 18:51:06.817459  258306 cache.go:149] Downloading gcr.io/k8s-minikube/kicbase-builds:v0.0.42-1701387262-17703@sha256:a5458414df1be5e58eff93b3e67e6ecaad7e51ab23139de15714f7345af15e2f to local cache
	I1201 18:51:06.817663  258306 image.go:63] Checking for gcr.io/k8s-minikube/kicbase-builds:v0.0.42-1701387262-17703@sha256:a5458414df1be5e58eff93b3e67e6ecaad7e51ab23139de15714f7345af15e2f in local cache directory
	I1201 18:51:06.817758  258306 image.go:118] Writing gcr.io/k8s-minikube/kicbase-builds:v0.0.42-1701387262-17703@sha256:a5458414df1be5e58eff93b3e67e6ecaad7e51ab23139de15714f7345af15e2f to local cache
	I1201 18:51:06.856739  258306 preload.go:119] Found remote preload: https://storage.googleapis.com/minikube-preloaded-volume-tarballs/v18/v1.16.0/preloaded-images-k8s-v18-v1.16.0-containerd-overlay2-arm64.tar.lz4
	I1201 18:51:06.856768  258306 cache.go:56] Caching tarball of preloaded images
	I1201 18:51:06.856951  258306 preload.go:132] Checking if preload exists for k8s version v1.16.0 and runtime containerd
	I1201 18:51:06.859324  258306 out.go:97] Downloading Kubernetes v1.16.0 preload ...
	I1201 18:51:06.859356  258306 preload.go:238] getting checksum for preloaded-images-k8s-v18-v1.16.0-containerd-overlay2-arm64.tar.lz4 ...
	I1201 18:51:06.971855  258306 download.go:107] Downloading: https://storage.googleapis.com/minikube-preloaded-volume-tarballs/v18/v1.16.0/preloaded-images-k8s-v18-v1.16.0-containerd-overlay2-arm64.tar.lz4?checksum=md5:1f1e2324dbd6e4f3d8734226d9194e9f -> /home/jenkins/minikube-integration/17703-252966/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.16.0-containerd-overlay2-arm64.tar.lz4
	I1201 18:51:12.727574  258306 cache.go:152] successfully saved gcr.io/k8s-minikube/kicbase-builds:v0.0.42-1701387262-17703@sha256:a5458414df1be5e58eff93b3e67e6ecaad7e51ab23139de15714f7345af15e2f as a tarball
	I1201 18:51:21.123796  258306 preload.go:249] saving checksum for preloaded-images-k8s-v18-v1.16.0-containerd-overlay2-arm64.tar.lz4 ...
	I1201 18:51:21.123945  258306 preload.go:256] verifying checksum of /home/jenkins/minikube-integration/17703-252966/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.16.0-containerd-overlay2-arm64.tar.lz4 ...
	
	* 
	* The control plane node "" does not exist.
	  To start a cluster, run: "minikube start -p download-only-609102"

                                                
                                                
-- /stdout --
aaa_download_only_test.go:173: minikube logs failed with error: exit status 85
--- PASS: TestDownloadOnly/v1.16.0/LogsDuration (0.09s)

                                                
                                    
TestDownloadOnly/v1.28.4/json-events (16.74s)

                                                
                                                
=== RUN   TestDownloadOnly/v1.28.4/json-events
aaa_download_only_test.go:69: (dbg) Run:  out/minikube-linux-arm64 start -o=json --download-only -p download-only-609102 --force --alsologtostderr --kubernetes-version=v1.28.4 --container-runtime=containerd --driver=docker  --container-runtime=containerd
aaa_download_only_test.go:69: (dbg) Done: out/minikube-linux-arm64 start -o=json --download-only -p download-only-609102 --force --alsologtostderr --kubernetes-version=v1.28.4 --container-runtime=containerd --driver=docker  --container-runtime=containerd: (16.737362207s)
--- PASS: TestDownloadOnly/v1.28.4/json-events (16.74s)

                                                
                                    
TestDownloadOnly/v1.28.4/preload-exists (0s)

                                                
                                                
=== RUN   TestDownloadOnly/v1.28.4/preload-exists
--- PASS: TestDownloadOnly/v1.28.4/preload-exists (0.00s)

                                                
                                    
TestDownloadOnly/v1.28.4/LogsDuration (0.1s)

                                                
                                                
=== RUN   TestDownloadOnly/v1.28.4/LogsDuration
aaa_download_only_test.go:172: (dbg) Run:  out/minikube-linux-arm64 logs -p download-only-609102
aaa_download_only_test.go:172: (dbg) Non-zero exit: out/minikube-linux-arm64 logs -p download-only-609102: exit status 85 (98.312212ms)

                                                
                                                
-- stdout --
	* 
	* ==> Audit <==
	* |---------|--------------------------------|----------------------|---------|---------|---------------------|----------|
	| Command |              Args              |       Profile        |  User   | Version |     Start Time      | End Time |
	|---------|--------------------------------|----------------------|---------|---------|---------------------|----------|
	| start   | -o=json --download-only        | download-only-609102 | jenkins | v1.32.0 | 01 Dec 23 18:51 UTC |          |
	|         | -p download-only-609102        |                      |         |         |                     |          |
	|         | --force --alsologtostderr      |                      |         |         |                     |          |
	|         | --kubernetes-version=v1.16.0   |                      |         |         |                     |          |
	|         | --container-runtime=containerd |                      |         |         |                     |          |
	|         | --driver=docker                |                      |         |         |                     |          |
	|         | --container-runtime=containerd |                      |         |         |                     |          |
	| start   | -o=json --download-only        | download-only-609102 | jenkins | v1.32.0 | 01 Dec 23 18:51 UTC |          |
	|         | -p download-only-609102        |                      |         |         |                     |          |
	|         | --force --alsologtostderr      |                      |         |         |                     |          |
	|         | --kubernetes-version=v1.28.4   |                      |         |         |                     |          |
	|         | --container-runtime=containerd |                      |         |         |                     |          |
	|         | --driver=docker                |                      |         |         |                     |          |
	|         | --container-runtime=containerd |                      |         |         |                     |          |
	|---------|--------------------------------|----------------------|---------|---------|---------------------|----------|
	
	* 
	* ==> Last Start <==
	* Log file created at: 2023/12/01 18:51:25
	Running on machine: ip-172-31-31-251
	Binary: Built with gc go1.21.4 for linux/arm64
	Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg
	I1201 18:51:25.354468  258383 out.go:296] Setting OutFile to fd 1 ...
	I1201 18:51:25.354600  258383 out.go:343] TERM=,COLORTERM=, which probably does not support color
	I1201 18:51:25.354610  258383 out.go:309] Setting ErrFile to fd 2...
	I1201 18:51:25.354616  258383 out.go:343] TERM=,COLORTERM=, which probably does not support color
	I1201 18:51:25.354859  258383 root.go:338] Updating PATH: /home/jenkins/minikube-integration/17703-252966/.minikube/bin
	W1201 18:51:25.354999  258383 root.go:314] Error reading config file at /home/jenkins/minikube-integration/17703-252966/.minikube/config/config.json: open /home/jenkins/minikube-integration/17703-252966/.minikube/config/config.json: no such file or directory
	I1201 18:51:25.355252  258383 out.go:303] Setting JSON to true
	I1201 18:51:25.356068  258383 start.go:128] hostinfo: {"hostname":"ip-172-31-31-251","uptime":5632,"bootTime":1701451054,"procs":168,"os":"linux","platform":"ubuntu","platformFamily":"debian","platformVersion":"20.04","kernelVersion":"5.15.0-1050-aws","kernelArch":"aarch64","virtualizationSystem":"","virtualizationRole":"","hostId":"982e3628-3742-4b3e-bb63-ac1b07660ec7"}
	I1201 18:51:25.356142  258383 start.go:138] virtualization:  
	I1201 18:51:25.358493  258383 out.go:97] [download-only-609102] minikube v1.32.0 on Ubuntu 20.04 (arm64)
	I1201 18:51:25.360627  258383 out.go:169] MINIKUBE_LOCATION=17703
	I1201 18:51:25.358833  258383 notify.go:220] Checking for updates...
	I1201 18:51:25.364511  258383 out.go:169] MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
	I1201 18:51:25.366234  258383 out.go:169] KUBECONFIG=/home/jenkins/minikube-integration/17703-252966/kubeconfig
	I1201 18:51:25.368154  258383 out.go:169] MINIKUBE_HOME=/home/jenkins/minikube-integration/17703-252966/.minikube
	I1201 18:51:25.370061  258383 out.go:169] MINIKUBE_BIN=out/minikube-linux-arm64
	W1201 18:51:25.373525  258383 out.go:272] minikube skips various validations when --force is supplied; this may lead to unexpected behavior
	I1201 18:51:25.374103  258383 config.go:182] Loaded profile config "download-only-609102": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.16.0
	W1201 18:51:25.374167  258383 start.go:810] api.Load failed for download-only-609102: filestore "download-only-609102": Docker machine "download-only-609102" does not exist. Use "docker-machine ls" to list machines. Use "docker-machine create" to add a new one.
	I1201 18:51:25.374272  258383 driver.go:392] Setting default libvirt URI to qemu:///system
	W1201 18:51:25.374300  258383 start.go:810] api.Load failed for download-only-609102: filestore "download-only-609102": Docker machine "download-only-609102" does not exist. Use "docker-machine ls" to list machines. Use "docker-machine create" to add a new one.
	I1201 18:51:25.400920  258383 docker.go:122] docker version: linux-24.0.7:Docker Engine - Community
	I1201 18:51:25.401041  258383 cli_runner.go:164] Run: docker system info --format "{{json .}}"
	I1201 18:51:25.486616  258383 info.go:266] docker info: {ID:EOU5:DNGX:XN6V:L2FZ:UXRM:5TWK:EVUR:KC2F:GT7Z:Y4O4:GB77:5PD3 Containers:0 ContainersRunning:0 ContainersPaused:0 ContainersStopped:0 Images:1 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local logentries splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:true BridgeNfIP6Tables:true Debug:false NFd:28 OomKillDisable:true NGoroutines:38 SystemTime:2023-12-01 18:51:25.476825995 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1050-aws OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Archi
tecture:aarch64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:2 MemTotal:8215040000 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ip-172-31-31-251 Labels:[] ExperimentalBuild:false ServerVersion:24.0.7 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:d8f198a4ed8892c764191ef7b3b06d8a2eeb5c7f Expected:d8f198a4ed8892c764191ef7b3b06d8a2eeb5c7f} RuncCommit:{ID:v1.1.10-0-g18a0cb0 Expected:v1.1.10-0-g18a0cb0} InitCommit:{ID:de40ad0 Expected:de40ad0} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:<nil>
ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.11.2] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.21.0]] Warnings:<nil>}}
	I1201 18:51:25.486716  258383 docker.go:295] overlay module found
	I1201 18:51:25.488879  258383 out.go:97] Using the docker driver based on existing profile
	I1201 18:51:25.488908  258383 start.go:298] selected driver: docker
	I1201 18:51:25.488915  258383 start.go:902] validating driver "docker" against &{Name:download-only-609102 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.42-1701387262-17703@sha256:a5458414df1be5e58eff93b3e67e6ecaad7e51ab23139de15714f7345af15e2f Memory:2200 CPUs:2 DiskSize:20000 VMDriver: Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:0 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.16.0 ClusterName:download-only-609102 Namespace:default APIServerName:
minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI: NodeIP: NodePort:8443 NodeName:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.16.0 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnet
ClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 AutoPauseInterval:1m0s GPUs:}
	I1201 18:51:25.489101  258383 cli_runner.go:164] Run: docker system info --format "{{json .}}"
	I1201 18:51:25.556271  258383 info.go:266] docker info: {ID:EOU5:DNGX:XN6V:L2FZ:UXRM:5TWK:EVUR:KC2F:GT7Z:Y4O4:GB77:5PD3 Containers:0 ContainersRunning:0 ContainersPaused:0 ContainersStopped:0 Images:1 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local logentries splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:true BridgeNfIP6Tables:true Debug:false NFd:28 OomKillDisable:true NGoroutines:38 SystemTime:2023-12-01 18:51:25.546609276 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1050-aws OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Archi
tecture:aarch64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:2 MemTotal:8215040000 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ip-172-31-31-251 Labels:[] ExperimentalBuild:false ServerVersion:24.0.7 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:d8f198a4ed8892c764191ef7b3b06d8a2eeb5c7f Expected:d8f198a4ed8892c764191ef7b3b06d8a2eeb5c7f} RuncCommit:{ID:v1.1.10-0-g18a0cb0 Expected:v1.1.10-0-g18a0cb0} InitCommit:{ID:de40ad0 Expected:de40ad0} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:<nil>
ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.11.2] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.21.0]] Warnings:<nil>}}
	I1201 18:51:25.556781  258383 cni.go:84] Creating CNI manager for ""
	I1201 18:51:25.556801  258383 cni.go:143] "docker" driver + "containerd" runtime found, recommending kindnet
	I1201 18:51:25.556816  258383 start_flags.go:323] config:
	{Name:download-only-609102 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.42-1701387262-17703@sha256:a5458414df1be5e58eff93b3e67e6ecaad7e51ab23139de15714f7345af15e2f Memory:2200 CPUs:2 DiskSize:20000 VMDriver: Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:0 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.28.4 ClusterName:download-only-609102 Namespace:default APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRu
ntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI: NodeIP: NodePort:8443 NodeName:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.16.0 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 AutoPauseInter
val:1m0s GPUs:}
	I1201 18:51:25.558672  258383 out.go:97] Starting control plane node download-only-609102 in cluster download-only-609102
	I1201 18:51:25.558699  258383 cache.go:121] Beginning downloading kic base image for docker with containerd
	I1201 18:51:25.560574  258383 out.go:97] Pulling base image ...
	I1201 18:51:25.560601  258383 preload.go:132] Checking if preload exists for k8s version v1.28.4 and runtime containerd
	I1201 18:51:25.560651  258383 image.go:79] Checking for gcr.io/k8s-minikube/kicbase-builds:v0.0.42-1701387262-17703@sha256:a5458414df1be5e58eff93b3e67e6ecaad7e51ab23139de15714f7345af15e2f in local docker daemon
	I1201 18:51:25.578457  258383 cache.go:149] Downloading gcr.io/k8s-minikube/kicbase-builds:v0.0.42-1701387262-17703@sha256:a5458414df1be5e58eff93b3e67e6ecaad7e51ab23139de15714f7345af15e2f to local cache
	I1201 18:51:25.578610  258383 image.go:63] Checking for gcr.io/k8s-minikube/kicbase-builds:v0.0.42-1701387262-17703@sha256:a5458414df1be5e58eff93b3e67e6ecaad7e51ab23139de15714f7345af15e2f in local cache directory
	I1201 18:51:25.578628  258383 image.go:66] Found gcr.io/k8s-minikube/kicbase-builds:v0.0.42-1701387262-17703@sha256:a5458414df1be5e58eff93b3e67e6ecaad7e51ab23139de15714f7345af15e2f in local cache directory, skipping pull
	I1201 18:51:25.578634  258383 image.go:105] gcr.io/k8s-minikube/kicbase-builds:v0.0.42-1701387262-17703@sha256:a5458414df1be5e58eff93b3e67e6ecaad7e51ab23139de15714f7345af15e2f exists in cache, skipping pull
	I1201 18:51:25.578641  258383 cache.go:152] successfully saved gcr.io/k8s-minikube/kicbase-builds:v0.0.42-1701387262-17703@sha256:a5458414df1be5e58eff93b3e67e6ecaad7e51ab23139de15714f7345af15e2f as a tarball
	I1201 18:51:25.629849  258383 preload.go:119] Found remote preload: https://storage.googleapis.com/minikube-preloaded-volume-tarballs/v18/v1.28.4/preloaded-images-k8s-v18-v1.28.4-containerd-overlay2-arm64.tar.lz4
	I1201 18:51:25.629878  258383 cache.go:56] Caching tarball of preloaded images
	I1201 18:51:25.630580  258383 preload.go:132] Checking if preload exists for k8s version v1.28.4 and runtime containerd
	I1201 18:51:25.632672  258383 out.go:97] Downloading Kubernetes v1.28.4 preload ...
	I1201 18:51:25.632697  258383 preload.go:238] getting checksum for preloaded-images-k8s-v18-v1.28.4-containerd-overlay2-arm64.tar.lz4 ...
	I1201 18:51:25.744726  258383 download.go:107] Downloading: https://storage.googleapis.com/minikube-preloaded-volume-tarballs/v18/v1.28.4/preloaded-images-k8s-v18-v1.28.4-containerd-overlay2-arm64.tar.lz4?checksum=md5:cc2d75db20c4d651f0460755d6df7b03 -> /home/jenkins/minikube-integration/17703-252966/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.28.4-containerd-overlay2-arm64.tar.lz4
	
	* 
	* The control plane node "" does not exist.
	  To start a cluster, run: "minikube start -p download-only-609102"

                                                
                                                
-- /stdout --
aaa_download_only_test.go:173: minikube logs failed with error: exit status 85
--- PASS: TestDownloadOnly/v1.28.4/LogsDuration (0.10s)

                                                
                                    
TestDownloadOnly/v1.29.0-rc.1/json-events (17.5s)

                                                
                                                
=== RUN   TestDownloadOnly/v1.29.0-rc.1/json-events
aaa_download_only_test.go:69: (dbg) Run:  out/minikube-linux-arm64 start -o=json --download-only -p download-only-609102 --force --alsologtostderr --kubernetes-version=v1.29.0-rc.1 --container-runtime=containerd --driver=docker  --container-runtime=containerd
aaa_download_only_test.go:69: (dbg) Done: out/minikube-linux-arm64 start -o=json --download-only -p download-only-609102 --force --alsologtostderr --kubernetes-version=v1.29.0-rc.1 --container-runtime=containerd --driver=docker  --container-runtime=containerd: (17.498793255s)
--- PASS: TestDownloadOnly/v1.29.0-rc.1/json-events (17.50s)

                                                
                                    
TestDownloadOnly/v1.29.0-rc.1/preload-exists (0s)

                                                
                                                
=== RUN   TestDownloadOnly/v1.29.0-rc.1/preload-exists
--- PASS: TestDownloadOnly/v1.29.0-rc.1/preload-exists (0.00s)

                                                
                                    
TestDownloadOnly/v1.29.0-rc.1/LogsDuration (0.09s)

                                                
                                                
=== RUN   TestDownloadOnly/v1.29.0-rc.1/LogsDuration
aaa_download_only_test.go:172: (dbg) Run:  out/minikube-linux-arm64 logs -p download-only-609102
aaa_download_only_test.go:172: (dbg) Non-zero exit: out/minikube-linux-arm64 logs -p download-only-609102: exit status 85 (85.90112ms)

                                                
                                                
-- stdout --
	* 
	* ==> Audit <==
	* |---------|-----------------------------------|----------------------|---------|---------|---------------------|----------|
	| Command |               Args                |       Profile        |  User   | Version |     Start Time      | End Time |
	|---------|-----------------------------------|----------------------|---------|---------|---------------------|----------|
	| start   | -o=json --download-only           | download-only-609102 | jenkins | v1.32.0 | 01 Dec 23 18:51 UTC |          |
	|         | -p download-only-609102           |                      |         |         |                     |          |
	|         | --force --alsologtostderr         |                      |         |         |                     |          |
	|         | --kubernetes-version=v1.16.0      |                      |         |         |                     |          |
	|         | --container-runtime=containerd    |                      |         |         |                     |          |
	|         | --driver=docker                   |                      |         |         |                     |          |
	|         | --container-runtime=containerd    |                      |         |         |                     |          |
	| start   | -o=json --download-only           | download-only-609102 | jenkins | v1.32.0 | 01 Dec 23 18:51 UTC |          |
	|         | -p download-only-609102           |                      |         |         |                     |          |
	|         | --force --alsologtostderr         |                      |         |         |                     |          |
	|         | --kubernetes-version=v1.28.4      |                      |         |         |                     |          |
	|         | --container-runtime=containerd    |                      |         |         |                     |          |
	|         | --driver=docker                   |                      |         |         |                     |          |
	|         | --container-runtime=containerd    |                      |         |         |                     |          |
	| start   | -o=json --download-only           | download-only-609102 | jenkins | v1.32.0 | 01 Dec 23 18:51 UTC |          |
	|         | -p download-only-609102           |                      |         |         |                     |          |
	|         | --force --alsologtostderr         |                      |         |         |                     |          |
	|         | --kubernetes-version=v1.29.0-rc.1 |                      |         |         |                     |          |
	|         | --container-runtime=containerd    |                      |         |         |                     |          |
	|         | --driver=docker                   |                      |         |         |                     |          |
	|         | --container-runtime=containerd    |                      |         |         |                     |          |
	|---------|-----------------------------------|----------------------|---------|---------|---------------------|----------|
	
	* 
	* ==> Last Start <==
	* Log file created at: 2023/12/01 18:51:42
	Running on machine: ip-172-31-31-251
	Binary: Built with gc go1.21.4 for linux/arm64
	Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg
	I1201 18:51:42.209216  258458 out.go:296] Setting OutFile to fd 1 ...
	I1201 18:51:42.209476  258458 out.go:343] TERM=,COLORTERM=, which probably does not support color
	I1201 18:51:42.209487  258458 out.go:309] Setting ErrFile to fd 2...
	I1201 18:51:42.209494  258458 out.go:343] TERM=,COLORTERM=, which probably does not support color
	I1201 18:51:42.209811  258458 root.go:338] Updating PATH: /home/jenkins/minikube-integration/17703-252966/.minikube/bin
	W1201 18:51:42.209946  258458 root.go:314] Error reading config file at /home/jenkins/minikube-integration/17703-252966/.minikube/config/config.json: open /home/jenkins/minikube-integration/17703-252966/.minikube/config/config.json: no such file or directory
	I1201 18:51:42.210271  258458 out.go:303] Setting JSON to true
	I1201 18:51:42.211271  258458 start.go:128] hostinfo: {"hostname":"ip-172-31-31-251","uptime":5649,"bootTime":1701451054,"procs":168,"os":"linux","platform":"ubuntu","platformFamily":"debian","platformVersion":"20.04","kernelVersion":"5.15.0-1050-aws","kernelArch":"aarch64","virtualizationSystem":"","virtualizationRole":"","hostId":"982e3628-3742-4b3e-bb63-ac1b07660ec7"}
	I1201 18:51:42.211383  258458 start.go:138] virtualization:  
	I1201 18:51:42.213923  258458 out.go:97] [download-only-609102] minikube v1.32.0 on Ubuntu 20.04 (arm64)
	I1201 18:51:42.214314  258458 notify.go:220] Checking for updates...
	I1201 18:51:42.218699  258458 out.go:169] MINIKUBE_LOCATION=17703
	I1201 18:51:42.220860  258458 out.go:169] MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
	I1201 18:51:42.223034  258458 out.go:169] KUBECONFIG=/home/jenkins/minikube-integration/17703-252966/kubeconfig
	I1201 18:51:42.224934  258458 out.go:169] MINIKUBE_HOME=/home/jenkins/minikube-integration/17703-252966/.minikube
	I1201 18:51:42.226900  258458 out.go:169] MINIKUBE_BIN=out/minikube-linux-arm64
	W1201 18:51:42.230704  258458 out.go:272] minikube skips various validations when --force is supplied; this may lead to unexpected behavior
	I1201 18:51:42.231739  258458 config.go:182] Loaded profile config "download-only-609102": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.28.4
	W1201 18:51:42.231844  258458 start.go:810] api.Load failed for download-only-609102: filestore "download-only-609102": Docker machine "download-only-609102" does not exist. Use "docker-machine ls" to list machines. Use "docker-machine create" to add a new one.
	I1201 18:51:42.231981  258458 driver.go:392] Setting default libvirt URI to qemu:///system
	W1201 18:51:42.232024  258458 start.go:810] api.Load failed for download-only-609102: filestore "download-only-609102": Docker machine "download-only-609102" does not exist. Use "docker-machine ls" to list machines. Use "docker-machine create" to add a new one.
	I1201 18:51:42.276745  258458 docker.go:122] docker version: linux-24.0.7:Docker Engine - Community
	I1201 18:51:42.276887  258458 cli_runner.go:164] Run: docker system info --format "{{json .}}"
	I1201 18:51:42.359433  258458 info.go:266] docker info: {ID:EOU5:DNGX:XN6V:L2FZ:UXRM:5TWK:EVUR:KC2F:GT7Z:Y4O4:GB77:5PD3 Containers:0 ContainersRunning:0 ContainersPaused:0 ContainersStopped:0 Images:1 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local logentries splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:true BridgeNfIP6Tables:true Debug:false NFd:28 OomKillDisable:true NGoroutines:38 SystemTime:2023-12-01 18:51:42.348919426 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1050-aws OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Archi
tecture:aarch64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:2 MemTotal:8215040000 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ip-172-31-31-251 Labels:[] ExperimentalBuild:false ServerVersion:24.0.7 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:d8f198a4ed8892c764191ef7b3b06d8a2eeb5c7f Expected:d8f198a4ed8892c764191ef7b3b06d8a2eeb5c7f} RuncCommit:{ID:v1.1.10-0-g18a0cb0 Expected:v1.1.10-0-g18a0cb0} InitCommit:{ID:de40ad0 Expected:de40ad0} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:<nil>
ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.11.2] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.21.0]] Warnings:<nil>}}
	I1201 18:51:42.359548  258458 docker.go:295] overlay module found
	I1201 18:51:42.361314  258458 out.go:97] Using the docker driver based on existing profile
	I1201 18:51:42.361353  258458 start.go:298] selected driver: docker
	I1201 18:51:42.361361  258458 start.go:902] validating driver "docker" against &{Name:download-only-609102 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.42-1701387262-17703@sha256:a5458414df1be5e58eff93b3e67e6ecaad7e51ab23139de15714f7345af15e2f Memory:2200 CPUs:2 DiskSize:20000 VMDriver: Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:0 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.28.4 ClusterName:download-only-609102 Namespace:default APIServerName:
minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI: NodeIP: NodePort:8443 NodeName:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.28.4 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnet
ClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 AutoPauseInterval:1m0s GPUs:}
	I1201 18:51:42.361598  258458 cli_runner.go:164] Run: docker system info --format "{{json .}}"
	I1201 18:51:42.438899  258458 info.go:266] docker info: {ID:EOU5:DNGX:XN6V:L2FZ:UXRM:5TWK:EVUR:KC2F:GT7Z:Y4O4:GB77:5PD3 Containers:0 ContainersRunning:0 ContainersPaused:0 ContainersStopped:0 Images:1 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local logentries splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:true BridgeNfIP6Tables:true Debug:false NFd:28 OomKillDisable:true NGoroutines:38 SystemTime:2023-12-01 18:51:42.42911969 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1050-aws OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Archit
ecture:aarch64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:2 MemTotal:8215040000 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ip-172-31-31-251 Labels:[] ExperimentalBuild:false ServerVersion:24.0.7 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:d8f198a4ed8892c764191ef7b3b06d8a2eeb5c7f Expected:d8f198a4ed8892c764191ef7b3b06d8a2eeb5c7f} RuncCommit:{ID:v1.1.10-0-g18a0cb0 Expected:v1.1.10-0-g18a0cb0} InitCommit:{ID:de40ad0 Expected:de40ad0} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:<nil>
ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.11.2] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.21.0]] Warnings:<nil>}}
	I1201 18:51:42.439497  258458 cni.go:84] Creating CNI manager for ""
	I1201 18:51:42.439519  258458 cni.go:143] "docker" driver + "containerd" runtime found, recommending kindnet
	I1201 18:51:42.439535  258458 start_flags.go:323] config:
	{Name:download-only-609102 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.42-1701387262-17703@sha256:a5458414df1be5e58eff93b3e67e6ecaad7e51ab23139de15714f7345af15e2f Memory:2200 CPUs:2 DiskSize:20000 VMDriver: Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:0 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.29.0-rc.1 ClusterName:download-only-609102 Namespace:default APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local Contai
nerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI: NodeIP: NodePort:8443 NodeName:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.28.4 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 AutoPause
Interval:1m0s GPUs:}
	I1201 18:51:42.441404  258458 out.go:97] Starting control plane node download-only-609102 in cluster download-only-609102
	I1201 18:51:42.441428  258458 cache.go:121] Beginning downloading kic base image for docker with containerd
	I1201 18:51:42.443032  258458 out.go:97] Pulling base image ...
	I1201 18:51:42.443054  258458 preload.go:132] Checking if preload exists for k8s version v1.29.0-rc.1 and runtime containerd
	I1201 18:51:42.443225  258458 image.go:79] Checking for gcr.io/k8s-minikube/kicbase-builds:v0.0.42-1701387262-17703@sha256:a5458414df1be5e58eff93b3e67e6ecaad7e51ab23139de15714f7345af15e2f in local docker daemon
	I1201 18:51:42.460708  258458 cache.go:149] Downloading gcr.io/k8s-minikube/kicbase-builds:v0.0.42-1701387262-17703@sha256:a5458414df1be5e58eff93b3e67e6ecaad7e51ab23139de15714f7345af15e2f to local cache
	I1201 18:51:42.460845  258458 image.go:63] Checking for gcr.io/k8s-minikube/kicbase-builds:v0.0.42-1701387262-17703@sha256:a5458414df1be5e58eff93b3e67e6ecaad7e51ab23139de15714f7345af15e2f in local cache directory
	I1201 18:51:42.460865  258458 image.go:66] Found gcr.io/k8s-minikube/kicbase-builds:v0.0.42-1701387262-17703@sha256:a5458414df1be5e58eff93b3e67e6ecaad7e51ab23139de15714f7345af15e2f in local cache directory, skipping pull
	I1201 18:51:42.460871  258458 image.go:105] gcr.io/k8s-minikube/kicbase-builds:v0.0.42-1701387262-17703@sha256:a5458414df1be5e58eff93b3e67e6ecaad7e51ab23139de15714f7345af15e2f exists in cache, skipping pull
	I1201 18:51:42.460881  258458 cache.go:152] successfully saved gcr.io/k8s-minikube/kicbase-builds:v0.0.42-1701387262-17703@sha256:a5458414df1be5e58eff93b3e67e6ecaad7e51ab23139de15714f7345af15e2f as a tarball
	I1201 18:51:42.513233  258458 preload.go:119] Found remote preload: https://storage.googleapis.com/minikube-preloaded-volume-tarballs/v18/v1.29.0-rc.1/preloaded-images-k8s-v18-v1.29.0-rc.1-containerd-overlay2-arm64.tar.lz4
	I1201 18:51:42.513272  258458 cache.go:56] Caching tarball of preloaded images
	I1201 18:51:42.513437  258458 preload.go:132] Checking if preload exists for k8s version v1.29.0-rc.1 and runtime containerd
	I1201 18:51:42.515262  258458 out.go:97] Downloading Kubernetes v1.29.0-rc.1 preload ...
	I1201 18:51:42.515289  258458 preload.go:238] getting checksum for preloaded-images-k8s-v18-v1.29.0-rc.1-containerd-overlay2-arm64.tar.lz4 ...
	I1201 18:51:42.612923  258458 download.go:107] Downloading: https://storage.googleapis.com/minikube-preloaded-volume-tarballs/v18/v1.29.0-rc.1/preloaded-images-k8s-v18-v1.29.0-rc.1-containerd-overlay2-arm64.tar.lz4?checksum=md5:946a8555f0c7d6b7bf66dfada408cc8d -> /home/jenkins/minikube-integration/17703-252966/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.29.0-rc.1-containerd-overlay2-arm64.tar.lz4
	I1201 18:51:55.567265  258458 preload.go:249] saving checksum for preloaded-images-k8s-v18-v1.29.0-rc.1-containerd-overlay2-arm64.tar.lz4 ...
	I1201 18:51:55.567376  258458 preload.go:256] verifying checksum of /home/jenkins/minikube-integration/17703-252966/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.29.0-rc.1-containerd-overlay2-arm64.tar.lz4 ...
	I1201 18:51:56.437375  258458 cache.go:59] Finished verifying existence of preloaded tar for  v1.29.0-rc.1 on containerd
	I1201 18:51:56.437517  258458 profile.go:148] Saving config to /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/download-only-609102/config.json ...
	I1201 18:51:56.437743  258458 preload.go:132] Checking if preload exists for k8s version v1.29.0-rc.1 and runtime containerd
	I1201 18:51:56.438449  258458 download.go:107] Downloading: https://dl.k8s.io/release/v1.29.0-rc.1/bin/linux/arm64/kubectl?checksum=file:https://dl.k8s.io/release/v1.29.0-rc.1/bin/linux/arm64/kubectl.sha256 -> /home/jenkins/minikube-integration/17703-252966/.minikube/cache/linux/arm64/v1.29.0-rc.1/kubectl
	
	* 
	* The control plane node "" does not exist.
	  To start a cluster, run: "minikube start -p download-only-609102"

                                                
                                                
-- /stdout --
aaa_download_only_test.go:173: minikube logs failed with error: exit status 85
--- PASS: TestDownloadOnly/v1.29.0-rc.1/LogsDuration (0.09s)

                                                
                                    
TestDownloadOnly/DeleteAll (0.25s)

                                                
                                                
=== RUN   TestDownloadOnly/DeleteAll
aaa_download_only_test.go:190: (dbg) Run:  out/minikube-linux-arm64 delete --all
--- PASS: TestDownloadOnly/DeleteAll (0.25s)

                                                
                                    
TestDownloadOnly/DeleteAlwaysSucceeds (0.43s)

                                                
                                                
=== RUN   TestDownloadOnly/DeleteAlwaysSucceeds
aaa_download_only_test.go:202: (dbg) Run:  out/minikube-linux-arm64 delete -p download-only-609102
--- PASS: TestDownloadOnly/DeleteAlwaysSucceeds (0.43s)

                                                
                                    
TestBinaryMirror (0.67s)

                                                
                                                
=== RUN   TestBinaryMirror
aaa_download_only_test.go:307: (dbg) Run:  out/minikube-linux-arm64 start --download-only -p binary-mirror-036619 --alsologtostderr --binary-mirror http://127.0.0.1:46513 --driver=docker  --container-runtime=containerd
helpers_test.go:175: Cleaning up "binary-mirror-036619" profile ...
helpers_test.go:178: (dbg) Run:  out/minikube-linux-arm64 delete -p binary-mirror-036619
--- PASS: TestBinaryMirror (0.67s)

                                                
                                    
TestAddons/PreSetup/EnablingAddonOnNonExistingCluster (0.1s)

                                                
                                                
=== RUN   TestAddons/PreSetup/EnablingAddonOnNonExistingCluster
=== PAUSE TestAddons/PreSetup/EnablingAddonOnNonExistingCluster

                                                
                                                

                                                
                                                
=== CONT  TestAddons/PreSetup/EnablingAddonOnNonExistingCluster
addons_test.go:927: (dbg) Run:  out/minikube-linux-arm64 addons enable dashboard -p addons-488129
addons_test.go:927: (dbg) Non-zero exit: out/minikube-linux-arm64 addons enable dashboard -p addons-488129: exit status 85 (95.571506ms)

                                                
                                                
-- stdout --
	* Profile "addons-488129" not found. Run "minikube profile list" to view all profiles.
	  To start a cluster, run: "minikube start -p addons-488129"

                                                
                                                
-- /stdout --
--- PASS: TestAddons/PreSetup/EnablingAddonOnNonExistingCluster (0.10s)

                                                
                                    
TestAddons/PreSetup/DisablingAddonOnNonExistingCluster (0.1s)

                                                
                                                
=== RUN   TestAddons/PreSetup/DisablingAddonOnNonExistingCluster
=== PAUSE TestAddons/PreSetup/DisablingAddonOnNonExistingCluster

                                                
                                                

                                                
                                                
=== CONT  TestAddons/PreSetup/DisablingAddonOnNonExistingCluster
addons_test.go:938: (dbg) Run:  out/minikube-linux-arm64 addons disable dashboard -p addons-488129
addons_test.go:938: (dbg) Non-zero exit: out/minikube-linux-arm64 addons disable dashboard -p addons-488129: exit status 85 (96.797023ms)

                                                
                                                
-- stdout --
	* Profile "addons-488129" not found. Run "minikube profile list" to view all profiles.
	  To start a cluster, run: "minikube start -p addons-488129"

                                                
                                                
-- /stdout --
--- PASS: TestAddons/PreSetup/DisablingAddonOnNonExistingCluster (0.10s)

                                                
                                    
TestAddons/Setup (142.33s)

                                                
                                                
=== RUN   TestAddons/Setup
addons_test.go:109: (dbg) Run:  out/minikube-linux-arm64 start -p addons-488129 --wait=true --memory=4000 --alsologtostderr --addons=registry --addons=metrics-server --addons=volumesnapshots --addons=csi-hostpath-driver --addons=gcp-auth --addons=cloud-spanner --addons=inspektor-gadget --addons=storage-provisioner-rancher --addons=nvidia-device-plugin --driver=docker  --container-runtime=containerd --addons=ingress --addons=ingress-dns
addons_test.go:109: (dbg) Done: out/minikube-linux-arm64 start -p addons-488129 --wait=true --memory=4000 --alsologtostderr --addons=registry --addons=metrics-server --addons=volumesnapshots --addons=csi-hostpath-driver --addons=gcp-auth --addons=cloud-spanner --addons=inspektor-gadget --addons=storage-provisioner-rancher --addons=nvidia-device-plugin --driver=docker  --container-runtime=containerd --addons=ingress --addons=ingress-dns: (2m22.33364339s)
--- PASS: TestAddons/Setup (142.33s)

                                                
                                    
TestAddons/parallel/Registry (15.22s)

                                                
                                                
=== RUN   TestAddons/parallel/Registry
=== PAUSE TestAddons/parallel/Registry

                                                
                                                

                                                
                                                
=== CONT  TestAddons/parallel/Registry
addons_test.go:329: registry stabilized in 61.566708ms
addons_test.go:331: (dbg) TestAddons/parallel/Registry: waiting 6m0s for pods matching "actual-registry=true" in namespace "kube-system" ...
helpers_test.go:344: "registry-rc92j" [2efe823d-0a41-44bf-8c9f-092cbf70ad2d] Running
addons_test.go:331: (dbg) TestAddons/parallel/Registry: actual-registry=true healthy within 5.022706328s
addons_test.go:334: (dbg) TestAddons/parallel/Registry: waiting 10m0s for pods matching "registry-proxy=true" in namespace "kube-system" ...
helpers_test.go:344: "registry-proxy-wbp5h" [fbc66db0-d824-4235-9b7f-01ca7a98db44] Running
addons_test.go:334: (dbg) TestAddons/parallel/Registry: registry-proxy=true healthy within 5.016036776s
addons_test.go:339: (dbg) Run:  kubectl --context addons-488129 delete po -l run=registry-test --now
addons_test.go:344: (dbg) Run:  kubectl --context addons-488129 run --rm registry-test --restart=Never --image=gcr.io/k8s-minikube/busybox -it -- sh -c "wget --spider -S http://registry.kube-system.svc.cluster.local"
addons_test.go:344: (dbg) Done: kubectl --context addons-488129 run --rm registry-test --restart=Never --image=gcr.io/k8s-minikube/busybox -it -- sh -c "wget --spider -S http://registry.kube-system.svc.cluster.local": (3.852008081s)
addons_test.go:358: (dbg) Run:  out/minikube-linux-arm64 -p addons-488129 ip
2023/12/01 18:54:38 [DEBUG] GET http://192.168.49.2:5000
addons_test.go:387: (dbg) Run:  out/minikube-linux-arm64 -p addons-488129 addons disable registry --alsologtostderr -v=1
--- PASS: TestAddons/parallel/Registry (15.22s)

                                                
                                    
TestAddons/parallel/InspektorGadget (11.04s)

                                                
                                                
=== RUN   TestAddons/parallel/InspektorGadget
=== PAUSE TestAddons/parallel/InspektorGadget

                                                
                                                

                                                
                                                
=== CONT  TestAddons/parallel/InspektorGadget
addons_test.go:837: (dbg) TestAddons/parallel/InspektorGadget: waiting 8m0s for pods matching "k8s-app=gadget" in namespace "gadget" ...
helpers_test.go:344: "gadget-qbdmc" [5ee52c59-7e52-49fc-85f2-abc088ec9a88] Running
addons_test.go:837: (dbg) TestAddons/parallel/InspektorGadget: k8s-app=gadget healthy within 5.0146892s
addons_test.go:840: (dbg) Run:  out/minikube-linux-arm64 addons disable inspektor-gadget -p addons-488129
addons_test.go:840: (dbg) Done: out/minikube-linux-arm64 addons disable inspektor-gadget -p addons-488129: (6.026972184s)
--- PASS: TestAddons/parallel/InspektorGadget (11.04s)

                                                
                                    
TestAddons/parallel/MetricsServer (6.18s)

                                                
                                                
=== RUN   TestAddons/parallel/MetricsServer
=== PAUSE TestAddons/parallel/MetricsServer

                                                
                                                

                                                
                                                
=== CONT  TestAddons/parallel/MetricsServer
addons_test.go:406: metrics-server stabilized in 4.515813ms
addons_test.go:408: (dbg) TestAddons/parallel/MetricsServer: waiting 6m0s for pods matching "k8s-app=metrics-server" in namespace "kube-system" ...
helpers_test.go:344: "metrics-server-7c66d45ddc-zppw9" [f20afbef-3118-48da-90f0-2c4f64e46b0b] Running
addons_test.go:408: (dbg) TestAddons/parallel/MetricsServer: k8s-app=metrics-server healthy within 5.014104639s
addons_test.go:414: (dbg) Run:  kubectl --context addons-488129 top pods -n kube-system
addons_test.go:431: (dbg) Run:  out/minikube-linux-arm64 -p addons-488129 addons disable metrics-server --alsologtostderr -v=1
addons_test.go:431: (dbg) Done: out/minikube-linux-arm64 -p addons-488129 addons disable metrics-server --alsologtostderr -v=1: (1.055525444s)
--- PASS: TestAddons/parallel/MetricsServer (6.18s)

                                                
                                    
TestAddons/parallel/CSI (59.99s)

                                                
                                                
=== RUN   TestAddons/parallel/CSI
=== PAUSE TestAddons/parallel/CSI

                                                
                                                

                                                
                                                
=== CONT  TestAddons/parallel/CSI
addons_test.go:560: csi-hostpath-driver pods stabilized in 61.799184ms
addons_test.go:563: (dbg) Run:  kubectl --context addons-488129 create -f testdata/csi-hostpath-driver/pvc.yaml
addons_test.go:568: (dbg) TestAddons/parallel/CSI: waiting 6m0s for pvc "hpvc" in namespace "default" ...
helpers_test.go:394: (dbg) Run:  kubectl --context addons-488129 get pvc hpvc -o jsonpath={.status.phase} -n default
helpers_test.go:394: (dbg) Run:  kubectl --context addons-488129 get pvc hpvc -o jsonpath={.status.phase} -n default
helpers_test.go:394: (dbg) Run:  kubectl --context addons-488129 get pvc hpvc -o jsonpath={.status.phase} -n default
helpers_test.go:394: (dbg) Run:  kubectl --context addons-488129 get pvc hpvc -o jsonpath={.status.phase} -n default
helpers_test.go:394: (dbg) Run:  kubectl --context addons-488129 get pvc hpvc -o jsonpath={.status.phase} -n default
helpers_test.go:394: (dbg) Run:  kubectl --context addons-488129 get pvc hpvc -o jsonpath={.status.phase} -n default
helpers_test.go:394: (dbg) Run:  kubectl --context addons-488129 get pvc hpvc -o jsonpath={.status.phase} -n default
helpers_test.go:394: (dbg) Run:  kubectl --context addons-488129 get pvc hpvc -o jsonpath={.status.phase} -n default
helpers_test.go:394: (dbg) Run:  kubectl --context addons-488129 get pvc hpvc -o jsonpath={.status.phase} -n default
helpers_test.go:394: (dbg) Run:  kubectl --context addons-488129 get pvc hpvc -o jsonpath={.status.phase} -n default
helpers_test.go:394: (dbg) Run:  kubectl --context addons-488129 get pvc hpvc -o jsonpath={.status.phase} -n default
helpers_test.go:394: (dbg) Run:  kubectl --context addons-488129 get pvc hpvc -o jsonpath={.status.phase} -n default
helpers_test.go:394: (dbg) Run:  kubectl --context addons-488129 get pvc hpvc -o jsonpath={.status.phase} -n default
helpers_test.go:394: (dbg) Run:  kubectl --context addons-488129 get pvc hpvc -o jsonpath={.status.phase} -n default
helpers_test.go:394: (dbg) Run:  kubectl --context addons-488129 get pvc hpvc -o jsonpath={.status.phase} -n default
helpers_test.go:394: (dbg) Run:  kubectl --context addons-488129 get pvc hpvc -o jsonpath={.status.phase} -n default
helpers_test.go:394: (dbg) Run:  kubectl --context addons-488129 get pvc hpvc -o jsonpath={.status.phase} -n default
helpers_test.go:394: (dbg) Run:  kubectl --context addons-488129 get pvc hpvc -o jsonpath={.status.phase} -n default
helpers_test.go:394: (dbg) Run:  kubectl --context addons-488129 get pvc hpvc -o jsonpath={.status.phase} -n default
helpers_test.go:394: (dbg) Run:  kubectl --context addons-488129 get pvc hpvc -o jsonpath={.status.phase} -n default
helpers_test.go:394: (dbg) Run:  kubectl --context addons-488129 get pvc hpvc -o jsonpath={.status.phase} -n default
helpers_test.go:394: (dbg) Run:  kubectl --context addons-488129 get pvc hpvc -o jsonpath={.status.phase} -n default
helpers_test.go:394: (dbg) Run:  kubectl --context addons-488129 get pvc hpvc -o jsonpath={.status.phase} -n default
helpers_test.go:394: (dbg) Run:  kubectl --context addons-488129 get pvc hpvc -o jsonpath={.status.phase} -n default
helpers_test.go:394: (dbg) Run:  kubectl --context addons-488129 get pvc hpvc -o jsonpath={.status.phase} -n default
helpers_test.go:394: (dbg) Run:  kubectl --context addons-488129 get pvc hpvc -o jsonpath={.status.phase} -n default
helpers_test.go:394: (dbg) Run:  kubectl --context addons-488129 get pvc hpvc -o jsonpath={.status.phase} -n default
addons_test.go:573: (dbg) Run:  kubectl --context addons-488129 create -f testdata/csi-hostpath-driver/pv-pod.yaml
addons_test.go:578: (dbg) TestAddons/parallel/CSI: waiting 6m0s for pods matching "app=task-pv-pod" in namespace "default" ...
helpers_test.go:344: "task-pv-pod" [971afe9b-c20a-4e8c-9e3b-2efedb6a3a5a] Pending
helpers_test.go:344: "task-pv-pod" [971afe9b-c20a-4e8c-9e3b-2efedb6a3a5a] Pending / Ready:ContainersNotReady (containers with unready status: [task-pv-container]) / ContainersReady:ContainersNotReady (containers with unready status: [task-pv-container])
helpers_test.go:344: "task-pv-pod" [971afe9b-c20a-4e8c-9e3b-2efedb6a3a5a] Running
addons_test.go:578: (dbg) TestAddons/parallel/CSI: app=task-pv-pod healthy within 10.024545529s
addons_test.go:583: (dbg) Run:  kubectl --context addons-488129 create -f testdata/csi-hostpath-driver/snapshot.yaml
addons_test.go:588: (dbg) TestAddons/parallel/CSI: waiting 6m0s for volume snapshot "new-snapshot-demo" in namespace "default" ...
helpers_test.go:419: (dbg) Run:  kubectl --context addons-488129 get volumesnapshot new-snapshot-demo -o jsonpath={.status.readyToUse} -n default
helpers_test.go:419: (dbg) Run:  kubectl --context addons-488129 get volumesnapshot new-snapshot-demo -o jsonpath={.status.readyToUse} -n default
helpers_test.go:419: (dbg) Run:  kubectl --context addons-488129 get volumesnapshot new-snapshot-demo -o jsonpath={.status.readyToUse} -n default
addons_test.go:593: (dbg) Run:  kubectl --context addons-488129 delete pod task-pv-pod
addons_test.go:599: (dbg) Run:  kubectl --context addons-488129 delete pvc hpvc
addons_test.go:605: (dbg) Run:  kubectl --context addons-488129 create -f testdata/csi-hostpath-driver/pvc-restore.yaml
addons_test.go:610: (dbg) TestAddons/parallel/CSI: waiting 6m0s for pvc "hpvc-restore" in namespace "default" ...
helpers_test.go:394: (dbg) Run:  kubectl --context addons-488129 get pvc hpvc-restore -o jsonpath={.status.phase} -n default
helpers_test.go:394: (dbg) Run:  kubectl --context addons-488129 get pvc hpvc-restore -o jsonpath={.status.phase} -n default
helpers_test.go:394: (dbg) Run:  kubectl --context addons-488129 get pvc hpvc-restore -o jsonpath={.status.phase} -n default
helpers_test.go:394: (dbg) Run:  kubectl --context addons-488129 get pvc hpvc-restore -o jsonpath={.status.phase} -n default
helpers_test.go:394: (dbg) Run:  kubectl --context addons-488129 get pvc hpvc-restore -o jsonpath={.status.phase} -n default
helpers_test.go:394: (dbg) Run:  kubectl --context addons-488129 get pvc hpvc-restore -o jsonpath={.status.phase} -n default
addons_test.go:615: (dbg) Run:  kubectl --context addons-488129 create -f testdata/csi-hostpath-driver/pv-pod-restore.yaml
addons_test.go:620: (dbg) TestAddons/parallel/CSI: waiting 6m0s for pods matching "app=task-pv-pod-restore" in namespace "default" ...
helpers_test.go:344: "task-pv-pod-restore" [071d29e0-2564-47d3-9a78-5cae24c5384e] Pending
helpers_test.go:344: "task-pv-pod-restore" [071d29e0-2564-47d3-9a78-5cae24c5384e] Pending / Ready:ContainersNotReady (containers with unready status: [task-pv-container]) / ContainersReady:ContainersNotReady (containers with unready status: [task-pv-container])
helpers_test.go:344: "task-pv-pod-restore" [071d29e0-2564-47d3-9a78-5cae24c5384e] Running
addons_test.go:620: (dbg) TestAddons/parallel/CSI: app=task-pv-pod-restore healthy within 7.024324163s
addons_test.go:625: (dbg) Run:  kubectl --context addons-488129 delete pod task-pv-pod-restore
addons_test.go:625: (dbg) Done: kubectl --context addons-488129 delete pod task-pv-pod-restore: (1.061793939s)
addons_test.go:629: (dbg) Run:  kubectl --context addons-488129 delete pvc hpvc-restore
addons_test.go:633: (dbg) Run:  kubectl --context addons-488129 delete volumesnapshot new-snapshot-demo
addons_test.go:637: (dbg) Run:  out/minikube-linux-arm64 -p addons-488129 addons disable csi-hostpath-driver --alsologtostderr -v=1
addons_test.go:637: (dbg) Done: out/minikube-linux-arm64 -p addons-488129 addons disable csi-hostpath-driver --alsologtostderr -v=1: (6.854465665s)
addons_test.go:641: (dbg) Run:  out/minikube-linux-arm64 -p addons-488129 addons disable volumesnapshots --alsologtostderr -v=1
--- PASS: TestAddons/parallel/CSI (59.99s)

                                                
                                    
TestAddons/parallel/Headlamp (12.65s)

                                                
                                                
=== RUN   TestAddons/parallel/Headlamp
=== PAUSE TestAddons/parallel/Headlamp

                                                
                                                

                                                
                                                
=== CONT  TestAddons/parallel/Headlamp
addons_test.go:823: (dbg) Run:  out/minikube-linux-arm64 addons enable headlamp -p addons-488129 --alsologtostderr -v=1
addons_test.go:823: (dbg) Done: out/minikube-linux-arm64 addons enable headlamp -p addons-488129 --alsologtostderr -v=1: (1.620150589s)
addons_test.go:828: (dbg) TestAddons/parallel/Headlamp: waiting 8m0s for pods matching "app.kubernetes.io/name=headlamp" in namespace "headlamp" ...
helpers_test.go:344: "headlamp-777fd4b855-n5kz2" [f065637a-a928-46e1-9e65-1df8f2420e1c] Pending
helpers_test.go:344: "headlamp-777fd4b855-n5kz2" [f065637a-a928-46e1-9e65-1df8f2420e1c] Pending / Ready:ContainersNotReady (containers with unready status: [headlamp]) / ContainersReady:ContainersNotReady (containers with unready status: [headlamp])
helpers_test.go:344: "headlamp-777fd4b855-n5kz2" [f065637a-a928-46e1-9e65-1df8f2420e1c] Running
addons_test.go:828: (dbg) TestAddons/parallel/Headlamp: app.kubernetes.io/name=headlamp healthy within 11.031586737s
--- PASS: TestAddons/parallel/Headlamp (12.65s)

                                                
                                    
TestAddons/parallel/CloudSpanner (5.71s)

                                                
                                                
=== RUN   TestAddons/parallel/CloudSpanner
=== PAUSE TestAddons/parallel/CloudSpanner

                                                
                                                

                                                
                                                
=== CONT  TestAddons/parallel/CloudSpanner
addons_test.go:856: (dbg) TestAddons/parallel/CloudSpanner: waiting 6m0s for pods matching "app=cloud-spanner-emulator" in namespace "default" ...
helpers_test.go:344: "cloud-spanner-emulator-5649c69bf6-pd4m9" [58465c6c-6961-4763-b520-0e27e7c20562] Running
addons_test.go:856: (dbg) TestAddons/parallel/CloudSpanner: app=cloud-spanner-emulator healthy within 5.01127523s
addons_test.go:859: (dbg) Run:  out/minikube-linux-arm64 addons disable cloud-spanner -p addons-488129
--- PASS: TestAddons/parallel/CloudSpanner (5.71s)

                                                
                                    
TestAddons/parallel/LocalPath (57.17s)

                                                
                                                
=== RUN   TestAddons/parallel/LocalPath
=== PAUSE TestAddons/parallel/LocalPath

                                                
                                                

                                                
                                                
=== CONT  TestAddons/parallel/LocalPath
addons_test.go:872: (dbg) Run:  kubectl --context addons-488129 apply -f testdata/storage-provisioner-rancher/pvc.yaml
addons_test.go:878: (dbg) Run:  kubectl --context addons-488129 apply -f testdata/storage-provisioner-rancher/pod.yaml
addons_test.go:882: (dbg) TestAddons/parallel/LocalPath: waiting 5m0s for pvc "test-pvc" in namespace "default" ...
helpers_test.go:394: (dbg) Run:  kubectl --context addons-488129 get pvc test-pvc -o jsonpath={.status.phase} -n default
helpers_test.go:394: (dbg) Run:  kubectl --context addons-488129 get pvc test-pvc -o jsonpath={.status.phase} -n default
helpers_test.go:394: (dbg) Run:  kubectl --context addons-488129 get pvc test-pvc -o jsonpath={.status.phase} -n default
helpers_test.go:394: (dbg) Run:  kubectl --context addons-488129 get pvc test-pvc -o jsonpath={.status.phase} -n default
helpers_test.go:394: (dbg) Run:  kubectl --context addons-488129 get pvc test-pvc -o jsonpath={.status.phase} -n default
helpers_test.go:394: (dbg) Run:  kubectl --context addons-488129 get pvc test-pvc -o jsonpath={.status.phase} -n default
addons_test.go:885: (dbg) TestAddons/parallel/LocalPath: waiting 3m0s for pods matching "run=test-local-path" in namespace "default" ...
helpers_test.go:344: "test-local-path" [b486ca15-aca3-4d0e-9cc9-624e6c169b45] Pending
helpers_test.go:344: "test-local-path" [b486ca15-aca3-4d0e-9cc9-624e6c169b45] Pending / Ready:ContainersNotReady (containers with unready status: [busybox]) / ContainersReady:ContainersNotReady (containers with unready status: [busybox])
helpers_test.go:344: "test-local-path" [b486ca15-aca3-4d0e-9cc9-624e6c169b45] Pending: Initialized:PodCompleted / Ready:PodCompleted / ContainersReady:PodCompleted
helpers_test.go:344: "test-local-path" [b486ca15-aca3-4d0e-9cc9-624e6c169b45] Succeeded: Initialized:PodCompleted / Ready:PodCompleted / ContainersReady:PodCompleted
addons_test.go:885: (dbg) TestAddons/parallel/LocalPath: run=test-local-path healthy within 8.009611245s
addons_test.go:890: (dbg) Run:  kubectl --context addons-488129 get pvc test-pvc -o=json
addons_test.go:899: (dbg) Run:  out/minikube-linux-arm64 -p addons-488129 ssh "cat /opt/local-path-provisioner/pvc-a606cbf4-5360-4438-af2b-a05f87cae59e_default_test-pvc/file1"
addons_test.go:911: (dbg) Run:  kubectl --context addons-488129 delete pod test-local-path
addons_test.go:915: (dbg) Run:  kubectl --context addons-488129 delete pvc test-pvc
addons_test.go:919: (dbg) Run:  out/minikube-linux-arm64 -p addons-488129 addons disable storage-provisioner-rancher --alsologtostderr -v=1
addons_test.go:919: (dbg) Done: out/minikube-linux-arm64 -p addons-488129 addons disable storage-provisioner-rancher --alsologtostderr -v=1: (43.476011612s)
--- PASS: TestAddons/parallel/LocalPath (57.17s)
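
The flow above can be replayed by hand. A rough sketch, reusing the profile from this run; the manifest file names are placeholders for the test's testdata/storage-provisioner-rancher manifests, and <pv-name> stands for the claim's generated volume name (visible in kubectl get pvc test-pvc -o json):

    # create the claim and the pod that writes to it (manifest paths are placeholders)
    kubectl --context addons-488129 apply -f pvc.yaml
    kubectl --context addons-488129 apply -f pod.yaml

    # poll the claim the way helpers_test.go:394 does above, until it reports Bound
    until [ "$(kubectl --context addons-488129 get pvc test-pvc -o jsonpath='{.status.phase}')" = "Bound" ]; do
      sleep 5
    done

    # read the written file back from the node, then clean up
    minikube -p addons-488129 ssh "cat /opt/local-path-provisioner/<pv-name>_default_test-pvc/file1"
    kubectl --context addons-488129 delete pod test-local-path
    kubectl --context addons-488129 delete pvc test-pvc
    minikube -p addons-488129 addons disable storage-provisioner-rancher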

                                                
                                    
TestAddons/parallel/NvidiaDevicePlugin (5.72s)

                                                
                                                
=== RUN   TestAddons/parallel/NvidiaDevicePlugin
=== PAUSE TestAddons/parallel/NvidiaDevicePlugin

                                                
                                                

                                                
                                                
=== CONT  TestAddons/parallel/NvidiaDevicePlugin
addons_test.go:951: (dbg) TestAddons/parallel/NvidiaDevicePlugin: waiting 6m0s for pods matching "name=nvidia-device-plugin-ds" in namespace "kube-system" ...
helpers_test.go:344: "nvidia-device-plugin-daemonset-c8qcb" [d66b677c-4402-4f2d-972e-f1971fa0edab] Running
addons_test.go:951: (dbg) TestAddons/parallel/NvidiaDevicePlugin: name=nvidia-device-plugin-ds healthy within 5.016643839s
addons_test.go:954: (dbg) Run:  out/minikube-linux-arm64 addons disable nvidia-device-plugin -p addons-488129
--- PASS: TestAddons/parallel/NvidiaDevicePlugin (5.72s)

                                                
                                    
TestAddons/serial/GCPAuth/Namespaces (0.19s)

                                                
                                                
=== RUN   TestAddons/serial/GCPAuth/Namespaces
addons_test.go:649: (dbg) Run:  kubectl --context addons-488129 create ns new-namespace
addons_test.go:663: (dbg) Run:  kubectl --context addons-488129 get secret gcp-auth -n new-namespace
--- PASS: TestAddons/serial/GCPAuth/Namespaces (0.19s)

                                                
                                    
TestAddons/StoppedEnableDisable (12.65s)

                                                
                                                
=== RUN   TestAddons/StoppedEnableDisable
addons_test.go:171: (dbg) Run:  out/minikube-linux-arm64 stop -p addons-488129
addons_test.go:171: (dbg) Done: out/minikube-linux-arm64 stop -p addons-488129: (12.317813931s)
addons_test.go:175: (dbg) Run:  out/minikube-linux-arm64 addons enable dashboard -p addons-488129
addons_test.go:179: (dbg) Run:  out/minikube-linux-arm64 addons disable dashboard -p addons-488129
addons_test.go:184: (dbg) Run:  out/minikube-linux-arm64 addons disable gvisor -p addons-488129
--- PASS: TestAddons/StoppedEnableDisable (12.65s)
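
The same stop-then-toggle sequence can be replayed outside the harness; the commands below are the ones the test runs, against the profile from this run:

    minikube stop -p addons-488129
    # addon toggles are still accepted while the profile is stopped
    minikube addons enable dashboard -p addons-488129
    minikube addons disable dashboard -p addons-488129
    minikube addons disable gvisor -p addons-488129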

                                                
                                    
TestCertOptions (37.56s)

                                                
                                                
=== RUN   TestCertOptions
=== PAUSE TestCertOptions

                                                
                                                

                                                
                                                
=== CONT  TestCertOptions
cert_options_test.go:49: (dbg) Run:  out/minikube-linux-arm64 start -p cert-options-911679 --memory=2048 --apiserver-ips=127.0.0.1 --apiserver-ips=192.168.15.15 --apiserver-names=localhost --apiserver-names=www.google.com --apiserver-port=8555 --driver=docker  --container-runtime=containerd
cert_options_test.go:49: (dbg) Done: out/minikube-linux-arm64 start -p cert-options-911679 --memory=2048 --apiserver-ips=127.0.0.1 --apiserver-ips=192.168.15.15 --apiserver-names=localhost --apiserver-names=www.google.com --apiserver-port=8555 --driver=docker  --container-runtime=containerd: (34.700187243s)
cert_options_test.go:60: (dbg) Run:  out/minikube-linux-arm64 -p cert-options-911679 ssh "openssl x509 -text -noout -in /var/lib/minikube/certs/apiserver.crt"
cert_options_test.go:88: (dbg) Run:  kubectl --context cert-options-911679 config view
cert_options_test.go:100: (dbg) Run:  out/minikube-linux-arm64 ssh -p cert-options-911679 -- "sudo cat /etc/kubernetes/admin.conf"
helpers_test.go:175: Cleaning up "cert-options-911679" profile ...
helpers_test.go:178: (dbg) Run:  out/minikube-linux-arm64 delete -p cert-options-911679
helpers_test.go:178: (dbg) Done: out/minikube-linux-arm64 delete -p cert-options-911679: (2.095113837s)
--- PASS: TestCertOptions (37.56s)
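
The test amounts to: start a cluster with extra apiserver SANs and a non-default apiserver port, then confirm both ended up in the serving certificate and the kubeconfig. A condensed sketch with a hypothetical profile name; the grep targets are simply the values passed on the start line:

    minikube start -p cert-demo --memory=2048 \
      --apiserver-ips=127.0.0.1 --apiserver-ips=192.168.15.15 \
      --apiserver-names=localhost --apiserver-names=www.google.com \
      --apiserver-port=8555 --driver=docker --container-runtime=containerd
    # the extra IPs and names should appear as SANs in the generated apiserver cert
    minikube -p cert-demo ssh "openssl x509 -text -noout -in /var/lib/minikube/certs/apiserver.crt" \
      | grep -E '192\.168\.15\.15|www\.google\.com'
    # and the custom port should show up in the kubeconfig server URL
    kubectl --context cert-demo config view | grep 8555
    minikube delete -p cert-demo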

                                                
                                    
TestCertExpiration (225.27s)

                                                
                                                
=== RUN   TestCertExpiration
=== PAUSE TestCertExpiration

                                                
                                                

                                                
                                                
=== CONT  TestCertExpiration
cert_options_test.go:123: (dbg) Run:  out/minikube-linux-arm64 start -p cert-expiration-813229 --memory=2048 --cert-expiration=3m --driver=docker  --container-runtime=containerd
cert_options_test.go:123: (dbg) Done: out/minikube-linux-arm64 start -p cert-expiration-813229 --memory=2048 --cert-expiration=3m --driver=docker  --container-runtime=containerd: (36.825547928s)
cert_options_test.go:131: (dbg) Run:  out/minikube-linux-arm64 start -p cert-expiration-813229 --memory=2048 --cert-expiration=8760h --driver=docker  --container-runtime=containerd
cert_options_test.go:131: (dbg) Done: out/minikube-linux-arm64 start -p cert-expiration-813229 --memory=2048 --cert-expiration=8760h --driver=docker  --container-runtime=containerd: (6.030994366s)
helpers_test.go:175: Cleaning up "cert-expiration-813229" profile ...
helpers_test.go:178: (dbg) Run:  out/minikube-linux-arm64 delete -p cert-expiration-813229
helpers_test.go:178: (dbg) Done: out/minikube-linux-arm64 delete -p cert-expiration-813229: (2.416277551s)
--- PASS: TestCertExpiration (225.27s)
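
A minimal replay of the expiration flow: create the profile with very short-lived certificates, wait, then restart it with a long expiry so minikube regenerates them (profile name is hypothetical):

    minikube start -p cert-exp-demo --memory=2048 --cert-expiration=3m \
      --driver=docker --container-runtime=containerd
    # ...let the 3m certificates age, then restart with a one-year expiry
    minikube start -p cert-exp-demo --memory=2048 --cert-expiration=8760h \
      --driver=docker --container-runtime=containerd
    minikube delete -p cert-exp-demo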

                                                
                                    
TestForceSystemdFlag (35.52s)

                                                
                                                
=== RUN   TestForceSystemdFlag
=== PAUSE TestForceSystemdFlag

                                                
                                                

                                                
                                                
=== CONT  TestForceSystemdFlag
docker_test.go:91: (dbg) Run:  out/minikube-linux-arm64 start -p force-systemd-flag-173130 --memory=2048 --force-systemd --alsologtostderr -v=5 --driver=docker  --container-runtime=containerd
docker_test.go:91: (dbg) Done: out/minikube-linux-arm64 start -p force-systemd-flag-173130 --memory=2048 --force-systemd --alsologtostderr -v=5 --driver=docker  --container-runtime=containerd: (33.080244676s)
docker_test.go:121: (dbg) Run:  out/minikube-linux-arm64 -p force-systemd-flag-173130 ssh "cat /etc/containerd/config.toml"
helpers_test.go:175: Cleaning up "force-systemd-flag-173130" profile ...
helpers_test.go:178: (dbg) Run:  out/minikube-linux-arm64 delete -p force-systemd-flag-173130
helpers_test.go:178: (dbg) Done: out/minikube-linux-arm64 delete -p force-systemd-flag-173130: (2.064902299s)
--- PASS: TestForceSystemdFlag (35.52s)

                                                
                                    
TestForceSystemdEnv (42.1s)

                                                
                                                
=== RUN   TestForceSystemdEnv
=== PAUSE TestForceSystemdEnv

                                                
                                                

                                                
                                                
=== CONT  TestForceSystemdEnv
docker_test.go:155: (dbg) Run:  out/minikube-linux-arm64 start -p force-systemd-env-068622 --memory=2048 --alsologtostderr -v=5 --driver=docker  --container-runtime=containerd
E1201 19:29:24.386135  258301 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/addons-488129/client.crt: no such file or directory
docker_test.go:155: (dbg) Done: out/minikube-linux-arm64 start -p force-systemd-env-068622 --memory=2048 --alsologtostderr -v=5 --driver=docker  --container-runtime=containerd: (39.37004084s)
docker_test.go:121: (dbg) Run:  out/minikube-linux-arm64 -p force-systemd-env-068622 ssh "cat /etc/containerd/config.toml"
helpers_test.go:175: Cleaning up "force-systemd-env-068622" profile ...
helpers_test.go:178: (dbg) Run:  out/minikube-linux-arm64 delete -p force-systemd-env-068622
helpers_test.go:178: (dbg) Done: out/minikube-linux-arm64 delete -p force-systemd-env-068622: (2.279238897s)
--- PASS: TestForceSystemdEnv (42.10s)
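
Both force-systemd tests reduce to starting a cluster with systemd cgroups forced and then reading the containerd config that minikube generated. A sketch: the flag form is shown, the env-variable spelling is taken from the MINIKUBE_FORCE_SYSTEMD line minikube itself prints, and grepping for SystemdCgroup is an assumption about what the test asserts on:

    minikube start -p systemd-demo --memory=2048 --force-systemd \
      --driver=docker --container-runtime=containerd
    # env form would be: MINIKUBE_FORCE_SYSTEMD=true minikube start -p systemd-demo ...
    minikube -p systemd-demo ssh "cat /etc/containerd/config.toml" | grep SystemdCgroup
    minikube delete -p systemd-demo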

                                                
                                    
TestDockerEnvContainerd (49.07s)

                                                
                                                
=== RUN   TestDockerEnvContainerd
docker_test.go:170: running with containerd true linux arm64
docker_test.go:181: (dbg) Run:  out/minikube-linux-arm64 start -p dockerenv-321516 --driver=docker  --container-runtime=containerd
docker_test.go:181: (dbg) Done: out/minikube-linux-arm64 start -p dockerenv-321516 --driver=docker  --container-runtime=containerd: (32.421554651s)
docker_test.go:189: (dbg) Run:  /bin/bash -c "out/minikube-linux-arm64 docker-env --ssh-host --ssh-add -p dockerenv-321516"
docker_test.go:189: (dbg) Done: /bin/bash -c "out/minikube-linux-arm64 docker-env --ssh-host --ssh-add -p dockerenv-321516": (1.442705303s)
docker_test.go:220: (dbg) Run:  /bin/bash -c "SSH_AUTH_SOCK="/tmp/ssh-EfzG2hdRVJS5/agent.276099" SSH_AGENT_PID="276100" DOCKER_HOST=ssh://docker@127.0.0.1:33088 docker version"
docker_test.go:243: (dbg) Run:  /bin/bash -c "SSH_AUTH_SOCK="/tmp/ssh-EfzG2hdRVJS5/agent.276099" SSH_AGENT_PID="276100" DOCKER_HOST=ssh://docker@127.0.0.1:33088 DOCKER_BUILDKIT=0 docker build -t local/minikube-dockerenv-containerd-test:latest testdata/docker-env"
docker_test.go:243: (dbg) Done: /bin/bash -c "SSH_AUTH_SOCK="/tmp/ssh-EfzG2hdRVJS5/agent.276099" SSH_AGENT_PID="276100" DOCKER_HOST=ssh://docker@127.0.0.1:33088 DOCKER_BUILDKIT=0 docker build -t local/minikube-dockerenv-containerd-test:latest testdata/docker-env": (1.818401127s)
docker_test.go:250: (dbg) Run:  /bin/bash -c "SSH_AUTH_SOCK="/tmp/ssh-EfzG2hdRVJS5/agent.276099" SSH_AGENT_PID="276100" DOCKER_HOST=ssh://docker@127.0.0.1:33088 docker image ls"
helpers_test.go:175: Cleaning up "dockerenv-321516" profile ...
helpers_test.go:178: (dbg) Run:  out/minikube-linux-arm64 delete -p dockerenv-321516
helpers_test.go:178: (dbg) Done: out/minikube-linux-arm64 delete -p dockerenv-321516: (2.072593885s)
--- PASS: TestDockerEnvContainerd (49.07s)
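
The docker-env flow above points the host docker CLI at the dockerd inside the minikube node over SSH. Roughly, with a hypothetical profile name (the agent socket and port in the log vary per run, and the eval form is the usual way to consume docker-env output):

    minikube start -p dockerenv-demo --driver=docker --container-runtime=containerd
    # exports DOCKER_HOST=ssh://docker@127.0.0.1:<port> and loads the node key into an SSH agent
    eval "$(minikube docker-env --ssh-host --ssh-add -p dockerenv-demo)"
    docker version
    DOCKER_BUILDKIT=0 docker build -t local/minikube-dockerenv-containerd-test:latest testdata/docker-env
    docker image ls
    minikube delete -p dockerenv-demo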

                                                
                                    
TestErrorSpam/setup (33.3s)

                                                
                                                
=== RUN   TestErrorSpam/setup
error_spam_test.go:81: (dbg) Run:  out/minikube-linux-arm64 start -p nospam-163628 -n=1 --memory=2250 --wait=false --log_dir=/tmp/nospam-163628 --driver=docker  --container-runtime=containerd
error_spam_test.go:81: (dbg) Done: out/minikube-linux-arm64 start -p nospam-163628 -n=1 --memory=2250 --wait=false --log_dir=/tmp/nospam-163628 --driver=docker  --container-runtime=containerd: (33.296669416s)
--- PASS: TestErrorSpam/setup (33.30s)

                                                
                                    
TestErrorSpam/start (0.93s)

                                                
                                                
=== RUN   TestErrorSpam/start
error_spam_test.go:216: Cleaning up 1 logfile(s) ...
error_spam_test.go:159: (dbg) Run:  out/minikube-linux-arm64 -p nospam-163628 --log_dir /tmp/nospam-163628 start --dry-run
error_spam_test.go:159: (dbg) Run:  out/minikube-linux-arm64 -p nospam-163628 --log_dir /tmp/nospam-163628 start --dry-run
error_spam_test.go:182: (dbg) Run:  out/minikube-linux-arm64 -p nospam-163628 --log_dir /tmp/nospam-163628 start --dry-run
--- PASS: TestErrorSpam/start (0.93s)

                                                
                                    
TestErrorSpam/status (1.14s)

                                                
                                                
=== RUN   TestErrorSpam/status
error_spam_test.go:216: Cleaning up 0 logfile(s) ...
error_spam_test.go:159: (dbg) Run:  out/minikube-linux-arm64 -p nospam-163628 --log_dir /tmp/nospam-163628 status
error_spam_test.go:159: (dbg) Run:  out/minikube-linux-arm64 -p nospam-163628 --log_dir /tmp/nospam-163628 status
error_spam_test.go:182: (dbg) Run:  out/minikube-linux-arm64 -p nospam-163628 --log_dir /tmp/nospam-163628 status
--- PASS: TestErrorSpam/status (1.14s)

                                                
                                    
TestErrorSpam/pause (1.91s)

                                                
                                                
=== RUN   TestErrorSpam/pause
error_spam_test.go:216: Cleaning up 0 logfile(s) ...
error_spam_test.go:159: (dbg) Run:  out/minikube-linux-arm64 -p nospam-163628 --log_dir /tmp/nospam-163628 pause
error_spam_test.go:159: (dbg) Run:  out/minikube-linux-arm64 -p nospam-163628 --log_dir /tmp/nospam-163628 pause
error_spam_test.go:182: (dbg) Run:  out/minikube-linux-arm64 -p nospam-163628 --log_dir /tmp/nospam-163628 pause
--- PASS: TestErrorSpam/pause (1.91s)

                                                
                                    
TestErrorSpam/unpause (2.14s)

                                                
                                                
=== RUN   TestErrorSpam/unpause
error_spam_test.go:216: Cleaning up 0 logfile(s) ...
error_spam_test.go:159: (dbg) Run:  out/minikube-linux-arm64 -p nospam-163628 --log_dir /tmp/nospam-163628 unpause
error_spam_test.go:159: (dbg) Run:  out/minikube-linux-arm64 -p nospam-163628 --log_dir /tmp/nospam-163628 unpause
error_spam_test.go:182: (dbg) Run:  out/minikube-linux-arm64 -p nospam-163628 --log_dir /tmp/nospam-163628 unpause
--- PASS: TestErrorSpam/unpause (2.14s)

                                                
                                    
TestErrorSpam/stop (1.51s)

                                                
                                                
=== RUN   TestErrorSpam/stop
error_spam_test.go:216: Cleaning up 0 logfile(s) ...
error_spam_test.go:159: (dbg) Run:  out/minikube-linux-arm64 -p nospam-163628 --log_dir /tmp/nospam-163628 stop
error_spam_test.go:159: (dbg) Done: out/minikube-linux-arm64 -p nospam-163628 --log_dir /tmp/nospam-163628 stop: (1.259194115s)
error_spam_test.go:159: (dbg) Run:  out/minikube-linux-arm64 -p nospam-163628 --log_dir /tmp/nospam-163628 stop
error_spam_test.go:182: (dbg) Run:  out/minikube-linux-arm64 -p nospam-163628 --log_dir /tmp/nospam-163628 stop
--- PASS: TestErrorSpam/stop (1.51s)

                                                
                                    
TestFunctional/serial/CopySyncFile (0s)

                                                
                                                
=== RUN   TestFunctional/serial/CopySyncFile
functional_test.go:1851: local sync path: /home/jenkins/minikube-integration/17703-252966/.minikube/files/etc/test/nested/copy/258301/hosts
--- PASS: TestFunctional/serial/CopySyncFile (0.00s)

                                                
                                    
TestFunctional/serial/StartWithProxy (61.74s)

                                                
                                                
=== RUN   TestFunctional/serial/StartWithProxy
functional_test.go:2230: (dbg) Run:  out/minikube-linux-arm64 start -p functional-616785 --memory=4000 --apiserver-port=8441 --wait=all --driver=docker  --container-runtime=containerd
E1201 18:59:24.388505  258301 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/addons-488129/client.crt: no such file or directory
E1201 18:59:24.395462  258301 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/addons-488129/client.crt: no such file or directory
E1201 18:59:24.405900  258301 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/addons-488129/client.crt: no such file or directory
E1201 18:59:24.426231  258301 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/addons-488129/client.crt: no such file or directory
E1201 18:59:24.466476  258301 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/addons-488129/client.crt: no such file or directory
E1201 18:59:24.546766  258301 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/addons-488129/client.crt: no such file or directory
E1201 18:59:24.707125  258301 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/addons-488129/client.crt: no such file or directory
E1201 18:59:25.027489  258301 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/addons-488129/client.crt: no such file or directory
E1201 18:59:25.668175  258301 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/addons-488129/client.crt: no such file or directory
E1201 18:59:26.948745  258301 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/addons-488129/client.crt: no such file or directory
E1201 18:59:29.508942  258301 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/addons-488129/client.crt: no such file or directory
E1201 18:59:34.629972  258301 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/addons-488129/client.crt: no such file or directory
functional_test.go:2230: (dbg) Done: out/minikube-linux-arm64 start -p functional-616785 --memory=4000 --apiserver-port=8441 --wait=all --driver=docker  --container-runtime=containerd: (1m1.74399932s)
--- PASS: TestFunctional/serial/StartWithProxy (61.74s)

                                                
                                    
TestFunctional/serial/AuditLog (0s)

                                                
                                                
=== RUN   TestFunctional/serial/AuditLog
--- PASS: TestFunctional/serial/AuditLog (0.00s)

                                                
                                    
TestFunctional/serial/SoftStart (6.4s)

                                                
                                                
=== RUN   TestFunctional/serial/SoftStart
functional_test.go:655: (dbg) Run:  out/minikube-linux-arm64 start -p functional-616785 --alsologtostderr -v=8
E1201 18:59:44.870100  258301 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/addons-488129/client.crt: no such file or directory
functional_test.go:655: (dbg) Done: out/minikube-linux-arm64 start -p functional-616785 --alsologtostderr -v=8: (6.39886523s)
functional_test.go:659: soft start took 6.401788094s for "functional-616785" cluster.
--- PASS: TestFunctional/serial/SoftStart (6.40s)

                                                
                                    
TestFunctional/serial/KubeContext (0.07s)

                                                
                                                
=== RUN   TestFunctional/serial/KubeContext
functional_test.go:677: (dbg) Run:  kubectl config current-context
--- PASS: TestFunctional/serial/KubeContext (0.07s)

                                                
                                    
TestFunctional/serial/KubectlGetPods (0.09s)

                                                
                                                
=== RUN   TestFunctional/serial/KubectlGetPods
functional_test.go:692: (dbg) Run:  kubectl --context functional-616785 get po -A
--- PASS: TestFunctional/serial/KubectlGetPods (0.09s)

                                                
                                    
TestFunctional/serial/CacheCmd/cache/add_remote (4.09s)

                                                
                                                
=== RUN   TestFunctional/serial/CacheCmd/cache/add_remote
functional_test.go:1045: (dbg) Run:  out/minikube-linux-arm64 -p functional-616785 cache add registry.k8s.io/pause:3.1
functional_test.go:1045: (dbg) Done: out/minikube-linux-arm64 -p functional-616785 cache add registry.k8s.io/pause:3.1: (1.483269492s)
functional_test.go:1045: (dbg) Run:  out/minikube-linux-arm64 -p functional-616785 cache add registry.k8s.io/pause:3.3
functional_test.go:1045: (dbg) Done: out/minikube-linux-arm64 -p functional-616785 cache add registry.k8s.io/pause:3.3: (1.378328254s)
functional_test.go:1045: (dbg) Run:  out/minikube-linux-arm64 -p functional-616785 cache add registry.k8s.io/pause:latest
functional_test.go:1045: (dbg) Done: out/minikube-linux-arm64 -p functional-616785 cache add registry.k8s.io/pause:latest: (1.231126996s)
--- PASS: TestFunctional/serial/CacheCmd/cache/add_remote (4.09s)

                                                
                                    
TestFunctional/serial/CacheCmd/cache/add_local (1.47s)

                                                
                                                
=== RUN   TestFunctional/serial/CacheCmd/cache/add_local
functional_test.go:1073: (dbg) Run:  docker build -t minikube-local-cache-test:functional-616785 /tmp/TestFunctionalserialCacheCmdcacheadd_local857853098/001
functional_test.go:1085: (dbg) Run:  out/minikube-linux-arm64 -p functional-616785 cache add minikube-local-cache-test:functional-616785
functional_test.go:1090: (dbg) Run:  out/minikube-linux-arm64 -p functional-616785 cache delete minikube-local-cache-test:functional-616785
functional_test.go:1079: (dbg) Run:  docker rmi minikube-local-cache-test:functional-616785
--- PASS: TestFunctional/serial/CacheCmd/cache/add_local (1.47s)

                                                
                                    
TestFunctional/serial/CacheCmd/cache/CacheDelete (0.08s)

                                                
                                                
=== RUN   TestFunctional/serial/CacheCmd/cache/CacheDelete
functional_test.go:1098: (dbg) Run:  out/minikube-linux-arm64 cache delete registry.k8s.io/pause:3.3
--- PASS: TestFunctional/serial/CacheCmd/cache/CacheDelete (0.08s)

                                                
                                    
TestFunctional/serial/CacheCmd/cache/list (0.07s)

                                                
                                                
=== RUN   TestFunctional/serial/CacheCmd/cache/list
functional_test.go:1106: (dbg) Run:  out/minikube-linux-arm64 cache list
--- PASS: TestFunctional/serial/CacheCmd/cache/list (0.07s)

                                                
                                    
TestFunctional/serial/CacheCmd/cache/verify_cache_inside_node (0.35s)

                                                
                                                
=== RUN   TestFunctional/serial/CacheCmd/cache/verify_cache_inside_node
functional_test.go:1120: (dbg) Run:  out/minikube-linux-arm64 -p functional-616785 ssh sudo crictl images
--- PASS: TestFunctional/serial/CacheCmd/cache/verify_cache_inside_node (0.35s)

                                                
                                    
TestFunctional/serial/CacheCmd/cache/cache_reload (2.28s)

                                                
                                                
=== RUN   TestFunctional/serial/CacheCmd/cache/cache_reload
functional_test.go:1143: (dbg) Run:  out/minikube-linux-arm64 -p functional-616785 ssh sudo crictl rmi registry.k8s.io/pause:latest
functional_test.go:1149: (dbg) Run:  out/minikube-linux-arm64 -p functional-616785 ssh sudo crictl inspecti registry.k8s.io/pause:latest
functional_test.go:1149: (dbg) Non-zero exit: out/minikube-linux-arm64 -p functional-616785 ssh sudo crictl inspecti registry.k8s.io/pause:latest: exit status 1 (349.24387ms)

                                                
                                                
-- stdout --
	FATA[0000] no such image "registry.k8s.io/pause:latest" present 

                                                
                                                
-- /stdout --
** stderr ** 
	ssh: Process exited with status 1

                                                
                                                
** /stderr **
functional_test.go:1154: (dbg) Run:  out/minikube-linux-arm64 -p functional-616785 cache reload
functional_test.go:1154: (dbg) Done: out/minikube-linux-arm64 -p functional-616785 cache reload: (1.170107921s)
functional_test.go:1159: (dbg) Run:  out/minikube-linux-arm64 -p functional-616785 ssh sudo crictl inspecti registry.k8s.io/pause:latest
--- PASS: TestFunctional/serial/CacheCmd/cache/cache_reload (2.28s)
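
The cache_reload sequence is: delete an image from the node's containerd store, confirm it is gone, then ask minikube to push everything in its local cache back onto the node. As plain commands against the profile from this run:

    minikube -p functional-616785 ssh sudo crictl rmi registry.k8s.io/pause:latest
    # expected to fail now: the image is no longer present on the node
    minikube -p functional-616785 ssh sudo crictl inspecti registry.k8s.io/pause:latest || true
    # re-load every image recorded in minikube's cache
    minikube -p functional-616785 cache reload
    minikube -p functional-616785 ssh sudo crictl inspecti registry.k8s.io/pause:latest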

                                                
                                    
TestFunctional/serial/CacheCmd/cache/delete (0.16s)

                                                
                                                
=== RUN   TestFunctional/serial/CacheCmd/cache/delete
functional_test.go:1168: (dbg) Run:  out/minikube-linux-arm64 cache delete registry.k8s.io/pause:3.1
functional_test.go:1168: (dbg) Run:  out/minikube-linux-arm64 cache delete registry.k8s.io/pause:latest
--- PASS: TestFunctional/serial/CacheCmd/cache/delete (0.16s)

                                                
                                    
TestFunctional/serial/MinikubeKubectlCmd (0.16s)

                                                
                                                
=== RUN   TestFunctional/serial/MinikubeKubectlCmd
functional_test.go:712: (dbg) Run:  out/minikube-linux-arm64 -p functional-616785 kubectl -- --context functional-616785 get pods
--- PASS: TestFunctional/serial/MinikubeKubectlCmd (0.16s)

                                                
                                    
TestFunctional/serial/MinikubeKubectlCmdDirectly (0.16s)

                                                
                                                
=== RUN   TestFunctional/serial/MinikubeKubectlCmdDirectly
functional_test.go:737: (dbg) Run:  out/kubectl --context functional-616785 get pods
--- PASS: TestFunctional/serial/MinikubeKubectlCmdDirectly (0.16s)

                                                
                                    
TestFunctional/serial/LogsCmd (1.62s)

                                                
                                                
=== RUN   TestFunctional/serial/LogsCmd
functional_test.go:1232: (dbg) Run:  out/minikube-linux-arm64 -p functional-616785 logs
functional_test.go:1232: (dbg) Done: out/minikube-linux-arm64 -p functional-616785 logs: (1.619922746s)
--- PASS: TestFunctional/serial/LogsCmd (1.62s)

                                                
                                    
TestFunctional/parallel/ConfigCmd (0.6s)

                                                
                                                
=== RUN   TestFunctional/parallel/ConfigCmd
=== PAUSE TestFunctional/parallel/ConfigCmd

                                                
                                                

                                                
                                                
=== CONT  TestFunctional/parallel/ConfigCmd
functional_test.go:1195: (dbg) Run:  out/minikube-linux-arm64 -p functional-616785 config unset cpus
functional_test.go:1195: (dbg) Run:  out/minikube-linux-arm64 -p functional-616785 config get cpus
functional_test.go:1195: (dbg) Non-zero exit: out/minikube-linux-arm64 -p functional-616785 config get cpus: exit status 14 (107.062169ms)

                                                
                                                
** stderr ** 
	Error: specified key could not be found in config

                                                
                                                
** /stderr **
functional_test.go:1195: (dbg) Run:  out/minikube-linux-arm64 -p functional-616785 config set cpus 2
functional_test.go:1195: (dbg) Run:  out/minikube-linux-arm64 -p functional-616785 config get cpus
functional_test.go:1195: (dbg) Run:  out/minikube-linux-arm64 -p functional-616785 config unset cpus
functional_test.go:1195: (dbg) Run:  out/minikube-linux-arm64 -p functional-616785 config get cpus
functional_test.go:1195: (dbg) Non-zero exit: out/minikube-linux-arm64 -p functional-616785 config get cpus: exit status 14 (106.054763ms)

                                                
                                                
** stderr ** 
	Error: specified key could not be found in config

                                                
                                                
** /stderr **
--- PASS: TestFunctional/parallel/ConfigCmd (0.60s)
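
The two "exit status 14" results above are the expected outcome of reading a key that is not set ("specified key could not be found in config"); the round trip is roughly:

    minikube -p functional-616785 config unset cpus
    minikube -p functional-616785 config get cpus      # exits 14: key not found
    minikube -p functional-616785 config set cpus 2
    minikube -p functional-616785 config get cpus      # now prints the stored value
    minikube -p functional-616785 config unset cpus
    minikube -p functional-616785 config get cpus      # exits 14 again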

                                                
                                    
TestFunctional/parallel/DashboardCmd (9.41s)

                                                
                                                
=== RUN   TestFunctional/parallel/DashboardCmd
=== PAUSE TestFunctional/parallel/DashboardCmd

                                                
                                                

                                                
                                                
=== CONT  TestFunctional/parallel/DashboardCmd
functional_test.go:901: (dbg) daemon: [out/minikube-linux-arm64 dashboard --url --port 36195 -p functional-616785 --alsologtostderr -v=1]
functional_test.go:906: (dbg) stopping [out/minikube-linux-arm64 dashboard --url --port 36195 -p functional-616785 --alsologtostderr -v=1] ...
helpers_test.go:508: unable to kill pid 290239: os: process already finished
--- PASS: TestFunctional/parallel/DashboardCmd (9.41s)

                                                
                                    
TestFunctional/parallel/DryRun (0.79s)

                                                
                                                
=== RUN   TestFunctional/parallel/DryRun
=== PAUSE TestFunctional/parallel/DryRun

                                                
                                                

                                                
                                                
=== CONT  TestFunctional/parallel/DryRun
functional_test.go:970: (dbg) Run:  out/minikube-linux-arm64 start -p functional-616785 --dry-run --memory 250MB --alsologtostderr --driver=docker  --container-runtime=containerd
functional_test.go:970: (dbg) Non-zero exit: out/minikube-linux-arm64 start -p functional-616785 --dry-run --memory 250MB --alsologtostderr --driver=docker  --container-runtime=containerd: exit status 23 (374.602673ms)

                                                
                                                
-- stdout --
	* [functional-616785] minikube v1.32.0 on Ubuntu 20.04 (arm64)
	  - MINIKUBE_LOCATION=17703
	  - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
	  - KUBECONFIG=/home/jenkins/minikube-integration/17703-252966/kubeconfig
	  - MINIKUBE_HOME=/home/jenkins/minikube-integration/17703-252966/.minikube
	  - MINIKUBE_BIN=out/minikube-linux-arm64
	  - MINIKUBE_FORCE_SYSTEMD=
	* Using the docker driver based on existing profile
	
	

                                                
                                                
-- /stdout --
** stderr ** 
	I1201 19:02:17.908029  289728 out.go:296] Setting OutFile to fd 1 ...
	I1201 19:02:17.908177  289728 out.go:343] TERM=,COLORTERM=, which probably does not support color
	I1201 19:02:17.908183  289728 out.go:309] Setting ErrFile to fd 2...
	I1201 19:02:17.908189  289728 out.go:343] TERM=,COLORTERM=, which probably does not support color
	I1201 19:02:17.908486  289728 root.go:338] Updating PATH: /home/jenkins/minikube-integration/17703-252966/.minikube/bin
	I1201 19:02:17.908841  289728 out.go:303] Setting JSON to false
	I1201 19:02:17.909868  289728 start.go:128] hostinfo: {"hostname":"ip-172-31-31-251","uptime":6284,"bootTime":1701451054,"procs":259,"os":"linux","platform":"ubuntu","platformFamily":"debian","platformVersion":"20.04","kernelVersion":"5.15.0-1050-aws","kernelArch":"aarch64","virtualizationSystem":"","virtualizationRole":"","hostId":"982e3628-3742-4b3e-bb63-ac1b07660ec7"}
	I1201 19:02:17.909930  289728 start.go:138] virtualization:  
	I1201 19:02:17.914618  289728 out.go:177] * [functional-616785] minikube v1.32.0 on Ubuntu 20.04 (arm64)
	I1201 19:02:17.916774  289728 out.go:177]   - MINIKUBE_LOCATION=17703
	I1201 19:02:17.919996  289728 out.go:177]   - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
	I1201 19:02:17.916910  289728 notify.go:220] Checking for updates...
	I1201 19:02:17.928963  289728 out.go:177]   - KUBECONFIG=/home/jenkins/minikube-integration/17703-252966/kubeconfig
	I1201 19:02:17.931277  289728 out.go:177]   - MINIKUBE_HOME=/home/jenkins/minikube-integration/17703-252966/.minikube
	I1201 19:02:17.933901  289728 out.go:177]   - MINIKUBE_BIN=out/minikube-linux-arm64
	I1201 19:02:17.936340  289728 out.go:177]   - MINIKUBE_FORCE_SYSTEMD=
	I1201 19:02:17.938889  289728 config.go:182] Loaded profile config "functional-616785": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.28.4
	I1201 19:02:17.939530  289728 driver.go:392] Setting default libvirt URI to qemu:///system
	I1201 19:02:17.987070  289728 docker.go:122] docker version: linux-24.0.7:Docker Engine - Community
	I1201 19:02:17.987197  289728 cli_runner.go:164] Run: docker system info --format "{{json .}}"
	I1201 19:02:18.128703  289728 info.go:266] docker info: {ID:EOU5:DNGX:XN6V:L2FZ:UXRM:5TWK:EVUR:KC2F:GT7Z:Y4O4:GB77:5PD3 Containers:1 ContainersRunning:1 ContainersPaused:0 ContainersStopped:0 Images:2 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local logentries splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:true BridgeNfIP6Tables:true Debug:false NFd:31 OomKillDisable:true NGoroutines:45 SystemTime:2023-12-01 19:02:18.116701178 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1050-aws OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Archi
tecture:aarch64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:2 MemTotal:8215040000 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ip-172-31-31-251 Labels:[] ExperimentalBuild:false ServerVersion:24.0.7 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:d8f198a4ed8892c764191ef7b3b06d8a2eeb5c7f Expected:d8f198a4ed8892c764191ef7b3b06d8a2eeb5c7f} RuncCommit:{ID:v1.1.10-0-g18a0cb0 Expected:v1.1.10-0-g18a0cb0} InitCommit:{ID:de40ad0 Expected:de40ad0} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:<nil>
ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.11.2] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.21.0]] Warnings:<nil>}}
	I1201 19:02:18.128919  289728 docker.go:295] overlay module found
	I1201 19:02:18.131210  289728 out.go:177] * Using the docker driver based on existing profile
	I1201 19:02:18.133086  289728 start.go:298] selected driver: docker
	I1201 19:02:18.133112  289728 start.go:902] validating driver "docker" against &{Name:functional-616785 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.42-1701387262-17703@sha256:a5458414df1be5e58eff93b3e67e6ecaad7e51ab23139de15714f7345af15e2f Memory:4000 CPUs:2 DiskSize:20000 VMDriver: Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:0 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.28.4 ClusterName:functional-616785 Namespace:default APIServerName:miniku
beCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[{Component:apiserver Key:enable-admission-plugins Value:NamespaceAutoProvision}] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI: NodeIP: NodePort:8441 NodeName:} Nodes:[{Name: IP:192.168.49.2 Port:8441 KubernetesVersion:v1.28.4 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[default-storageclass:true storage-provisioner:true] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersi
on:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 AutoPauseInterval:1m0s GPUs:}
	I1201 19:02:18.133221  289728 start.go:913] status for docker: {Installed:true Healthy:true Running:false NeedsImprovement:false Error:<nil> Reason: Fix: Doc: Version:}
	I1201 19:02:18.135627  289728 out.go:177] 
	W1201 19:02:18.137722  289728 out.go:239] X Exiting due to RSRC_INSUFFICIENT_REQ_MEMORY: Requested memory allocation 250MiB is less than the usable minimum of 1800MB
	X Exiting due to RSRC_INSUFFICIENT_REQ_MEMORY: Requested memory allocation 250MiB is less than the usable minimum of 1800MB
	I1201 19:02:18.139362  289728 out.go:177] 

                                                
                                                
** /stderr **
functional_test.go:987: (dbg) Run:  out/minikube-linux-arm64 start -p functional-616785 --dry-run --alsologtostderr -v=1 --driver=docker  --container-runtime=containerd
--- PASS: TestFunctional/parallel/DryRun (0.79s)
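
The dry-run pair shows minikube's up-front memory validation: asking for 250MB fails fast with RSRC_INSUFFICIENT_REQ_MEMORY (exit status 23) before anything is created, while the same dry run without the undersized request goes through. A sketch:

    # exits 23: requested memory is below the usable minimum of 1800MB
    minikube start -p functional-616785 --dry-run --memory 250MB \
      --driver=docker --container-runtime=containerd

    # the dry run without the undersized --memory succeeds
    minikube start -p functional-616785 --dry-run \
      --driver=docker --container-runtime=containerd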

                                                
                                    
TestFunctional/parallel/InternationalLanguage (0.33s)

                                                
                                                
=== RUN   TestFunctional/parallel/InternationalLanguage
=== PAUSE TestFunctional/parallel/InternationalLanguage

                                                
                                                

                                                
                                                
=== CONT  TestFunctional/parallel/InternationalLanguage
functional_test.go:1016: (dbg) Run:  out/minikube-linux-arm64 start -p functional-616785 --dry-run --memory 250MB --alsologtostderr --driver=docker  --container-runtime=containerd
functional_test.go:1016: (dbg) Non-zero exit: out/minikube-linux-arm64 start -p functional-616785 --dry-run --memory 250MB --alsologtostderr --driver=docker  --container-runtime=containerd: exit status 23 (329.342896ms)

                                                
                                                
-- stdout --
	* [functional-616785] minikube v1.32.0 sur Ubuntu 20.04 (arm64)
	  - MINIKUBE_LOCATION=17703
	  - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
	  - KUBECONFIG=/home/jenkins/minikube-integration/17703-252966/kubeconfig
	  - MINIKUBE_HOME=/home/jenkins/minikube-integration/17703-252966/.minikube
	  - MINIKUBE_BIN=out/minikube-linux-arm64
	  - MINIKUBE_FORCE_SYSTEMD=
	* Utilisation du pilote docker basé sur le profil existant
	
	

                                                
                                                
-- /stdout --
** stderr ** 
	I1201 19:02:17.557229  289659 out.go:296] Setting OutFile to fd 1 ...
	I1201 19:02:17.557699  289659 out.go:343] TERM=,COLORTERM=, which probably does not support color
	I1201 19:02:17.557732  289659 out.go:309] Setting ErrFile to fd 2...
	I1201 19:02:17.557751  289659 out.go:343] TERM=,COLORTERM=, which probably does not support color
	I1201 19:02:17.560337  289659 root.go:338] Updating PATH: /home/jenkins/minikube-integration/17703-252966/.minikube/bin
	I1201 19:02:17.561028  289659 out.go:303] Setting JSON to false
	I1201 19:02:17.562401  289659 start.go:128] hostinfo: {"hostname":"ip-172-31-31-251","uptime":6284,"bootTime":1701451054,"procs":258,"os":"linux","platform":"ubuntu","platformFamily":"debian","platformVersion":"20.04","kernelVersion":"5.15.0-1050-aws","kernelArch":"aarch64","virtualizationSystem":"","virtualizationRole":"","hostId":"982e3628-3742-4b3e-bb63-ac1b07660ec7"}
	I1201 19:02:17.562511  289659 start.go:138] virtualization:  
	I1201 19:02:17.565498  289659 out.go:177] * [functional-616785] minikube v1.32.0 sur Ubuntu 20.04 (arm64)
	I1201 19:02:17.568178  289659 out.go:177]   - MINIKUBE_LOCATION=17703
	I1201 19:02:17.570060  289659 out.go:177]   - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
	I1201 19:02:17.568293  289659 notify.go:220] Checking for updates...
	I1201 19:02:17.574267  289659 out.go:177]   - KUBECONFIG=/home/jenkins/minikube-integration/17703-252966/kubeconfig
	I1201 19:02:17.576328  289659 out.go:177]   - MINIKUBE_HOME=/home/jenkins/minikube-integration/17703-252966/.minikube
	I1201 19:02:17.578364  289659 out.go:177]   - MINIKUBE_BIN=out/minikube-linux-arm64
	I1201 19:02:17.580380  289659 out.go:177]   - MINIKUBE_FORCE_SYSTEMD=
	I1201 19:02:17.583976  289659 config.go:182] Loaded profile config "functional-616785": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.28.4
	I1201 19:02:17.584939  289659 driver.go:392] Setting default libvirt URI to qemu:///system
	I1201 19:02:17.635801  289659 docker.go:122] docker version: linux-24.0.7:Docker Engine - Community
	I1201 19:02:17.635914  289659 cli_runner.go:164] Run: docker system info --format "{{json .}}"
	I1201 19:02:17.753101  289659 info.go:266] docker info: {ID:EOU5:DNGX:XN6V:L2FZ:UXRM:5TWK:EVUR:KC2F:GT7Z:Y4O4:GB77:5PD3 Containers:1 ContainersRunning:1 ContainersPaused:0 ContainersStopped:0 Images:2 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local logentries splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:true BridgeNfIP6Tables:true Debug:false NFd:31 OomKillDisable:true NGoroutines:45 SystemTime:2023-12-01 19:02:17.741610441 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1050-aws OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Archi
tecture:aarch64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:2 MemTotal:8215040000 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ip-172-31-31-251 Labels:[] ExperimentalBuild:false ServerVersion:24.0.7 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:d8f198a4ed8892c764191ef7b3b06d8a2eeb5c7f Expected:d8f198a4ed8892c764191ef7b3b06d8a2eeb5c7f} RuncCommit:{ID:v1.1.10-0-g18a0cb0 Expected:v1.1.10-0-g18a0cb0} InitCommit:{ID:de40ad0 Expected:de40ad0} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:<nil>
ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.11.2] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.21.0]] Warnings:<nil>}}
	I1201 19:02:17.753198  289659 docker.go:295] overlay module found
	I1201 19:02:17.757683  289659 out.go:177] * Utilisation du pilote docker basé sur le profil existant
	I1201 19:02:17.760254  289659 start.go:298] selected driver: docker
	I1201 19:02:17.760274  289659 start.go:902] validating driver "docker" against &{Name:functional-616785 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.42-1701387262-17703@sha256:a5458414df1be5e58eff93b3e67e6ecaad7e51ab23139de15714f7345af15e2f Memory:4000 CPUs:2 DiskSize:20000 VMDriver: Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:0 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.28.4 ClusterName:functional-616785 Namespace:default APIServerName:miniku
beCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[{Component:apiserver Key:enable-admission-plugins Value:NamespaceAutoProvision}] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI: NodeIP: NodePort:8441 NodeName:} Nodes:[{Name: IP:192.168.49.2 Port:8441 KubernetesVersion:v1.28.4 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[default-storageclass:true storage-provisioner:true] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersi
on:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 AutoPauseInterval:1m0s GPUs:}
	I1201 19:02:17.760377  289659 start.go:913] status for docker: {Installed:true Healthy:true Running:false NeedsImprovement:false Error:<nil> Reason: Fix: Doc: Version:}
	I1201 19:02:17.763308  289659 out.go:177] 
	W1201 19:02:17.765739  289659 out.go:239] X Fermeture en raison de RSRC_INSUFFICIENT_REQ_MEMORY : L'allocation de mémoire demandée 250 Mio est inférieure au minimum utilisable de 1800 Mo
	X Fermeture en raison de RSRC_INSUFFICIENT_REQ_MEMORY : L'allocation de mémoire demandée 250 Mio est inférieure au minimum utilisable de 1800 Mo
	I1201 19:02:17.767931  289659 out.go:177] 

                                                
                                                
** /stderr **
--- PASS: TestFunctional/parallel/InternationalLanguage (0.33s)

                                                
                                    
TestFunctional/parallel/StatusCmd (1.2s)

                                                
                                                
=== RUN   TestFunctional/parallel/StatusCmd
=== PAUSE TestFunctional/parallel/StatusCmd

                                                
                                                

                                                
                                                
=== CONT  TestFunctional/parallel/StatusCmd
functional_test.go:850: (dbg) Run:  out/minikube-linux-arm64 -p functional-616785 status
functional_test.go:856: (dbg) Run:  out/minikube-linux-arm64 -p functional-616785 status -f host:{{.Host}},kublet:{{.Kubelet}},apiserver:{{.APIServer}},kubeconfig:{{.Kubeconfig}}
functional_test.go:868: (dbg) Run:  out/minikube-linux-arm64 -p functional-616785 status -o json
--- PASS: TestFunctional/parallel/StatusCmd (1.20s)
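
Status can be rendered with a Go template over the fields exercised above (Host, Kubelet, APIServer, Kubeconfig) or dumped as JSON, for example:

    minikube -p functional-616785 status
    minikube -p functional-616785 status -f 'host:{{.Host}},kubelet:{{.Kubelet}},apiserver:{{.APIServer}},kubeconfig:{{.Kubeconfig}}'
    minikube -p functional-616785 status -o json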

                                                
                                    
TestFunctional/parallel/ServiceCmdConnect (8.97s)

                                                
                                                
=== RUN   TestFunctional/parallel/ServiceCmdConnect
=== PAUSE TestFunctional/parallel/ServiceCmdConnect

                                                
                                                

                                                
                                                
=== CONT  TestFunctional/parallel/ServiceCmdConnect
functional_test.go:1626: (dbg) Run:  kubectl --context functional-616785 create deployment hello-node-connect --image=registry.k8s.io/echoserver-arm:1.8
functional_test.go:1634: (dbg) Run:  kubectl --context functional-616785 expose deployment hello-node-connect --type=NodePort --port=8080
functional_test.go:1639: (dbg) TestFunctional/parallel/ServiceCmdConnect: waiting 10m0s for pods matching "app=hello-node-connect" in namespace "default" ...
helpers_test.go:344: "hello-node-connect-7799dfb7c6-ndpnk" [c3ae393d-5e60-4d46-858f-28d2b5c6c878] Pending / Ready:ContainersNotReady (containers with unready status: [echoserver-arm]) / ContainersReady:ContainersNotReady (containers with unready status: [echoserver-arm])
helpers_test.go:344: "hello-node-connect-7799dfb7c6-ndpnk" [c3ae393d-5e60-4d46-858f-28d2b5c6c878] Running
functional_test.go:1639: (dbg) TestFunctional/parallel/ServiceCmdConnect: app=hello-node-connect healthy within 8.013718539s
functional_test.go:1648: (dbg) Run:  out/minikube-linux-arm64 -p functional-616785 service hello-node-connect --url
functional_test.go:1654: found endpoint for hello-node-connect: http://192.168.49.2:32522
functional_test.go:1674: http://192.168.49.2:32522: success! body:

                                                
                                                

                                                
                                                
Hostname: hello-node-connect-7799dfb7c6-ndpnk

                                                
                                                
Pod Information:
	-no pod information available-

                                                
                                                
Server values:
	server_version=nginx: 1.13.3 - lua: 10008

                                                
                                                
Request Information:
	client_address=10.244.0.1
	method=GET
	real path=/
	query=
	request_version=1.1
	request_uri=http://192.168.49.2:8080/

                                                
                                                
Request Headers:
	accept-encoding=gzip
	host=192.168.49.2:32522
	user-agent=Go-http-client/1.1

                                                
                                                
Request Body:
	-no body in request-

                                                
                                                
--- PASS: TestFunctional/parallel/ServiceCmdConnect (8.97s)
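
The connect test is the usual deploy/expose/probe loop: create a deployment, expose it as a NodePort service, ask minikube for a reachable URL, and fetch it (the log's Go-http-client request is stood in for by curl here; the watch step is illustrative):

    kubectl --context functional-616785 create deployment hello-node-connect \
      --image=registry.k8s.io/echoserver-arm:1.8
    kubectl --context functional-616785 expose deployment hello-node-connect \
      --type=NodePort --port=8080
    kubectl --context functional-616785 get pods -l app=hello-node-connect --watch   # wait for Running
    # prints something like http://192.168.49.2:32522, then probe it
    URL="$(minikube -p functional-616785 service hello-node-connect --url)"
    curl -s "$URL"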

                                                
                                    
TestFunctional/parallel/AddonsCmd (0.2s)

                                                
                                                
=== RUN   TestFunctional/parallel/AddonsCmd
=== PAUSE TestFunctional/parallel/AddonsCmd

                                                
                                                

                                                
                                                
=== CONT  TestFunctional/parallel/AddonsCmd
functional_test.go:1689: (dbg) Run:  out/minikube-linux-arm64 -p functional-616785 addons list
functional_test.go:1701: (dbg) Run:  out/minikube-linux-arm64 -p functional-616785 addons list -o json
--- PASS: TestFunctional/parallel/AddonsCmd (0.20s)

                                                
                                    
TestFunctional/parallel/PersistentVolumeClaim (91.97s)

                                                
                                                
=== RUN   TestFunctional/parallel/PersistentVolumeClaim
=== PAUSE TestFunctional/parallel/PersistentVolumeClaim

                                                
                                                

                                                
                                                
=== CONT  TestFunctional/parallel/PersistentVolumeClaim
functional_test_pvc_test.go:44: (dbg) TestFunctional/parallel/PersistentVolumeClaim: waiting 4m0s for pods matching "integration-test=storage-provisioner" in namespace "kube-system" ...
helpers_test.go:329: TestFunctional/parallel/PersistentVolumeClaim: WARNING: pod list for "kube-system" "integration-test=storage-provisioner" returned: Get "https://192.168.49.2:8441/api/v1/namespaces/kube-system/pods?labelSelector=integration-test%3Dstorage-provisioner": dial tcp 192.168.49.2:8441: connect: connection refused
helpers_test.go:329: TestFunctional/parallel/PersistentVolumeClaim: WARNING: pod list for "kube-system" "integration-test=storage-provisioner" returned: Get "https://192.168.49.2:8441/api/v1/namespaces/kube-system/pods?labelSelector=integration-test%3Dstorage-provisioner": dial tcp 192.168.49.2:8441: connect: connection refused
helpers_test.go:329: TestFunctional/parallel/PersistentVolumeClaim: WARNING: pod list for "kube-system" "integration-test=storage-provisioner" returned: Get "https://192.168.49.2:8441/api/v1/namespaces/kube-system/pods?labelSelector=integration-test%3Dstorage-provisioner": dial tcp 192.168.49.2:8441: connect: connection refused
helpers_test.go:329: TestFunctional/parallel/PersistentVolumeClaim: WARNING: pod list for "kube-system" "integration-test=storage-provisioner" returned: Get "https://192.168.49.2:8441/api/v1/namespaces/kube-system/pods?labelSelector=integration-test%3Dstorage-provisioner": dial tcp 192.168.49.2:8441: connect: connection refused
helpers_test.go:329: TestFunctional/parallel/PersistentVolumeClaim: WARNING: pod list for "kube-system" "integration-test=storage-provisioner" returned: Get "https://192.168.49.2:8441/api/v1/namespaces/kube-system/pods?labelSelector=integration-test%3Dstorage-provisioner": dial tcp 192.168.49.2:8441: connect: connection refused
helpers_test.go:329: TestFunctional/parallel/PersistentVolumeClaim: WARNING: pod list for "kube-system" "integration-test=storage-provisioner" returned: Get "https://192.168.49.2:8441/api/v1/namespaces/kube-system/pods?labelSelector=integration-test%3Dstorage-provisioner": dial tcp 192.168.49.2:8441: connect: connection refused
helpers_test.go:329: TestFunctional/parallel/PersistentVolumeClaim: WARNING: pod list for "kube-system" "integration-test=storage-provisioner" returned: Get "https://192.168.49.2:8441/api/v1/namespaces/kube-system/pods?labelSelector=integration-test%3Dstorage-provisioner": dial tcp 192.168.49.2:8441: connect: connection refused
helpers_test.go:329: TestFunctional/parallel/PersistentVolumeClaim: WARNING: pod list for "kube-system" "integration-test=storage-provisioner" returned: Get "https://192.168.49.2:8441/api/v1/namespaces/kube-system/pods?labelSelector=integration-test%3Dstorage-provisioner": dial tcp 192.168.49.2:8441: connect: connection refused
helpers_test.go:329: TestFunctional/parallel/PersistentVolumeClaim: WARNING: pod list for "kube-system" "integration-test=storage-provisioner" returned: Get "https://192.168.49.2:8441/api/v1/namespaces/kube-system/pods?labelSelector=integration-test%3Dstorage-provisioner": dial tcp 192.168.49.2:8441: connect: connection refused
helpers_test.go:329: TestFunctional/parallel/PersistentVolumeClaim: WARNING: pod list for "kube-system" "integration-test=storage-provisioner" returned: Get "https://192.168.49.2:8441/api/v1/namespaces/kube-system/pods?labelSelector=integration-test%3Dstorage-provisioner": dial tcp 192.168.49.2:8441: connect: connection refused
helpers_test.go:329: TestFunctional/parallel/PersistentVolumeClaim: WARNING: pod list for "kube-system" "integration-test=storage-provisioner" returned: Get "https://192.168.49.2:8441/api/v1/namespaces/kube-system/pods?labelSelector=integration-test%3Dstorage-provisioner": dial tcp 192.168.49.2:8441: connect: connection refused
helpers_test.go:344: "storage-provisioner" [393607ea-a066-4d39-93eb-75c52a6ab29e] Running / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
helpers_test.go:344: "storage-provisioner" [393607ea-a066-4d39-93eb-75c52a6ab29e] Running
functional_test_pvc_test.go:44: (dbg) TestFunctional/parallel/PersistentVolumeClaim: integration-test=storage-provisioner healthy within 18.006679056s
functional_test_pvc_test.go:49: (dbg) Run:  kubectl --context functional-616785 get storageclass -o=json
functional_test_pvc_test.go:69: (dbg) Run:  kubectl --context functional-616785 apply -f testdata/storage-provisioner/pvc.yaml
functional_test_pvc_test.go:76: (dbg) Run:  kubectl --context functional-616785 get pvc myclaim -o=json
functional_test_pvc_test.go:76: (dbg) Run:  kubectl --context functional-616785 get pvc myclaim -o=json
functional_test_pvc_test.go:76: (dbg) Run:  kubectl --context functional-616785 get pvc myclaim -o=json
E1201 19:00:46.310813  258301 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/addons-488129/client.crt: no such file or directory
functional_test_pvc_test.go:76: (dbg) Run:  kubectl --context functional-616785 get pvc myclaim -o=json
functional_test_pvc_test.go:76: (dbg) Run:  kubectl --context functional-616785 get pvc myclaim -o=json
functional_test_pvc_test.go:76: (dbg) Run:  kubectl --context functional-616785 get pvc myclaim -o=json
functional_test_pvc_test.go:125: (dbg) Run:  kubectl --context functional-616785 apply -f testdata/storage-provisioner/pod.yaml
functional_test_pvc_test.go:130: (dbg) TestFunctional/parallel/PersistentVolumeClaim: waiting 3m0s for pods matching "test=storage-provisioner" in namespace "default" ...
helpers_test.go:344: "sp-pod" [1845dc84-2b0f-4262-9440-ccfdcb7e3bf7] Pending
helpers_test.go:344: "sp-pod" [1845dc84-2b0f-4262-9440-ccfdcb7e3bf7] Pending: PodScheduled:Unschedulable (0/1 nodes are available: persistentvolumeclaim "myclaim" not found. preemption: 0/1 nodes are available: 1 Preemption is not helpful for scheduling..)
helpers_test.go:344: "sp-pod" [1845dc84-2b0f-4262-9440-ccfdcb7e3bf7] Pending: PodScheduled:Unschedulable (0/1 nodes are available: persistentvolume "pvc-2cda4043-6211-422b-a8bb-7de031dcdaed" not found. preemption: 0/1 nodes are available: 1 Preemption is not helpful for scheduling..)
helpers_test.go:344: "sp-pod" [1845dc84-2b0f-4262-9440-ccfdcb7e3bf7] Pending / Ready:ContainersNotReady (containers with unready status: [myfrontend]) / ContainersReady:ContainersNotReady (containers with unready status: [myfrontend])
helpers_test.go:344: "sp-pod" [1845dc84-2b0f-4262-9440-ccfdcb7e3bf7] Running
functional_test_pvc_test.go:130: (dbg) TestFunctional/parallel/PersistentVolumeClaim: test=storage-provisioner healthy within 35.010657253s
functional_test_pvc_test.go:100: (dbg) Run:  kubectl --context functional-616785 exec sp-pod -- touch /tmp/mount/foo
functional_test_pvc_test.go:106: (dbg) Run:  kubectl --context functional-616785 delete -f testdata/storage-provisioner/pod.yaml
functional_test_pvc_test.go:125: (dbg) Run:  kubectl --context functional-616785 apply -f testdata/storage-provisioner/pod.yaml
functional_test_pvc_test.go:130: (dbg) TestFunctional/parallel/PersistentVolumeClaim: waiting 3m0s for pods matching "test=storage-provisioner" in namespace "default" ...
helpers_test.go:344: "sp-pod" [e73d167a-5be4-4ef7-96f2-4171d7918858] Pending
helpers_test.go:344: "sp-pod" [e73d167a-5be4-4ef7-96f2-4171d7918858] Pending / Ready:ContainersNotReady (containers with unready status: [myfrontend]) / ContainersReady:ContainersNotReady (containers with unready status: [myfrontend])
helpers_test.go:344: "sp-pod" [e73d167a-5be4-4ef7-96f2-4171d7918858] Running
functional_test_pvc_test.go:130: (dbg) TestFunctional/parallel/PersistentVolumeClaim: test=storage-provisioner healthy within 8.018557526s
functional_test_pvc_test.go:114: (dbg) Run:  kubectl --context functional-616785 exec sp-pod -- ls /tmp/mount
--- PASS: TestFunctional/parallel/PersistentVolumeClaim (91.97s)
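For context on the polling above: the claim is applied from a manifest and then queried repeatedly until it is usable. Below is a minimal Go sketch of that apply-then-poll pattern, assuming kubectl is on PATH and the functional-616785 context from this run; the manifest path matches the log, while the 30-second budget and 2-second interval are illustrative, not the test's actual values.

package main

import (
	"encoding/json"
	"fmt"
	"os/exec"
	"time"
)

// pvcStatus models only the field we poll on; the real object has many more.
type pvcStatus struct {
	Status struct {
		Phase string `json:"phase"`
	} `json:"status"`
}

func main() {
	ctx := "functional-616785" // kubeconfig context, as in the log above

	// Apply the claim (testdata/storage-provisioner/pvc.yaml in the test run).
	if out, err := exec.Command("kubectl", "--context", ctx, "apply", "-f",
		"testdata/storage-provisioner/pvc.yaml").CombinedOutput(); err != nil {
		fmt.Printf("apply failed: %v\n%s", err, out)
		return
	}

	// Poll until the claim reports phase Bound, mirroring the repeated
	// "get pvc myclaim -o=json" calls in the log.
	deadline := time.Now().Add(30 * time.Second) // illustrative budget
	for time.Now().Before(deadline) {
		out, err := exec.Command("kubectl", "--context", ctx,
			"get", "pvc", "myclaim", "-o=json").Output()
		if err == nil {
			var pvc pvcStatus
			if json.Unmarshal(out, &pvc) == nil && pvc.Status.Phase == "Bound" {
				fmt.Println("claim is Bound")
				return
			}
		}
		time.Sleep(2 * time.Second)
	}
	fmt.Println("claim never became Bound")
}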

                                                
                                    
TestFunctional/parallel/SSHCmd (0.77s)

                                                
                                                
=== RUN   TestFunctional/parallel/SSHCmd
=== PAUSE TestFunctional/parallel/SSHCmd

                                                
                                                

                                                
                                                
=== CONT  TestFunctional/parallel/SSHCmd
functional_test.go:1724: (dbg) Run:  out/minikube-linux-arm64 -p functional-616785 ssh "echo hello"
functional_test.go:1741: (dbg) Run:  out/minikube-linux-arm64 -p functional-616785 ssh "cat /etc/hostname"
--- PASS: TestFunctional/parallel/SSHCmd (0.77s)

                                                
                                    
TestFunctional/parallel/CpCmd (1.64s)

                                                
                                                
=== RUN   TestFunctional/parallel/CpCmd
=== PAUSE TestFunctional/parallel/CpCmd

                                                
                                                

                                                
                                                
=== CONT  TestFunctional/parallel/CpCmd
helpers_test.go:556: (dbg) Run:  out/minikube-linux-arm64 -p functional-616785 cp testdata/cp-test.txt /home/docker/cp-test.txt
helpers_test.go:534: (dbg) Run:  out/minikube-linux-arm64 -p functional-616785 ssh -n functional-616785 "sudo cat /home/docker/cp-test.txt"
helpers_test.go:556: (dbg) Run:  out/minikube-linux-arm64 -p functional-616785 cp functional-616785:/home/docker/cp-test.txt /tmp/TestFunctionalparallelCpCmd108209694/001/cp-test.txt
helpers_test.go:534: (dbg) Run:  out/minikube-linux-arm64 -p functional-616785 ssh -n functional-616785 "sudo cat /home/docker/cp-test.txt"
--- PASS: TestFunctional/parallel/CpCmd (1.64s)
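The cp subtest round-trips a file: "minikube cp" pushes it into the node, and an "ssh sudo cat" confirms the bytes arrived. A minimal sketch of the same round trip, assuming the binary path and profile from this run; the sample file name and contents are illustrative.

package main

import (
	"bytes"
	"fmt"
	"os"
	"os/exec"
)

func main() {
	bin, profile := "out/minikube-linux-arm64", "functional-616785"

	// Write a small local file to push into the node (illustrative content).
	local := "cp-demo.txt"
	payload := []byte("hello from the host\n")
	if err := os.WriteFile(local, payload, 0o644); err != nil {
		fmt.Println("write failed:", err)
		return
	}

	// "minikube cp <local> <remote>" copies the file into the node's filesystem.
	remote := "/home/docker/cp-demo.txt"
	if out, err := exec.Command(bin, "-p", profile, "cp", local, remote).CombinedOutput(); err != nil {
		fmt.Printf("cp failed: %v\n%s", err, out)
		return
	}

	// Read it back over ssh and make sure the contents match.
	got, err := exec.Command(bin, "-p", profile, "ssh", "sudo cat "+remote).Output()
	if err != nil {
		fmt.Println("ssh cat failed:", err)
		return
	}
	if bytes.Equal(bytes.TrimSpace(got), bytes.TrimSpace(payload)) {
		fmt.Println("round trip OK")
	} else {
		fmt.Println("contents differ")
	}
}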

                                                
                                    
TestFunctional/parallel/FileSync (0.43s)

                                                
                                                
=== RUN   TestFunctional/parallel/FileSync
=== PAUSE TestFunctional/parallel/FileSync

                                                
                                                

                                                
                                                
=== CONT  TestFunctional/parallel/FileSync
functional_test.go:1925: Checking for existence of /etc/test/nested/copy/258301/hosts within VM
functional_test.go:1927: (dbg) Run:  out/minikube-linux-arm64 -p functional-616785 ssh "sudo cat /etc/test/nested/copy/258301/hosts"
functional_test.go:1932: file sync test content: Test file for checking file sync process
--- PASS: TestFunctional/parallel/FileSync (0.43s)

                                                
                                    
TestFunctional/parallel/CertSync (2.47s)

                                                
                                                
=== RUN   TestFunctional/parallel/CertSync
=== PAUSE TestFunctional/parallel/CertSync

                                                
                                                

                                                
                                                
=== CONT  TestFunctional/parallel/CertSync
functional_test.go:1968: Checking for existence of /etc/ssl/certs/258301.pem within VM
functional_test.go:1969: (dbg) Run:  out/minikube-linux-arm64 -p functional-616785 ssh "sudo cat /etc/ssl/certs/258301.pem"
functional_test.go:1968: Checking for existence of /usr/share/ca-certificates/258301.pem within VM
functional_test.go:1969: (dbg) Run:  out/minikube-linux-arm64 -p functional-616785 ssh "sudo cat /usr/share/ca-certificates/258301.pem"
functional_test.go:1968: Checking for existence of /etc/ssl/certs/51391683.0 within VM
functional_test.go:1969: (dbg) Run:  out/minikube-linux-arm64 -p functional-616785 ssh "sudo cat /etc/ssl/certs/51391683.0"
functional_test.go:1995: Checking for existence of /etc/ssl/certs/2583012.pem within VM
functional_test.go:1996: (dbg) Run:  out/minikube-linux-arm64 -p functional-616785 ssh "sudo cat /etc/ssl/certs/2583012.pem"
functional_test.go:1995: Checking for existence of /usr/share/ca-certificates/2583012.pem within VM
functional_test.go:1996: (dbg) Run:  out/minikube-linux-arm64 -p functional-616785 ssh "sudo cat /usr/share/ca-certificates/2583012.pem"
functional_test.go:1995: Checking for existence of /etc/ssl/certs/3ec20f2e.0 within VM
functional_test.go:1996: (dbg) Run:  out/minikube-linux-arm64 -p functional-616785 ssh "sudo cat /etc/ssl/certs/3ec20f2e.0"
--- PASS: TestFunctional/parallel/CertSync (2.47s)
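The cert sync check boils down to comparing a host certificate with the copy that lands inside the node at paths such as /etc/ssl/certs/258301.pem. A minimal sketch of that comparison, assuming the binary path and profile from this run; the local certificate path used here is a placeholder, not the harness's actual location.

package main

import (
	"bytes"
	"fmt"
	"os"
	"os/exec"
)

func main() {
	bin, profile := "out/minikube-linux-arm64", "functional-616785"
	localCert := "/path/to/258301.pem"      // placeholder for the host-side copy
	remoteCert := "/etc/ssl/certs/258301.pem" // path checked in the log

	want, err := os.ReadFile(localCert)
	if err != nil {
		fmt.Println("cannot read local cert:", err)
		return
	}

	// "minikube ssh sudo cat <file>" returns the file content from the node,
	// which should be byte-for-byte identical to the host copy.
	got, err := exec.Command(bin, "-p", profile, "ssh", "sudo cat "+remoteCert).Output()
	if err != nil {
		fmt.Println("cannot read cert inside node:", err)
		return
	}
	if bytes.Equal(bytes.TrimSpace(want), bytes.TrimSpace(got)) {
		fmt.Println("certificate is synced into the node")
	} else {
		fmt.Println("certificate differs between host and node")
	}
}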

                                                
                                    
TestFunctional/parallel/NodeLabels (0.14s)

                                                
                                                
=== RUN   TestFunctional/parallel/NodeLabels
=== PAUSE TestFunctional/parallel/NodeLabels

                                                
                                                

                                                
                                                
=== CONT  TestFunctional/parallel/NodeLabels
functional_test.go:218: (dbg) Run:  kubectl --context functional-616785 get nodes --output=go-template "--template='{{range $k, $v := (index .items 0).metadata.labels}}{{$k}} {{end}}'"
--- PASS: TestFunctional/parallel/NodeLabels (0.14s)

                                                
                                    
TestFunctional/parallel/NonActiveRuntimeDisabled (0.91s)

                                                
                                                
=== RUN   TestFunctional/parallel/NonActiveRuntimeDisabled
=== PAUSE TestFunctional/parallel/NonActiveRuntimeDisabled

                                                
                                                

                                                
                                                
=== CONT  TestFunctional/parallel/NonActiveRuntimeDisabled
functional_test.go:2023: (dbg) Run:  out/minikube-linux-arm64 -p functional-616785 ssh "sudo systemctl is-active docker"
functional_test.go:2023: (dbg) Non-zero exit: out/minikube-linux-arm64 -p functional-616785 ssh "sudo systemctl is-active docker": exit status 1 (415.496797ms)

                                                
                                                
-- stdout --
	inactive

                                                
                                                
-- /stdout --
** stderr ** 
	ssh: Process exited with status 3

                                                
                                                
** /stderr **
functional_test.go:2023: (dbg) Run:  out/minikube-linux-arm64 -p functional-616785 ssh "sudo systemctl is-active crio"
functional_test.go:2023: (dbg) Non-zero exit: out/minikube-linux-arm64 -p functional-616785 ssh "sudo systemctl is-active crio": exit status 1 (491.027976ms)

                                                
                                                
-- stdout --
	inactive

                                                
                                                
-- /stdout --
** stderr ** 
	ssh: Process exited with status 3

                                                
                                                
** /stderr **
--- PASS: TestFunctional/parallel/NonActiveRuntimeDisabled (0.91s)
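Since this job runs with containerd as the container runtime, the check above expects "systemctl is-active docker" and "systemctl is-active crio" to print "inactive" and exit non-zero inside the node (systemd reports anything other than an active unit with a non-zero status, which is where the exit codes in the log come from). A minimal sketch of the same probe, assuming the binary path and profile from this run:

package main

import (
	"fmt"
	"os/exec"
	"strings"
)

func main() {
	bin, profile := "out/minikube-linux-arm64", "functional-616785"

	for _, rt := range []string{"docker", "crio"} {
		// Output() still returns captured stdout when the command exits non-zero,
		// so we can inspect both the printed state and the error together.
		out, err := exec.Command(bin, "-p", profile, "ssh",
			"sudo systemctl is-active "+rt).Output()
		state := strings.TrimSpace(string(out))

		// With containerd active, the other runtimes should be disabled:
		// systemctl prints "inactive" and exits non-zero.
		if err != nil && state == "inactive" {
			fmt.Printf("%s is disabled, as expected\n", rt)
		} else {
			fmt.Printf("unexpected state for %s: %q (err=%v)\n", rt, state, err)
		}
	}
}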

                                                
                                    
TestFunctional/parallel/License (0.43s)

                                                
                                                
=== RUN   TestFunctional/parallel/License
=== PAUSE TestFunctional/parallel/License

                                                
                                                

                                                
                                                
=== CONT  TestFunctional/parallel/License
functional_test.go:2284: (dbg) Run:  out/minikube-linux-arm64 license
--- PASS: TestFunctional/parallel/License (0.43s)

                                                
                                    
TestFunctional/parallel/TunnelCmd/serial/StartTunnel (0s)

                                                
                                                
=== RUN   TestFunctional/parallel/TunnelCmd/serial/StartTunnel
functional_test_tunnel_test.go:129: (dbg) daemon: [out/minikube-linux-arm64 -p functional-616785 tunnel --alsologtostderr]
--- PASS: TestFunctional/parallel/TunnelCmd/serial/StartTunnel (0.00s)

                                                
                                    
TestFunctional/parallel/ServiceCmd/DeployApp (7.26s)

                                                
                                                
=== RUN   TestFunctional/parallel/ServiceCmd/DeployApp
functional_test.go:1436: (dbg) Run:  kubectl --context functional-616785 create deployment hello-node --image=registry.k8s.io/echoserver-arm:1.8
functional_test.go:1444: (dbg) Run:  kubectl --context functional-616785 expose deployment hello-node --type=NodePort --port=8080
functional_test.go:1449: (dbg) TestFunctional/parallel/ServiceCmd/DeployApp: waiting 10m0s for pods matching "app=hello-node" in namespace "default" ...
helpers_test.go:344: "hello-node-759d89bdcc-xmnfh" [97e4a460-c1e9-4233-82dd-03ec79b56c16] Pending / Ready:ContainersNotReady (containers with unready status: [echoserver-arm]) / ContainersReady:ContainersNotReady (containers with unready status: [echoserver-arm])
helpers_test.go:344: "hello-node-759d89bdcc-xmnfh" [97e4a460-c1e9-4233-82dd-03ec79b56c16] Running
functional_test.go:1449: (dbg) TestFunctional/parallel/ServiceCmd/DeployApp: app=hello-node healthy within 7.012458727s
--- PASS: TestFunctional/parallel/ServiceCmd/DeployApp (7.26s)
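The deploy step is plain kubectl: create a deployment from the echoserver image, expose it as a NodePort on 8080, then wait for the app=hello-node pod. A minimal sketch of those three steps, assuming kubectl is on PATH and the functional-616785 context from this run; the "kubectl wait" call and its 120s timeout stand in for the harness's own pod polling.

package main

import (
	"fmt"
	"os/exec"
)

// run executes kubectl against the test's context and prints combined output;
// it is an illustrative helper, not the harness's own.
func run(args ...string) error {
	cmd := exec.Command("kubectl", append([]string{"--context", "functional-616785"}, args...)...)
	out, err := cmd.CombinedOutput()
	fmt.Printf("$ kubectl %v\n%s", args, out)
	return err
}

func main() {
	// Same three steps as the log: create the deployment, expose it on a
	// NodePort, then wait for the pod behind the app=hello-node selector.
	steps := [][]string{
		{"create", "deployment", "hello-node", "--image=registry.k8s.io/echoserver-arm:1.8"},
		{"expose", "deployment", "hello-node", "--type=NodePort", "--port=8080"},
		{"wait", "--for=condition=Ready", "pod", "-l", "app=hello-node", "--timeout=120s"},
	}
	for _, s := range steps {
		if err := run(s...); err != nil {
			fmt.Println("step failed:", err)
			return
		}
	}
	fmt.Println("hello-node is serving on its NodePort")
}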

                                                
                                    
TestFunctional/parallel/ServiceCmd/List (0.42s)

                                                
                                                
=== RUN   TestFunctional/parallel/ServiceCmd/List
functional_test.go:1458: (dbg) Run:  out/minikube-linux-arm64 -p functional-616785 service list
E1201 19:02:08.231640  258301 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/addons-488129/client.crt: no such file or directory
--- PASS: TestFunctional/parallel/ServiceCmd/List (0.42s)

                                                
                                    
TestFunctional/parallel/ServiceCmd/JSONOutput (0.41s)

                                                
                                                
=== RUN   TestFunctional/parallel/ServiceCmd/JSONOutput
functional_test.go:1488: (dbg) Run:  out/minikube-linux-arm64 -p functional-616785 service list -o json
functional_test.go:1493: Took "412.947812ms" to run "out/minikube-linux-arm64 -p functional-616785 service list -o json"
--- PASS: TestFunctional/parallel/ServiceCmd/JSONOutput (0.41s)

                                                
                                    
TestFunctional/parallel/ServiceCmd/HTTPS (0.45s)

                                                
                                                
=== RUN   TestFunctional/parallel/ServiceCmd/HTTPS
functional_test.go:1508: (dbg) Run:  out/minikube-linux-arm64 -p functional-616785 service --namespace=default --https --url hello-node
functional_test.go:1521: found endpoint: https://192.168.49.2:30591
--- PASS: TestFunctional/parallel/ServiceCmd/HTTPS (0.45s)

                                                
                                    
TestFunctional/parallel/ServiceCmd/Format (0.43s)

                                                
                                                
=== RUN   TestFunctional/parallel/ServiceCmd/Format
functional_test.go:1539: (dbg) Run:  out/minikube-linux-arm64 -p functional-616785 service hello-node --url --format={{.IP}}
--- PASS: TestFunctional/parallel/ServiceCmd/Format (0.43s)

                                                
                                    
TestFunctional/parallel/ServiceCmd/URL (0.43s)

                                                
                                                
=== RUN   TestFunctional/parallel/ServiceCmd/URL
functional_test.go:1558: (dbg) Run:  out/minikube-linux-arm64 -p functional-616785 service hello-node --url
functional_test.go:1564: found endpoint for hello-node: http://192.168.49.2:30591
--- PASS: TestFunctional/parallel/ServiceCmd/URL (0.43s)
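"minikube service hello-node --url" prints the NodePort endpoint (http://192.168.49.2:30591 in this run). A minimal sketch that resolves the URL the same way and then issues one GET against it, assuming the binary path and profile from this run; the HTTP request is an illustrative extra step, not something this subtest performs.

package main

import (
	"fmt"
	"io"
	"net/http"
	"os/exec"
	"strings"
)

func main() {
	// Ask minikube for the NodePort URL of the service, then hit it once.
	out, err := exec.Command("out/minikube-linux-arm64", "-p", "functional-616785",
		"service", "hello-node", "--url").Output()
	if err != nil {
		fmt.Println("could not resolve service URL:", err)
		return
	}
	url := strings.TrimSpace(string(out)) // e.g. http://192.168.49.2:30591

	resp, err := http.Get(url)
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close()
	body, _ := io.ReadAll(resp.Body)
	fmt.Printf("GET %s -> %d (%d bytes)\n", url, resp.StatusCode, len(body))
}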

                                                
                                    
TestFunctional/parallel/ProfileCmd/profile_not_create (0.48s)

                                                
                                                
=== RUN   TestFunctional/parallel/ProfileCmd/profile_not_create
functional_test.go:1269: (dbg) Run:  out/minikube-linux-arm64 profile lis
functional_test.go:1274: (dbg) Run:  out/minikube-linux-arm64 profile list --output json
--- PASS: TestFunctional/parallel/ProfileCmd/profile_not_create (0.48s)

                                                
                                    
TestFunctional/parallel/ProfileCmd/profile_list (0.47s)

                                                
                                                
=== RUN   TestFunctional/parallel/ProfileCmd/profile_list
functional_test.go:1309: (dbg) Run:  out/minikube-linux-arm64 profile list
functional_test.go:1314: Took "385.456025ms" to run "out/minikube-linux-arm64 profile list"
functional_test.go:1323: (dbg) Run:  out/minikube-linux-arm64 profile list -l
functional_test.go:1328: Took "80.57978ms" to run "out/minikube-linux-arm64 profile list -l"
--- PASS: TestFunctional/parallel/ProfileCmd/profile_list (0.47s)

                                                
                                    
TestFunctional/parallel/ProfileCmd/profile_json_output (0.44s)

                                                
                                                
=== RUN   TestFunctional/parallel/ProfileCmd/profile_json_output
functional_test.go:1360: (dbg) Run:  out/minikube-linux-arm64 profile list -o json
functional_test.go:1365: Took "362.38874ms" to run "out/minikube-linux-arm64 profile list -o json"
functional_test.go:1373: (dbg) Run:  out/minikube-linux-arm64 profile list -o json --light
functional_test.go:1378: Took "74.01498ms" to run "out/minikube-linux-arm64 profile list -o json --light"
--- PASS: TestFunctional/parallel/ProfileCmd/profile_json_output (0.44s)

                                                
                                    
TestFunctional/parallel/MountCmd/any-port (7.75s)

                                                
                                                
=== RUN   TestFunctional/parallel/MountCmd/any-port
functional_test_mount_test.go:73: (dbg) daemon: [out/minikube-linux-arm64 mount -p functional-616785 /tmp/TestFunctionalparallelMountCmdany-port710478125/001:/mount-9p --alsologtostderr -v=1]
functional_test_mount_test.go:107: wrote "test-1701457331375267443" to /tmp/TestFunctionalparallelMountCmdany-port710478125/001/created-by-test
functional_test_mount_test.go:107: wrote "test-1701457331375267443" to /tmp/TestFunctionalparallelMountCmdany-port710478125/001/created-by-test-removed-by-pod
functional_test_mount_test.go:107: wrote "test-1701457331375267443" to /tmp/TestFunctionalparallelMountCmdany-port710478125/001/test-1701457331375267443
functional_test_mount_test.go:115: (dbg) Run:  out/minikube-linux-arm64 -p functional-616785 ssh "findmnt -T /mount-9p | grep 9p"
functional_test_mount_test.go:115: (dbg) Non-zero exit: out/minikube-linux-arm64 -p functional-616785 ssh "findmnt -T /mount-9p | grep 9p": exit status 1 (402.455057ms)

                                                
                                                
** stderr ** 
	ssh: Process exited with status 1

                                                
                                                
** /stderr **
functional_test_mount_test.go:115: (dbg) Run:  out/minikube-linux-arm64 -p functional-616785 ssh "findmnt -T /mount-9p | grep 9p"
functional_test_mount_test.go:129: (dbg) Run:  out/minikube-linux-arm64 -p functional-616785 ssh -- ls -la /mount-9p
functional_test_mount_test.go:133: guest mount directory contents
total 2
-rw-r--r-- 1 docker docker 24 Dec  1 19:02 created-by-test
-rw-r--r-- 1 docker docker 24 Dec  1 19:02 created-by-test-removed-by-pod
-rw-r--r-- 1 docker docker 24 Dec  1 19:02 test-1701457331375267443
functional_test_mount_test.go:137: (dbg) Run:  out/minikube-linux-arm64 -p functional-616785 ssh cat /mount-9p/test-1701457331375267443
functional_test_mount_test.go:148: (dbg) Run:  kubectl --context functional-616785 replace --force -f testdata/busybox-mount-test.yaml
functional_test_mount_test.go:153: (dbg) TestFunctional/parallel/MountCmd/any-port: waiting 4m0s for pods matching "integration-test=busybox-mount" in namespace "default" ...
helpers_test.go:344: "busybox-mount" [a1724734-916a-4acf-93ba-7689a74bf4eb] Pending
helpers_test.go:344: "busybox-mount" [a1724734-916a-4acf-93ba-7689a74bf4eb] Pending / Ready:ContainersNotReady (containers with unready status: [mount-munger]) / ContainersReady:ContainersNotReady (containers with unready status: [mount-munger])
helpers_test.go:344: "busybox-mount" [a1724734-916a-4acf-93ba-7689a74bf4eb] Pending: Initialized:PodCompleted / Ready:PodCompleted / ContainersReady:PodCompleted
helpers_test.go:344: "busybox-mount" [a1724734-916a-4acf-93ba-7689a74bf4eb] Succeeded: Initialized:PodCompleted / Ready:PodCompleted / ContainersReady:PodCompleted
functional_test_mount_test.go:153: (dbg) TestFunctional/parallel/MountCmd/any-port: integration-test=busybox-mount healthy within 4.021078456s
functional_test_mount_test.go:169: (dbg) Run:  kubectl --context functional-616785 logs busybox-mount
functional_test_mount_test.go:181: (dbg) Run:  out/minikube-linux-arm64 -p functional-616785 ssh stat /mount-9p/created-by-test
functional_test_mount_test.go:181: (dbg) Run:  out/minikube-linux-arm64 -p functional-616785 ssh stat /mount-9p/created-by-pod
functional_test_mount_test.go:90: (dbg) Run:  out/minikube-linux-arm64 -p functional-616785 ssh "sudo umount -f /mount-9p"
functional_test_mount_test.go:94: (dbg) stopping [out/minikube-linux-arm64 mount -p functional-616785 /tmp/TestFunctionalparallelMountCmdany-port710478125/001:/mount-9p --alsologtostderr -v=1] ...
--- PASS: TestFunctional/parallel/MountCmd/any-port (7.75s)
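The mount test starts "minikube mount <hostdir>:/mount-9p" as a background daemon and polls findmnt over ssh until the 9p mount appears, which is why the first probe in the log exits non-zero. A minimal sketch of that start-and-poll pattern, assuming the binary path and profile from this run; the host directory and retry budget are illustrative.

package main

import (
	"fmt"
	"os"
	"os/exec"
	"time"
)

func main() {
	bin, profile := "out/minikube-linux-arm64", "functional-616785"
	hostDir := "/tmp/demo-mount" // illustrative host directory

	if err := os.MkdirAll(hostDir, 0o755); err != nil {
		fmt.Println("could not create host dir:", err)
		return
	}

	// Start "minikube mount" as a long-running background process, the way the
	// harness runs it as a daemon.
	mount := exec.Command(bin, "mount", "-p", profile, hostDir+":/mount-9p")
	if err := mount.Start(); err != nil {
		fmt.Println("could not start mount:", err)
		return
	}
	defer mount.Process.Kill() // the harness stops the daemon when it is done

	// Poll findmnt over ssh until the 9p mount shows up; the first probe in the
	// log fails simply because the daemon has not finished mounting yet.
	for i := 0; i < 15; i++ {
		if exec.Command(bin, "-p", profile, "ssh",
			"findmnt -T /mount-9p | grep 9p").Run() == nil {
			fmt.Println("/mount-9p is mounted over 9p")
			return
		}
		time.Sleep(time.Second)
	}
	fmt.Println("mount never appeared")
}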

                                                
                                    
TestFunctional/parallel/TunnelCmd/serial/DeleteTunnel (0.11s)

                                                
                                                
=== RUN   TestFunctional/parallel/TunnelCmd/serial/DeleteTunnel
functional_test_tunnel_test.go:434: (dbg) stopping [out/minikube-linux-arm64 -p functional-616785 tunnel --alsologtostderr] ...
--- PASS: TestFunctional/parallel/TunnelCmd/serial/DeleteTunnel (0.11s)

                                                
                                    
TestFunctional/parallel/MountCmd/specific-port (2.77s)

                                                
                                                
=== RUN   TestFunctional/parallel/MountCmd/specific-port
functional_test_mount_test.go:213: (dbg) daemon: [out/minikube-linux-arm64 mount -p functional-616785 /tmp/TestFunctionalparallelMountCmdspecific-port3977377613/001:/mount-9p --alsologtostderr -v=1 --port 46464]
functional_test_mount_test.go:243: (dbg) Run:  out/minikube-linux-arm64 -p functional-616785 ssh "findmnt -T /mount-9p | grep 9p"
functional_test_mount_test.go:243: (dbg) Non-zero exit: out/minikube-linux-arm64 -p functional-616785 ssh "findmnt -T /mount-9p | grep 9p": exit status 1 (650.144382ms)

                                                
                                                
** stderr ** 
	ssh: Process exited with status 1

                                                
                                                
** /stderr **
functional_test_mount_test.go:243: (dbg) Run:  out/minikube-linux-arm64 -p functional-616785 ssh "findmnt -T /mount-9p | grep 9p"
functional_test_mount_test.go:257: (dbg) Run:  out/minikube-linux-arm64 -p functional-616785 ssh -- ls -la /mount-9p
functional_test_mount_test.go:261: guest mount directory contents
total 0
functional_test_mount_test.go:263: (dbg) stopping [out/minikube-linux-arm64 mount -p functional-616785 /tmp/TestFunctionalparallelMountCmdspecific-port3977377613/001:/mount-9p --alsologtostderr -v=1 --port 46464] ...
functional_test_mount_test.go:264: reading mount text
functional_test_mount_test.go:278: done reading mount text
functional_test_mount_test.go:230: (dbg) Run:  out/minikube-linux-arm64 -p functional-616785 ssh "sudo umount -f /mount-9p"
functional_test_mount_test.go:230: (dbg) Non-zero exit: out/minikube-linux-arm64 -p functional-616785 ssh "sudo umount -f /mount-9p": exit status 1 (352.105693ms)

                                                
                                                
-- stdout --
	umount: /mount-9p: not mounted.

                                                
                                                
-- /stdout --
** stderr ** 
	ssh: Process exited with status 32

                                                
                                                
** /stderr **
functional_test_mount_test.go:232: "out/minikube-linux-arm64 -p functional-616785 ssh \"sudo umount -f /mount-9p\"": exit status 1
functional_test_mount_test.go:234: (dbg) stopping [out/minikube-linux-arm64 mount -p functional-616785 /tmp/TestFunctionalparallelMountCmdspecific-port3977377613/001:/mount-9p --alsologtostderr -v=1 --port 46464] ...
--- PASS: TestFunctional/parallel/MountCmd/specific-port (2.77s)

                                                
                                    
TestFunctional/parallel/MountCmd/VerifyCleanup (2.87s)

                                                
                                                
=== RUN   TestFunctional/parallel/MountCmd/VerifyCleanup
functional_test_mount_test.go:298: (dbg) daemon: [out/minikube-linux-arm64 mount -p functional-616785 /tmp/TestFunctionalparallelMountCmdVerifyCleanup1521090816/001:/mount1 --alsologtostderr -v=1]
functional_test_mount_test.go:298: (dbg) daemon: [out/minikube-linux-arm64 mount -p functional-616785 /tmp/TestFunctionalparallelMountCmdVerifyCleanup1521090816/001:/mount2 --alsologtostderr -v=1]
functional_test_mount_test.go:298: (dbg) daemon: [out/minikube-linux-arm64 mount -p functional-616785 /tmp/TestFunctionalparallelMountCmdVerifyCleanup1521090816/001:/mount3 --alsologtostderr -v=1]
functional_test_mount_test.go:325: (dbg) Run:  out/minikube-linux-arm64 -p functional-616785 ssh "findmnt -T" /mount1
functional_test_mount_test.go:325: (dbg) Non-zero exit: out/minikube-linux-arm64 -p functional-616785 ssh "findmnt -T" /mount1: exit status 1 (1.097995759s)

                                                
                                                
** stderr ** 
	ssh: Process exited with status 1

                                                
                                                
** /stderr **
functional_test_mount_test.go:325: (dbg) Run:  out/minikube-linux-arm64 -p functional-616785 ssh "findmnt -T" /mount1
functional_test_mount_test.go:325: (dbg) Run:  out/minikube-linux-arm64 -p functional-616785 ssh "findmnt -T" /mount2
functional_test_mount_test.go:325: (dbg) Run:  out/minikube-linux-arm64 -p functional-616785 ssh "findmnt -T" /mount3
functional_test_mount_test.go:370: (dbg) Run:  out/minikube-linux-arm64 mount -p functional-616785 --kill=true
functional_test_mount_test.go:313: (dbg) stopping [out/minikube-linux-arm64 mount -p functional-616785 /tmp/TestFunctionalparallelMountCmdVerifyCleanup1521090816/001:/mount1 --alsologtostderr -v=1] ...
helpers_test.go:490: unable to find parent, assuming dead: process does not exist
functional_test_mount_test.go:313: (dbg) stopping [out/minikube-linux-arm64 mount -p functional-616785 /tmp/TestFunctionalparallelMountCmdVerifyCleanup1521090816/001:/mount2 --alsologtostderr -v=1] ...
helpers_test.go:490: unable to find parent, assuming dead: process does not exist
functional_test_mount_test.go:313: (dbg) stopping [out/minikube-linux-arm64 mount -p functional-616785 /tmp/TestFunctionalparallelMountCmdVerifyCleanup1521090816/001:/mount3 --alsologtostderr -v=1] ...
helpers_test.go:490: unable to find parent, assuming dead: process does not exist
--- PASS: TestFunctional/parallel/MountCmd/VerifyCleanup (2.87s)

                                                
                                    
TestFunctional/parallel/Version/short (0.09s)

                                                
                                                
=== RUN   TestFunctional/parallel/Version/short
=== PAUSE TestFunctional/parallel/Version/short

                                                
                                                

                                                
                                                
=== CONT  TestFunctional/parallel/Version/short
functional_test.go:2252: (dbg) Run:  out/minikube-linux-arm64 -p functional-616785 version --short
--- PASS: TestFunctional/parallel/Version/short (0.09s)

                                                
                                    
TestFunctional/parallel/Version/components (1.43s)

                                                
                                                
=== RUN   TestFunctional/parallel/Version/components
=== PAUSE TestFunctional/parallel/Version/components

                                                
                                                

                                                
                                                
=== CONT  TestFunctional/parallel/Version/components
functional_test.go:2266: (dbg) Run:  out/minikube-linux-arm64 -p functional-616785 version -o=json --components
functional_test.go:2266: (dbg) Done: out/minikube-linux-arm64 -p functional-616785 version -o=json --components: (1.431094108s)
--- PASS: TestFunctional/parallel/Version/components (1.43s)

                                                
                                    
TestFunctional/parallel/ImageCommands/ImageListShort (0.29s)

                                                
                                                
=== RUN   TestFunctional/parallel/ImageCommands/ImageListShort
=== PAUSE TestFunctional/parallel/ImageCommands/ImageListShort

                                                
                                                

                                                
                                                
=== CONT  TestFunctional/parallel/ImageCommands/ImageListShort
functional_test.go:260: (dbg) Run:  out/minikube-linux-arm64 -p functional-616785 image ls --format short --alsologtostderr
functional_test.go:265: (dbg) Stdout: out/minikube-linux-arm64 -p functional-616785 image ls --format short --alsologtostderr:
registry.k8s.io/pause:latest
registry.k8s.io/pause:3.9
registry.k8s.io/pause:3.3
registry.k8s.io/pause:3.1
registry.k8s.io/kube-scheduler:v1.28.4
registry.k8s.io/kube-proxy:v1.28.4
registry.k8s.io/kube-controller-manager:v1.28.4
registry.k8s.io/kube-apiserver:v1.28.4
registry.k8s.io/etcd:3.5.9-0
registry.k8s.io/echoserver-arm:1.8
registry.k8s.io/coredns/coredns:v1.10.1
gcr.io/k8s-minikube/storage-provisioner:v5
gcr.io/k8s-minikube/busybox:1.28.4-glibc
docker.io/library/nginx:latest
docker.io/library/minikube-local-cache-test:functional-616785
docker.io/kindest/kindnetd:v20230809-80a64d96
functional_test.go:268: (dbg) Stderr: out/minikube-linux-arm64 -p functional-616785 image ls --format short --alsologtostderr:
I1201 19:02:44.648044  292065 out.go:296] Setting OutFile to fd 1 ...
I1201 19:02:44.648630  292065 out.go:343] TERM=,COLORTERM=, which probably does not support color
I1201 19:02:44.648669  292065 out.go:309] Setting ErrFile to fd 2...
I1201 19:02:44.648690  292065 out.go:343] TERM=,COLORTERM=, which probably does not support color
I1201 19:02:44.648996  292065 root.go:338] Updating PATH: /home/jenkins/minikube-integration/17703-252966/.minikube/bin
I1201 19:02:44.649734  292065 config.go:182] Loaded profile config "functional-616785": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.28.4
I1201 19:02:44.649943  292065 config.go:182] Loaded profile config "functional-616785": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.28.4
I1201 19:02:44.650602  292065 cli_runner.go:164] Run: docker container inspect functional-616785 --format={{.State.Status}}
I1201 19:02:44.674321  292065 ssh_runner.go:195] Run: systemctl --version
I1201 19:02:44.674373  292065 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" functional-616785
I1201 19:02:44.705619  292065 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33098 SSHKeyPath:/home/jenkins/minikube-integration/17703-252966/.minikube/machines/functional-616785/id_rsa Username:docker}
I1201 19:02:44.806140  292065 ssh_runner.go:195] Run: sudo crictl images --output json
--- PASS: TestFunctional/parallel/ImageCommands/ImageListShort (0.29s)

                                                
                                    
TestFunctional/parallel/ImageCommands/ImageListTable (0.42s)

                                                
                                                
=== RUN   TestFunctional/parallel/ImageCommands/ImageListTable
=== PAUSE TestFunctional/parallel/ImageCommands/ImageListTable

                                                
                                                

                                                
                                                
=== CONT  TestFunctional/parallel/ImageCommands/ImageListTable
functional_test.go:260: (dbg) Run:  out/minikube-linux-arm64 -p functional-616785 image ls --format table --alsologtostderr
functional_test.go:265: (dbg) Stdout: out/minikube-linux-arm64 -p functional-616785 image ls --format table --alsologtostderr:
|---------------------------------------------|--------------------|---------------|--------|
|                    Image                    |        Tag         |   Image ID    |  Size  |
|---------------------------------------------|--------------------|---------------|--------|
| registry.k8s.io/pause                       | 3.1                | sha256:8057e0 | 262kB  |
| registry.k8s.io/pause                       | 3.3                | sha256:3d1873 | 249kB  |
| registry.k8s.io/pause                       | latest             | sha256:8cb209 | 71.3kB |
| gcr.io/k8s-minikube/busybox                 | 1.28.4-glibc       | sha256:1611cd | 1.94MB |
| registry.k8s.io/kube-proxy                  | v1.28.4            | sha256:3ca3ca | 22MB   |
| docker.io/library/nginx                     | latest             | sha256:5628e5 | 67.2MB |
| gcr.io/k8s-minikube/storage-provisioner     | v5                 | sha256:ba04bb | 8.03MB |
| registry.k8s.io/coredns/coredns             | v1.10.1            | sha256:97e046 | 14.6MB |
| registry.k8s.io/echoserver-arm              | 1.8                | sha256:72565b | 45.3MB |
| docker.io/kindest/kindnetd                  | v20230809-80a64d96 | sha256:04b4ea | 25.3MB |
| registry.k8s.io/kube-apiserver              | v1.28.4            | sha256:04b4c4 | 31.6MB |
| registry.k8s.io/kube-controller-manager     | v1.28.4            | sha256:9961cb | 30.4MB |
| registry.k8s.io/kube-scheduler              | v1.28.4            | sha256:05c284 | 17.1MB |
| registry.k8s.io/pause                       | 3.9                | sha256:829e9d | 268kB  |
| docker.io/library/minikube-local-cache-test | functional-616785  | sha256:a05381 | 1.01kB |
| registry.k8s.io/etcd                        | 3.5.9-0            | sha256:9cdd64 | 86.5MB |
|---------------------------------------------|--------------------|---------------|--------|
functional_test.go:268: (dbg) Stderr: out/minikube-linux-arm64 -p functional-616785 image ls --format table --alsologtostderr:
I1201 19:02:45.023469  292125 out.go:296] Setting OutFile to fd 1 ...
I1201 19:02:45.023765  292125 out.go:343] TERM=,COLORTERM=, which probably does not support color
I1201 19:02:45.023774  292125 out.go:309] Setting ErrFile to fd 2...
I1201 19:02:45.023780  292125 out.go:343] TERM=,COLORTERM=, which probably does not support color
I1201 19:02:45.024222  292125 root.go:338] Updating PATH: /home/jenkins/minikube-integration/17703-252966/.minikube/bin
I1201 19:02:45.025059  292125 config.go:182] Loaded profile config "functional-616785": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.28.4
I1201 19:02:45.025210  292125 config.go:182] Loaded profile config "functional-616785": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.28.4
I1201 19:02:45.025899  292125 cli_runner.go:164] Run: docker container inspect functional-616785 --format={{.State.Status}}
I1201 19:02:45.065849  292125 ssh_runner.go:195] Run: systemctl --version
I1201 19:02:45.065916  292125 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" functional-616785
I1201 19:02:45.092872  292125 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33098 SSHKeyPath:/home/jenkins/minikube-integration/17703-252966/.minikube/machines/functional-616785/id_rsa Username:docker}
I1201 19:02:45.236514  292125 ssh_runner.go:195] Run: sudo crictl images --output json
--- PASS: TestFunctional/parallel/ImageCommands/ImageListTable (0.42s)

                                                
                                    
TestFunctional/parallel/ImageCommands/ImageListJson (0.41s)

                                                
                                                
=== RUN   TestFunctional/parallel/ImageCommands/ImageListJson
=== PAUSE TestFunctional/parallel/ImageCommands/ImageListJson

                                                
                                                

                                                
                                                
=== CONT  TestFunctional/parallel/ImageCommands/ImageListJson
functional_test.go:260: (dbg) Run:  out/minikube-linux-arm64 -p functional-616785 image ls --format json --alsologtostderr
functional_test.go:265: (dbg) Stdout: out/minikube-linux-arm64 -p functional-616785 image ls --format json --alsologtostderr:
[{"id":"sha256:5628e5ea3c17fa1cbf49692edf41d5a1cdf198922898e6ffb29c19768dca8fd3","repoDigests":["docker.io/library/nginx@sha256:10d1f5b58f74683ad34eb29287e07dab1e90f10af243f151bb50aa5dbb4d62ee"],"repoTags":["docker.io/library/nginx:latest"],"size":"67241575"},{"id":"sha256:1611cd07b61d57dbbfebe6db242513fd51e1c02d20ba08af17a45837d86a8a8c","repoDigests":["gcr.io/k8s-minikube/busybox@sha256:2d03e6ceeb99250061dd110530b0ece7998cd84121f952adef120ea7c5a6f00e"],"repoTags":["gcr.io/k8s-minikube/busybox:1.28.4-glibc"],"size":"1935750"},{"id":"sha256:ba04bb24b95753201135cbc420b233c1b0b9fa2e1fd21d28319c348c33fbcde6","repoDigests":["gcr.io/k8s-minikube/storage-provisioner@sha256:18eb69d1418e854ad5a19e399310e52808a8321e4c441c1dddad8977a0d7a944"],"repoTags":["gcr.io/k8s-minikube/storage-provisioner:v5"],"size":"8034419"},{"id":"sha256:9961cbceaf234d59b7dcf8a197a024f3e3ce4b7fe2b67c2378efd3d209ca994b","repoDigests":["registry.k8s.io/kube-controller-manager@sha256:65486c8c338f96dc022dd1a0abe8763e38f35095b84b208c78f44d9
e99447d1c"],"repoTags":["registry.k8s.io/kube-controller-manager:v1.28.4"],"size":"30360149"},{"id":"sha256:05c284c929889d88306fdb3dd14ee2d0132543740f9e247685243214fc3d2c54","repoDigests":["registry.k8s.io/kube-scheduler@sha256:335bba9e861b88fa8b7bb9250bcd69b7a33f83da4fee93f9fc0eedc6f34e28ba"],"repoTags":["registry.k8s.io/kube-scheduler:v1.28.4"],"size":"17082307"},{"id":"sha256:04b4eaa3d3db8abea4b9ea4d10a0926ebb31db5a31b673aa1cf7a4b3af4add26","repoDigests":["docker.io/kindest/kindnetd@sha256:4a58d1cd2b45bf2460762a51a4aa9c80861f460af35800c05baab0573f923052"],"repoTags":["docker.io/kindest/kindnetd:v20230809-80a64d96"],"size":"25324029"},{"id":"sha256:20b332c9a70d8516d849d1ac23eff5800cbb2f263d379f0ec11ee908db6b25a8","repoDigests":["docker.io/kubernetesui/dashboard@sha256:2e500d29e9d5f4a086b908eb8dfe7ecac57d2ab09d65b24f588b1d449841ef93"],"repoTags":[],"size":"74084559"},{"id":"sha256:a05381ee34b38cae54c9eb6259b92a2eb4ecd3a17da6d2da551363bc531f0f61","repoDigests":[],"repoTags":["docker.io/library/minikube-local-
cache-test:functional-616785"],"size":"1006"},{"id":"sha256:97e04611ad43405a2e5863ae17c6f1bc9181bdefdaa78627c432ef754a4eb108","repoDigests":["registry.k8s.io/coredns/coredns@sha256:a0ead06651cf580044aeb0a0feba63591858fb2e43ade8c9dea45a6a89ae7e5e"],"repoTags":["registry.k8s.io/coredns/coredns:v1.10.1"],"size":"14557471"},{"id":"sha256:9cdd6470f48c8b127530b7ce6ea4b3524137984481e48bcde619735890840ace","repoDigests":["registry.k8s.io/etcd@sha256:e013d0d5e4e25d00c61a7ff839927a1f36479678f11e49502b53a5e0b14f10c3"],"repoTags":["registry.k8s.io/etcd:3.5.9-0"],"size":"86464836"},{"id":"sha256:829e9de338bd5fdd3f16f68f83a9fb288fbc8453e881e5d5cfd0f6f2ff72b43e","repoDigests":["registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097"],"repoTags":["registry.k8s.io/pause:3.9"],"size":"268051"},{"id":"sha256:8cb2091f603e75187e2f6226c5901d12e00b1d1f778c6471ae4578e8a1c4724a","repoDigests":[],"repoTags":["registry.k8s.io/pause:latest"],"size":"71300"},{"id":"sha256:04b4c447bb9d4840af3bf7e8363
97379d65df87c86e55dcd27f31a8d11df2419","repoDigests":["registry.k8s.io/kube-apiserver@sha256:5b28a364467cf7e134343bb3ee2c6d40682b473a743a72142c7bbe25767d36eb"],"repoTags":["registry.k8s.io/kube-apiserver:v1.28.4"],"size":"31582354"},{"id":"sha256:3ca3ca488cf13fde14cfc4b3ffde0c53a8c161b030f4a444a797fba6aef38c39","repoDigests":["registry.k8s.io/kube-proxy@sha256:e63408a0f5068a7e9d4b34fd72b4a2b0e5512509b53cd2123a37fc991b0ef532"],"repoTags":["registry.k8s.io/kube-proxy:v1.28.4"],"size":"22001357"},{"id":"sha256:8057e0500773a37cde2cff041eb13ebd68c748419a2fbfd1dfb5bf38696cc8e5","repoDigests":[],"repoTags":["registry.k8s.io/pause:3.1"],"size":"262191"},{"id":"sha256:a422e0e982356f6c1cf0e5bb7b733363caae3992a07c99951fbcc73e58ed656a","repoDigests":["docker.io/kubernetesui/metrics-scraper@sha256:76049887f07a0476dc93efc2d3569b9529bf982b22d29f356092ce206e98765c"],"repoTags":[],"size":"18306114"},{"id":"sha256:72565bf5bbedfb62e9d21afa2b1221b2c7a5e05b746dae33430bc550d3f87beb","repoDigests":["registry.k8s.io/echoserver-arm@s
ha256:b33d4cdf6ed097f4e9b77b135d83a596ab73c6268b0342648818eb85f5edfdb5"],"repoTags":["registry.k8s.io/echoserver-arm:1.8"],"size":"45324675"},{"id":"sha256:3d18732f8686cc3c878055d99a05fa80289502fa496b36b6a0fe0f77206a7300","repoDigests":[],"repoTags":["registry.k8s.io/pause:3.3"],"size":"249461"}]
functional_test.go:268: (dbg) Stderr: out/minikube-linux-arm64 -p functional-616785 image ls --format json --alsologtostderr:
I1201 19:02:44.948316  292120 out.go:296] Setting OutFile to fd 1 ...
I1201 19:02:44.948556  292120 out.go:343] TERM=,COLORTERM=, which probably does not support color
I1201 19:02:44.948568  292120 out.go:309] Setting ErrFile to fd 2...
I1201 19:02:44.948574  292120 out.go:343] TERM=,COLORTERM=, which probably does not support color
I1201 19:02:44.948832  292120 root.go:338] Updating PATH: /home/jenkins/minikube-integration/17703-252966/.minikube/bin
I1201 19:02:44.949488  292120 config.go:182] Loaded profile config "functional-616785": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.28.4
I1201 19:02:44.949626  292120 config.go:182] Loaded profile config "functional-616785": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.28.4
I1201 19:02:44.950219  292120 cli_runner.go:164] Run: docker container inspect functional-616785 --format={{.State.Status}}
I1201 19:02:44.972421  292120 ssh_runner.go:195] Run: systemctl --version
I1201 19:02:44.972495  292120 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" functional-616785
I1201 19:02:45.003778  292120 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33098 SSHKeyPath:/home/jenkins/minikube-integration/17703-252966/.minikube/machines/functional-616785/id_rsa Username:docker}
I1201 19:02:45.184353  292120 ssh_runner.go:195] Run: sudo crictl images --output json
--- PASS: TestFunctional/parallel/ImageCommands/ImageListJson (0.41s)
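The JSON listing above is an array of objects with id, repoDigests, repoTags and size fields. A minimal sketch that decodes that output into Go structs, assuming the binary path and profile from this run; the struct covers only the fields visible in the log.

package main

import (
	"encoding/json"
	"fmt"
	"os/exec"
)

// image mirrors the fields visible in the JSON listing above.
type image struct {
	ID          string   `json:"id"`
	RepoDigests []string `json:"repoDigests"`
	RepoTags    []string `json:"repoTags"`
	Size        string   `json:"size"`
}

func main() {
	// "image ls --format json" prints a JSON array like the one in the log;
	// decode it and print a tag plus the reported size for each entry.
	out, err := exec.Command("out/minikube-linux-arm64", "-p", "functional-616785",
		"image", "ls", "--format", "json").Output()
	if err != nil {
		fmt.Println("image ls failed:", err)
		return
	}
	var images []image
	if err := json.Unmarshal(out, &images); err != nil {
		fmt.Println("unexpected output:", err)
		return
	}
	for _, img := range images {
		tag := "<none>"
		if len(img.RepoTags) > 0 {
			tag = img.RepoTags[0]
		}
		fmt.Printf("%-60s %s bytes\n", tag, img.Size)
	}
}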

                                                
                                    
TestFunctional/parallel/ImageCommands/ImageListYaml (0.35s)

                                                
                                                
=== RUN   TestFunctional/parallel/ImageCommands/ImageListYaml
=== PAUSE TestFunctional/parallel/ImageCommands/ImageListYaml

                                                
                                                

                                                
                                                
=== CONT  TestFunctional/parallel/ImageCommands/ImageListYaml
functional_test.go:260: (dbg) Run:  out/minikube-linux-arm64 -p functional-616785 image ls --format yaml --alsologtostderr
functional_test.go:265: (dbg) Stdout: out/minikube-linux-arm64 -p functional-616785 image ls --format yaml --alsologtostderr:
- id: sha256:97e04611ad43405a2e5863ae17c6f1bc9181bdefdaa78627c432ef754a4eb108
repoDigests:
- registry.k8s.io/coredns/coredns@sha256:a0ead06651cf580044aeb0a0feba63591858fb2e43ade8c9dea45a6a89ae7e5e
repoTags:
- registry.k8s.io/coredns/coredns:v1.10.1
size: "14557471"
- id: sha256:72565bf5bbedfb62e9d21afa2b1221b2c7a5e05b746dae33430bc550d3f87beb
repoDigests:
- registry.k8s.io/echoserver-arm@sha256:b33d4cdf6ed097f4e9b77b135d83a596ab73c6268b0342648818eb85f5edfdb5
repoTags:
- registry.k8s.io/echoserver-arm:1.8
size: "45324675"
- id: sha256:9961cbceaf234d59b7dcf8a197a024f3e3ce4b7fe2b67c2378efd3d209ca994b
repoDigests:
- registry.k8s.io/kube-controller-manager@sha256:65486c8c338f96dc022dd1a0abe8763e38f35095b84b208c78f44d9e99447d1c
repoTags:
- registry.k8s.io/kube-controller-manager:v1.28.4
size: "30360149"
- id: sha256:829e9de338bd5fdd3f16f68f83a9fb288fbc8453e881e5d5cfd0f6f2ff72b43e
repoDigests:
- registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097
repoTags:
- registry.k8s.io/pause:3.9
size: "268051"
- id: sha256:04b4eaa3d3db8abea4b9ea4d10a0926ebb31db5a31b673aa1cf7a4b3af4add26
repoDigests:
- docker.io/kindest/kindnetd@sha256:4a58d1cd2b45bf2460762a51a4aa9c80861f460af35800c05baab0573f923052
repoTags:
- docker.io/kindest/kindnetd:v20230809-80a64d96
size: "25324029"
- id: sha256:3ca3ca488cf13fde14cfc4b3ffde0c53a8c161b030f4a444a797fba6aef38c39
repoDigests:
- registry.k8s.io/kube-proxy@sha256:e63408a0f5068a7e9d4b34fd72b4a2b0e5512509b53cd2123a37fc991b0ef532
repoTags:
- registry.k8s.io/kube-proxy:v1.28.4
size: "22001357"
- id: sha256:8057e0500773a37cde2cff041eb13ebd68c748419a2fbfd1dfb5bf38696cc8e5
repoDigests: []
repoTags:
- registry.k8s.io/pause:3.1
size: "262191"
- id: sha256:9cdd6470f48c8b127530b7ce6ea4b3524137984481e48bcde619735890840ace
repoDigests:
- registry.k8s.io/etcd@sha256:e013d0d5e4e25d00c61a7ff839927a1f36479678f11e49502b53a5e0b14f10c3
repoTags:
- registry.k8s.io/etcd:3.5.9-0
size: "86464836"
- id: sha256:05c284c929889d88306fdb3dd14ee2d0132543740f9e247685243214fc3d2c54
repoDigests:
- registry.k8s.io/kube-scheduler@sha256:335bba9e861b88fa8b7bb9250bcd69b7a33f83da4fee93f9fc0eedc6f34e28ba
repoTags:
- registry.k8s.io/kube-scheduler:v1.28.4
size: "17082307"
- id: sha256:3d18732f8686cc3c878055d99a05fa80289502fa496b36b6a0fe0f77206a7300
repoDigests: []
repoTags:
- registry.k8s.io/pause:3.3
size: "249461"
- id: sha256:8cb2091f603e75187e2f6226c5901d12e00b1d1f778c6471ae4578e8a1c4724a
repoDigests: []
repoTags:
- registry.k8s.io/pause:latest
size: "71300"
- id: sha256:a422e0e982356f6c1cf0e5bb7b733363caae3992a07c99951fbcc73e58ed656a
repoDigests:
- docker.io/kubernetesui/metrics-scraper@sha256:76049887f07a0476dc93efc2d3569b9529bf982b22d29f356092ce206e98765c
repoTags: []
size: "18306114"
- id: sha256:1611cd07b61d57dbbfebe6db242513fd51e1c02d20ba08af17a45837d86a8a8c
repoDigests:
- gcr.io/k8s-minikube/busybox@sha256:2d03e6ceeb99250061dd110530b0ece7998cd84121f952adef120ea7c5a6f00e
repoTags:
- gcr.io/k8s-minikube/busybox:1.28.4-glibc
size: "1935750"
- id: sha256:ba04bb24b95753201135cbc420b233c1b0b9fa2e1fd21d28319c348c33fbcde6
repoDigests:
- gcr.io/k8s-minikube/storage-provisioner@sha256:18eb69d1418e854ad5a19e399310e52808a8321e4c441c1dddad8977a0d7a944
repoTags:
- gcr.io/k8s-minikube/storage-provisioner:v5
size: "8034419"
- id: sha256:04b4c447bb9d4840af3bf7e836397379d65df87c86e55dcd27f31a8d11df2419
repoDigests:
- registry.k8s.io/kube-apiserver@sha256:5b28a364467cf7e134343bb3ee2c6d40682b473a743a72142c7bbe25767d36eb
repoTags:
- registry.k8s.io/kube-apiserver:v1.28.4
size: "31582354"
- id: sha256:20b332c9a70d8516d849d1ac23eff5800cbb2f263d379f0ec11ee908db6b25a8
repoDigests:
- docker.io/kubernetesui/dashboard@sha256:2e500d29e9d5f4a086b908eb8dfe7ecac57d2ab09d65b24f588b1d449841ef93
repoTags: []
size: "74084559"
- id: sha256:a05381ee34b38cae54c9eb6259b92a2eb4ecd3a17da6d2da551363bc531f0f61
repoDigests: []
repoTags:
- docker.io/library/minikube-local-cache-test:functional-616785
size: "1006"
- id: sha256:5628e5ea3c17fa1cbf49692edf41d5a1cdf198922898e6ffb29c19768dca8fd3
repoDigests:
- docker.io/library/nginx@sha256:10d1f5b58f74683ad34eb29287e07dab1e90f10af243f151bb50aa5dbb4d62ee
repoTags:
- docker.io/library/nginx:latest
size: "67241575"

                                                
                                                
functional_test.go:268: (dbg) Stderr: out/minikube-linux-arm64 -p functional-616785 image ls --format yaml --alsologtostderr:
I1201 19:02:44.660177  292066 out.go:296] Setting OutFile to fd 1 ...
I1201 19:02:44.660456  292066 out.go:343] TERM=,COLORTERM=, which probably does not support color
I1201 19:02:44.660477  292066 out.go:309] Setting ErrFile to fd 2...
I1201 19:02:44.660483  292066 out.go:343] TERM=,COLORTERM=, which probably does not support color
I1201 19:02:44.660899  292066 root.go:338] Updating PATH: /home/jenkins/minikube-integration/17703-252966/.minikube/bin
I1201 19:02:44.662063  292066 config.go:182] Loaded profile config "functional-616785": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.28.4
I1201 19:02:44.662234  292066 config.go:182] Loaded profile config "functional-616785": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.28.4
I1201 19:02:44.663193  292066 cli_runner.go:164] Run: docker container inspect functional-616785 --format={{.State.Status}}
I1201 19:02:44.691709  292066 ssh_runner.go:195] Run: systemctl --version
I1201 19:02:44.691769  292066 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" functional-616785
I1201 19:02:44.725685  292066 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33098 SSHKeyPath:/home/jenkins/minikube-integration/17703-252966/.minikube/machines/functional-616785/id_rsa Username:docker}
I1201 19:02:44.830975  292066 ssh_runner.go:195] Run: sudo crictl images --output json
--- PASS: TestFunctional/parallel/ImageCommands/ImageListYaml (0.35s)

                                                
                                    
TestFunctional/parallel/ImageCommands/ImageBuild (3.08s)

                                                
                                                
=== RUN   TestFunctional/parallel/ImageCommands/ImageBuild
=== PAUSE TestFunctional/parallel/ImageCommands/ImageBuild

                                                
                                                

                                                
                                                
=== CONT  TestFunctional/parallel/ImageCommands/ImageBuild
functional_test.go:307: (dbg) Run:  out/minikube-linux-arm64 -p functional-616785 ssh pgrep buildkitd
functional_test.go:307: (dbg) Non-zero exit: out/minikube-linux-arm64 -p functional-616785 ssh pgrep buildkitd: exit status 1 (375.085077ms)

                                                
                                                
** stderr ** 
	ssh: Process exited with status 1

                                                
                                                
** /stderr **
functional_test.go:314: (dbg) Run:  out/minikube-linux-arm64 -p functional-616785 image build -t localhost/my-image:functional-616785 testdata/build --alsologtostderr
functional_test.go:314: (dbg) Done: out/minikube-linux-arm64 -p functional-616785 image build -t localhost/my-image:functional-616785 testdata/build --alsologtostderr: (2.403720725s)
functional_test.go:322: (dbg) Stderr: out/minikube-linux-arm64 -p functional-616785 image build -t localhost/my-image:functional-616785 testdata/build --alsologtostderr:
I1201 19:02:45.711769  292226 out.go:296] Setting OutFile to fd 1 ...
I1201 19:02:45.712321  292226 out.go:343] TERM=,COLORTERM=, which probably does not support color
I1201 19:02:45.712328  292226 out.go:309] Setting ErrFile to fd 2...
I1201 19:02:45.712334  292226 out.go:343] TERM=,COLORTERM=, which probably does not support color
I1201 19:02:45.712621  292226 root.go:338] Updating PATH: /home/jenkins/minikube-integration/17703-252966/.minikube/bin
I1201 19:02:45.713299  292226 config.go:182] Loaded profile config "functional-616785": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.28.4
I1201 19:02:45.715867  292226 config.go:182] Loaded profile config "functional-616785": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.28.4
I1201 19:02:45.716699  292226 cli_runner.go:164] Run: docker container inspect functional-616785 --format={{.State.Status}}
I1201 19:02:45.736107  292226 ssh_runner.go:195] Run: systemctl --version
I1201 19:02:45.736165  292226 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" functional-616785
I1201 19:02:45.755951  292226 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33098 SSHKeyPath:/home/jenkins/minikube-integration/17703-252966/.minikube/machines/functional-616785/id_rsa Username:docker}
I1201 19:02:45.858554  292226 build_images.go:151] Building image from path: /tmp/build.4212939881.tar
I1201 19:02:45.858628  292226 ssh_runner.go:195] Run: sudo mkdir -p /var/lib/minikube/build
I1201 19:02:45.869557  292226 ssh_runner.go:195] Run: stat -c "%s %y" /var/lib/minikube/build/build.4212939881.tar
I1201 19:02:45.874085  292226 ssh_runner.go:352] existence check for /var/lib/minikube/build/build.4212939881.tar: stat -c "%s %y" /var/lib/minikube/build/build.4212939881.tar: Process exited with status 1
stdout:

                                                
                                                
stderr:
stat: cannot statx '/var/lib/minikube/build/build.4212939881.tar': No such file or directory
I1201 19:02:45.874156  292226 ssh_runner.go:362] scp /tmp/build.4212939881.tar --> /var/lib/minikube/build/build.4212939881.tar (3072 bytes)
I1201 19:02:45.905032  292226 ssh_runner.go:195] Run: sudo mkdir -p /var/lib/minikube/build/build.4212939881
I1201 19:02:45.915932  292226 ssh_runner.go:195] Run: sudo tar -C /var/lib/minikube/build/build.4212939881 -xf /var/lib/minikube/build/build.4212939881.tar
I1201 19:02:45.927126  292226 containerd.go:378] Building image: /var/lib/minikube/build/build.4212939881
I1201 19:02:45.927205  292226 ssh_runner.go:195] Run: sudo buildctl build --frontend dockerfile.v0 --local context=/var/lib/minikube/build/build.4212939881 --local dockerfile=/var/lib/minikube/build/build.4212939881 --output type=image,name=localhost/my-image:functional-616785
#1 [internal] load build definition from Dockerfile
#1 transferring dockerfile: 97B done
#1 DONE 0.0s

                                                
                                                
#2 [internal] load metadata for gcr.io/k8s-minikube/busybox:latest
#2 DONE 1.0s

                                                
                                                
#3 [internal] load .dockerignore
#3 transferring context: 2B done
#3 DONE 0.0s

                                                
                                                
#4 [internal] load build context
#4 transferring context: 62B done
#4 DONE 0.0s

                                                
                                                
#5 [1/3] FROM gcr.io/k8s-minikube/busybox:latest@sha256:ca5ae90100d50772da31f3b5016209e25ad61972404e2ccd83d44f10dee7e79b
#5 resolve gcr.io/k8s-minikube/busybox:latest@sha256:ca5ae90100d50772da31f3b5016209e25ad61972404e2ccd83d44f10dee7e79b 0.0s done
#5 DONE 0.1s

                                                
                                                
#5 [1/3] FROM gcr.io/k8s-minikube/busybox:latest@sha256:ca5ae90100d50772da31f3b5016209e25ad61972404e2ccd83d44f10dee7e79b
#5 sha256:a01966dde7f8d5ba10b6d87e776c7c8fb5a5f6bfa678874bd28b33b1fc6dba34 828.50kB / 828.50kB 0.2s done
#5 extracting sha256:a01966dde7f8d5ba10b6d87e776c7c8fb5a5f6bfa678874bd28b33b1fc6dba34
#5 extracting sha256:a01966dde7f8d5ba10b6d87e776c7c8fb5a5f6bfa678874bd28b33b1fc6dba34 0.1s done
#5 DONE 0.4s

                                                
                                                
#6 [2/3] RUN true
#6 DONE 0.2s

                                                
                                                
#7 [3/3] ADD content.txt /
#7 DONE 0.0s

                                                
                                                
#8 exporting to image
#8 exporting layers 0.1s done
#8 exporting manifest sha256:055722ac86c06a1b375cdf2184004a61f7d23ce85f1bf8ac77b38f61ef4c27e2
#8 exporting manifest sha256:055722ac86c06a1b375cdf2184004a61f7d23ce85f1bf8ac77b38f61ef4c27e2 0.0s done
#8 exporting config sha256:c0f8b24656d3b4fb7053a1bc41aaf7dec2fa8199c3abae35d06c04c0ec2cc58b 0.0s done
#8 naming to localhost/my-image:functional-616785 done
#8 DONE 0.1s
I1201 19:02:48.002521  292226 ssh_runner.go:235] Completed: sudo buildctl build --frontend dockerfile.v0 --local context=/var/lib/minikube/build/build.4212939881 --local dockerfile=/var/lib/minikube/build/build.4212939881 --output type=image,name=localhost/my-image:functional-616785: (2.075285275s)
I1201 19:02:48.002591  292226 ssh_runner.go:195] Run: sudo rm -rf /var/lib/minikube/build/build.4212939881
I1201 19:02:48.019515  292226 ssh_runner.go:195] Run: sudo rm -f /var/lib/minikube/build/build.4212939881.tar
I1201 19:02:48.031886  292226 build_images.go:207] Built localhost/my-image:functional-616785 from /tmp/build.4212939881.tar
I1201 19:02:48.031921  292226 build_images.go:123] succeeded building to: functional-616785
I1201 19:02:48.031927  292226 build_images.go:124] failed building to: 
functional_test.go:447: (dbg) Run:  out/minikube-linux-arm64 -p functional-616785 image ls
--- PASS: TestFunctional/parallel/ImageCommands/ImageBuild (3.08s)
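[editor's note] The build above stages the tarred context under /var/lib/minikube/build and then drives BuildKit directly. A minimal sketch of the same buildctl invocation, with the path and image name copied from the log (in the test this runs inside the node via ssh_runner, not as a local process):

// Sketch of the buildctl command shape seen in the log above. Assumes the
// build context has already been extracted to buildDir and that buildctl is
// available; this is an illustration, not minikube's implementation.
package main

import (
	"log"
	"os/exec"
)

func main() {
	buildDir := "/var/lib/minikube/build/build.4212939881" // path from the log; normally generated per build
	cmd := exec.Command("sudo", "buildctl", "build",
		"--frontend", "dockerfile.v0",
		"--local", "context="+buildDir,
		"--local", "dockerfile="+buildDir,
		"--output", "type=image,name=localhost/my-image:functional-616785",
	)
	out, err := cmd.CombinedOutput()
	if err != nil {
		log.Fatalf("buildctl failed: %v\n%s", err, out)
	}
	log.Printf("build output:\n%s", out)
}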

                                                
                                    
TestFunctional/parallel/ImageCommands/Setup (2s)

                                                
                                                
=== RUN   TestFunctional/parallel/ImageCommands/Setup
functional_test.go:341: (dbg) Run:  docker pull gcr.io/google-containers/addon-resizer:1.8.8
2023/12/01 19:02:27 [DEBUG] GET http://127.0.0.1:36195/api/v1/namespaces/kubernetes-dashboard/services/http:kubernetes-dashboard:/proxy/
functional_test.go:341: (dbg) Done: docker pull gcr.io/google-containers/addon-resizer:1.8.8: (1.972452954s)
functional_test.go:346: (dbg) Run:  docker tag gcr.io/google-containers/addon-resizer:1.8.8 gcr.io/google-containers/addon-resizer:functional-616785
--- PASS: TestFunctional/parallel/ImageCommands/Setup (2.00s)

                                                
                                    
TestFunctional/parallel/UpdateContextCmd/no_changes (0.28s)

                                                
                                                
=== RUN   TestFunctional/parallel/UpdateContextCmd/no_changes
=== PAUSE TestFunctional/parallel/UpdateContextCmd/no_changes

                                                
                                                

                                                
                                                
=== CONT  TestFunctional/parallel/UpdateContextCmd/no_changes
functional_test.go:2115: (dbg) Run:  out/minikube-linux-arm64 -p functional-616785 update-context --alsologtostderr -v=2
--- PASS: TestFunctional/parallel/UpdateContextCmd/no_changes (0.28s)

                                                
                                    
TestFunctional/parallel/UpdateContextCmd/no_minikube_cluster (0.28s)

                                                
                                                
=== RUN   TestFunctional/parallel/UpdateContextCmd/no_minikube_cluster
=== PAUSE TestFunctional/parallel/UpdateContextCmd/no_minikube_cluster

                                                
                                                

                                                
                                                
=== CONT  TestFunctional/parallel/UpdateContextCmd/no_minikube_cluster
functional_test.go:2115: (dbg) Run:  out/minikube-linux-arm64 -p functional-616785 update-context --alsologtostderr -v=2
--- PASS: TestFunctional/parallel/UpdateContextCmd/no_minikube_cluster (0.28s)

                                                
                                    
TestFunctional/parallel/UpdateContextCmd/no_clusters (0.26s)

                                                
                                                
=== RUN   TestFunctional/parallel/UpdateContextCmd/no_clusters
=== PAUSE TestFunctional/parallel/UpdateContextCmd/no_clusters

                                                
                                                

                                                
                                                
=== CONT  TestFunctional/parallel/UpdateContextCmd/no_clusters
functional_test.go:2115: (dbg) Run:  out/minikube-linux-arm64 -p functional-616785 update-context --alsologtostderr -v=2
--- PASS: TestFunctional/parallel/UpdateContextCmd/no_clusters (0.26s)

                                                
                                    
TestFunctional/parallel/ImageCommands/ImageRemove (0.53s)

                                                
                                                
=== RUN   TestFunctional/parallel/ImageCommands/ImageRemove
functional_test.go:391: (dbg) Run:  out/minikube-linux-arm64 -p functional-616785 image rm gcr.io/google-containers/addon-resizer:functional-616785 --alsologtostderr
functional_test.go:447: (dbg) Run:  out/minikube-linux-arm64 -p functional-616785 image ls
--- PASS: TestFunctional/parallel/ImageCommands/ImageRemove (0.53s)

                                                
                                    
TestFunctional/parallel/ImageCommands/ImageSaveDaemon (0.64s)

                                                
                                                
=== RUN   TestFunctional/parallel/ImageCommands/ImageSaveDaemon
functional_test.go:418: (dbg) Run:  docker rmi gcr.io/google-containers/addon-resizer:functional-616785
functional_test.go:423: (dbg) Run:  out/minikube-linux-arm64 -p functional-616785 image save --daemon gcr.io/google-containers/addon-resizer:functional-616785 --alsologtostderr
functional_test.go:428: (dbg) Run:  docker image inspect gcr.io/google-containers/addon-resizer:functional-616785
--- PASS: TestFunctional/parallel/ImageCommands/ImageSaveDaemon (0.64s)

                                                
                                    
TestFunctional/delete_addon-resizer_images (0.08s)

                                                
                                                
=== RUN   TestFunctional/delete_addon-resizer_images
functional_test.go:189: (dbg) Run:  docker rmi -f gcr.io/google-containers/addon-resizer:1.8.8
functional_test.go:189: (dbg) Run:  docker rmi -f gcr.io/google-containers/addon-resizer:functional-616785
--- PASS: TestFunctional/delete_addon-resizer_images (0.08s)

                                                
                                    
TestFunctional/delete_my-image_image (0.02s)

                                                
                                                
=== RUN   TestFunctional/delete_my-image_image
functional_test.go:197: (dbg) Run:  docker rmi -f localhost/my-image:functional-616785
--- PASS: TestFunctional/delete_my-image_image (0.02s)

                                                
                                    
TestFunctional/delete_minikube_cached_images (0.02s)

                                                
                                                
=== RUN   TestFunctional/delete_minikube_cached_images
functional_test.go:205: (dbg) Run:  docker rmi -f minikube-local-cache-test:functional-616785
--- PASS: TestFunctional/delete_minikube_cached_images (0.02s)

                                                
                                    
TestIngressAddonLegacy/StartLegacyK8sCluster (94.65s)

                                                
                                                
=== RUN   TestIngressAddonLegacy/StartLegacyK8sCluster
ingress_addon_legacy_test.go:39: (dbg) Run:  out/minikube-linux-arm64 start -p ingress-addon-legacy-853196 --kubernetes-version=v1.18.20 --memory=4096 --wait=true --alsologtostderr -v=5 --driver=docker  --container-runtime=containerd
E1201 19:04:24.386729  258301 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/addons-488129/client.crt: no such file or directory
ingress_addon_legacy_test.go:39: (dbg) Done: out/minikube-linux-arm64 start -p ingress-addon-legacy-853196 --kubernetes-version=v1.18.20 --memory=4096 --wait=true --alsologtostderr -v=5 --driver=docker  --container-runtime=containerd: (1m34.6472799s)
--- PASS: TestIngressAddonLegacy/StartLegacyK8sCluster (94.65s)

                                                
                                    
TestIngressAddonLegacy/serial/ValidateIngressAddonActivation (10.59s)

                                                
                                                
=== RUN   TestIngressAddonLegacy/serial/ValidateIngressAddonActivation
ingress_addon_legacy_test.go:70: (dbg) Run:  out/minikube-linux-arm64 -p ingress-addon-legacy-853196 addons enable ingress --alsologtostderr -v=5
ingress_addon_legacy_test.go:70: (dbg) Done: out/minikube-linux-arm64 -p ingress-addon-legacy-853196 addons enable ingress --alsologtostderr -v=5: (10.593701543s)
--- PASS: TestIngressAddonLegacy/serial/ValidateIngressAddonActivation (10.59s)

                                                
                                    
TestIngressAddonLegacy/serial/ValidateIngressDNSAddonActivation (0.7s)

                                                
                                                
=== RUN   TestIngressAddonLegacy/serial/ValidateIngressDNSAddonActivation
ingress_addon_legacy_test.go:79: (dbg) Run:  out/minikube-linux-arm64 -p ingress-addon-legacy-853196 addons enable ingress-dns --alsologtostderr -v=5
--- PASS: TestIngressAddonLegacy/serial/ValidateIngressDNSAddonActivation (0.70s)

                                                
                                    
TestJSONOutput/start/Command (85.36s)

                                                
                                                
=== RUN   TestJSONOutput/start/Command
json_output_test.go:63: (dbg) Run:  out/minikube-linux-arm64 start -p json-output-356974 --output=json --user=testUser --memory=2200 --wait=true --driver=docker  --container-runtime=containerd
E1201 19:05:39.932929  258301 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/functional-616785/client.crt: no such file or directory
E1201 19:06:00.413120  258301 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/functional-616785/client.crt: no such file or directory
E1201 19:06:41.373374  258301 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/functional-616785/client.crt: no such file or directory
json_output_test.go:63: (dbg) Done: out/minikube-linux-arm64 start -p json-output-356974 --output=json --user=testUser --memory=2200 --wait=true --driver=docker  --container-runtime=containerd: (1m25.356343169s)
--- PASS: TestJSONOutput/start/Command (85.36s)

                                                
                                    
TestJSONOutput/start/Audit (0s)

                                                
                                                
=== RUN   TestJSONOutput/start/Audit
--- PASS: TestJSONOutput/start/Audit (0.00s)

                                                
                                    
TestJSONOutput/start/parallel/DistinctCurrentSteps (0s)

                                                
                                                
=== RUN   TestJSONOutput/start/parallel/DistinctCurrentSteps
=== PAUSE TestJSONOutput/start/parallel/DistinctCurrentSteps

                                                
                                                

                                                
                                                
=== CONT  TestJSONOutput/start/parallel/DistinctCurrentSteps
--- PASS: TestJSONOutput/start/parallel/DistinctCurrentSteps (0.00s)

                                                
                                    
TestJSONOutput/start/parallel/IncreasingCurrentSteps (0s)

                                                
                                                
=== RUN   TestJSONOutput/start/parallel/IncreasingCurrentSteps
=== PAUSE TestJSONOutput/start/parallel/IncreasingCurrentSteps

                                                
                                                

                                                
                                                
=== CONT  TestJSONOutput/start/parallel/IncreasingCurrentSteps
--- PASS: TestJSONOutput/start/parallel/IncreasingCurrentSteps (0.00s)

                                                
                                    
TestJSONOutput/pause/Command (0.84s)

                                                
                                                
=== RUN   TestJSONOutput/pause/Command
json_output_test.go:63: (dbg) Run:  out/minikube-linux-arm64 pause -p json-output-356974 --output=json --user=testUser
--- PASS: TestJSONOutput/pause/Command (0.84s)

                                                
                                    
TestJSONOutput/pause/Audit (0s)

                                                
                                                
=== RUN   TestJSONOutput/pause/Audit
--- PASS: TestJSONOutput/pause/Audit (0.00s)

                                                
                                    
TestJSONOutput/pause/parallel/DistinctCurrentSteps (0s)

                                                
                                                
=== RUN   TestJSONOutput/pause/parallel/DistinctCurrentSteps
=== PAUSE TestJSONOutput/pause/parallel/DistinctCurrentSteps

                                                
                                                

                                                
                                                
=== CONT  TestJSONOutput/pause/parallel/DistinctCurrentSteps
--- PASS: TestJSONOutput/pause/parallel/DistinctCurrentSteps (0.00s)

                                                
                                    
TestJSONOutput/pause/parallel/IncreasingCurrentSteps (0s)

                                                
                                                
=== RUN   TestJSONOutput/pause/parallel/IncreasingCurrentSteps
=== PAUSE TestJSONOutput/pause/parallel/IncreasingCurrentSteps

                                                
                                                

                                                
                                                
=== CONT  TestJSONOutput/pause/parallel/IncreasingCurrentSteps
--- PASS: TestJSONOutput/pause/parallel/IncreasingCurrentSteps (0.00s)

                                                
                                    
TestJSONOutput/unpause/Command (0.77s)

                                                
                                                
=== RUN   TestJSONOutput/unpause/Command
json_output_test.go:63: (dbg) Run:  out/minikube-linux-arm64 unpause -p json-output-356974 --output=json --user=testUser
--- PASS: TestJSONOutput/unpause/Command (0.77s)

                                                
                                    
TestJSONOutput/unpause/Audit (0s)

                                                
                                                
=== RUN   TestJSONOutput/unpause/Audit
--- PASS: TestJSONOutput/unpause/Audit (0.00s)

                                                
                                    
TestJSONOutput/unpause/parallel/DistinctCurrentSteps (0s)

                                                
                                                
=== RUN   TestJSONOutput/unpause/parallel/DistinctCurrentSteps
=== PAUSE TestJSONOutput/unpause/parallel/DistinctCurrentSteps

                                                
                                                

                                                
                                                
=== CONT  TestJSONOutput/unpause/parallel/DistinctCurrentSteps
--- PASS: TestJSONOutput/unpause/parallel/DistinctCurrentSteps (0.00s)

                                                
                                    
TestJSONOutput/unpause/parallel/IncreasingCurrentSteps (0s)

                                                
                                                
=== RUN   TestJSONOutput/unpause/parallel/IncreasingCurrentSteps
=== PAUSE TestJSONOutput/unpause/parallel/IncreasingCurrentSteps

                                                
                                                

                                                
                                                
=== CONT  TestJSONOutput/unpause/parallel/IncreasingCurrentSteps
--- PASS: TestJSONOutput/unpause/parallel/IncreasingCurrentSteps (0.00s)

                                                
                                    
TestJSONOutput/stop/Command (5.81s)

                                                
                                                
=== RUN   TestJSONOutput/stop/Command
json_output_test.go:63: (dbg) Run:  out/minikube-linux-arm64 stop -p json-output-356974 --output=json --user=testUser
json_output_test.go:63: (dbg) Done: out/minikube-linux-arm64 stop -p json-output-356974 --output=json --user=testUser: (5.809295668s)
--- PASS: TestJSONOutput/stop/Command (5.81s)

                                                
                                    
TestJSONOutput/stop/Audit (0s)

                                                
                                                
=== RUN   TestJSONOutput/stop/Audit
--- PASS: TestJSONOutput/stop/Audit (0.00s)

                                                
                                    
TestJSONOutput/stop/parallel/DistinctCurrentSteps (0s)

                                                
                                                
=== RUN   TestJSONOutput/stop/parallel/DistinctCurrentSteps
=== PAUSE TestJSONOutput/stop/parallel/DistinctCurrentSteps

                                                
                                                

                                                
                                                
=== CONT  TestJSONOutput/stop/parallel/DistinctCurrentSteps
--- PASS: TestJSONOutput/stop/parallel/DistinctCurrentSteps (0.00s)

                                                
                                    
TestJSONOutput/stop/parallel/IncreasingCurrentSteps (0s)

                                                
                                                
=== RUN   TestJSONOutput/stop/parallel/IncreasingCurrentSteps
=== PAUSE TestJSONOutput/stop/parallel/IncreasingCurrentSteps

                                                
                                                

                                                
                                                
=== CONT  TestJSONOutput/stop/parallel/IncreasingCurrentSteps
--- PASS: TestJSONOutput/stop/parallel/IncreasingCurrentSteps (0.00s)

                                                
                                    
TestErrorJSONOutput (0.27s)

                                                
                                                
=== RUN   TestErrorJSONOutput
json_output_test.go:160: (dbg) Run:  out/minikube-linux-arm64 start -p json-output-error-455909 --memory=2200 --output=json --wait=true --driver=fail
json_output_test.go:160: (dbg) Non-zero exit: out/minikube-linux-arm64 start -p json-output-error-455909 --memory=2200 --output=json --wait=true --driver=fail: exit status 56 (103.494316ms)

                                                
                                                
-- stdout --
	{"specversion":"1.0","id":"e198c55b-b91c-4d5d-9b2a-e4ac1af3eef9","source":"https://minikube.sigs.k8s.io/","type":"io.k8s.sigs.minikube.step","datacontenttype":"application/json","data":{"currentstep":"0","message":"[json-output-error-455909] minikube v1.32.0 on Ubuntu 20.04 (arm64)","name":"Initial Minikube Setup","totalsteps":"19"}}
	{"specversion":"1.0","id":"f615c375-cdf3-466b-bb71-3884baf76693","source":"https://minikube.sigs.k8s.io/","type":"io.k8s.sigs.minikube.info","datacontenttype":"application/json","data":{"message":"MINIKUBE_LOCATION=17703"}}
	{"specversion":"1.0","id":"0afcd8c8-9ef5-4fa5-ab4c-1f4b92198a55","source":"https://minikube.sigs.k8s.io/","type":"io.k8s.sigs.minikube.info","datacontenttype":"application/json","data":{"message":"MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true"}}
	{"specversion":"1.0","id":"5bb4f319-f2e3-4634-b46f-6fd5641247d8","source":"https://minikube.sigs.k8s.io/","type":"io.k8s.sigs.minikube.info","datacontenttype":"application/json","data":{"message":"KUBECONFIG=/home/jenkins/minikube-integration/17703-252966/kubeconfig"}}
	{"specversion":"1.0","id":"c7f4047a-89e9-493d-8598-f01f1baf50cb","source":"https://minikube.sigs.k8s.io/","type":"io.k8s.sigs.minikube.info","datacontenttype":"application/json","data":{"message":"MINIKUBE_HOME=/home/jenkins/minikube-integration/17703-252966/.minikube"}}
	{"specversion":"1.0","id":"f2d472b1-f520-4add-b55f-53bc9387e453","source":"https://minikube.sigs.k8s.io/","type":"io.k8s.sigs.minikube.info","datacontenttype":"application/json","data":{"message":"MINIKUBE_BIN=out/minikube-linux-arm64"}}
	{"specversion":"1.0","id":"b673c934-1345-4857-a5b1-96ad614b6a0a","source":"https://minikube.sigs.k8s.io/","type":"io.k8s.sigs.minikube.info","datacontenttype":"application/json","data":{"message":"MINIKUBE_FORCE_SYSTEMD="}}
	{"specversion":"1.0","id":"4b428ef9-77e3-4c23-b0c9-6d4c02b1918a","source":"https://minikube.sigs.k8s.io/","type":"io.k8s.sigs.minikube.error","datacontenttype":"application/json","data":{"advice":"","exitcode":"56","issues":"","message":"The driver 'fail' is not supported on linux/arm64","name":"DRV_UNSUPPORTED_OS","url":""}}

                                                
                                                
-- /stdout --
helpers_test.go:175: Cleaning up "json-output-error-455909" profile ...
helpers_test.go:178: (dbg) Run:  out/minikube-linux-arm64 delete -p json-output-error-455909
--- PASS: TestErrorJSONOutput (0.27s)
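[editor's note] Each stdout line above is a CloudEvents-style JSON envelope with a string-to-string data payload. A small sketch for decoding such a stream; the field names are taken from the events printed above (this mirrors the DRV_UNSUPPORTED_OS error event, it is not minikube's internal schema definition):

// Sketch: decoding the per-line JSON events emitted with --output=json.
// Assumes the event stream is piped in on stdin, e.g. from
// `minikube start --output=json ...`.
package main

import (
	"bufio"
	"encoding/json"
	"fmt"
	"os"
)

type event struct {
	SpecVersion     string            `json:"specversion"`
	ID              string            `json:"id"`
	Source          string            `json:"source"`
	Type            string            `json:"type"`
	DataContentType string            `json:"datacontenttype"`
	Data            map[string]string `json:"data"`
}

func main() {
	sc := bufio.NewScanner(os.Stdin)
	for sc.Scan() {
		var e event
		if err := json.Unmarshal(sc.Bytes(), &e); err != nil {
			continue // skip non-JSON lines
		}
		if e.Type == "io.k8s.sigs.minikube.error" {
			fmt.Printf("error %s: %s (exit code %s)\n", e.Data["name"], e.Data["message"], e.Data["exitcode"])
		}
	}
}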

                                                
                                    
TestKicCustomNetwork/create_custom_network (50.1s)

                                                
                                                
=== RUN   TestKicCustomNetwork/create_custom_network
kic_custom_network_test.go:57: (dbg) Run:  out/minikube-linux-arm64 start -p docker-network-929829 --network=
kic_custom_network_test.go:57: (dbg) Done: out/minikube-linux-arm64 start -p docker-network-929829 --network=: (47.989531632s)
kic_custom_network_test.go:150: (dbg) Run:  docker network ls --format {{.Name}}
helpers_test.go:175: Cleaning up "docker-network-929829" profile ...
helpers_test.go:178: (dbg) Run:  out/minikube-linux-arm64 delete -p docker-network-929829
helpers_test.go:178: (dbg) Done: out/minikube-linux-arm64 delete -p docker-network-929829: (2.08736417s)
--- PASS: TestKicCustomNetwork/create_custom_network (50.10s)

                                                
                                    
TestKicCustomNetwork/use_default_bridge_network (34.62s)

                                                
                                                
=== RUN   TestKicCustomNetwork/use_default_bridge_network
kic_custom_network_test.go:57: (dbg) Run:  out/minikube-linux-arm64 start -p docker-network-097214 --network=bridge
E1201 19:08:03.293618  258301 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/functional-616785/client.crt: no such file or directory
kic_custom_network_test.go:57: (dbg) Done: out/minikube-linux-arm64 start -p docker-network-097214 --network=bridge: (32.563980808s)
kic_custom_network_test.go:150: (dbg) Run:  docker network ls --format {{.Name}}
helpers_test.go:175: Cleaning up "docker-network-097214" profile ...
helpers_test.go:178: (dbg) Run:  out/minikube-linux-arm64 delete -p docker-network-097214
helpers_test.go:178: (dbg) Done: out/minikube-linux-arm64 delete -p docker-network-097214: (2.028840479s)
--- PASS: TestKicCustomNetwork/use_default_bridge_network (34.62s)

                                                
                                    
TestKicExistingNetwork (38.23s)

                                                
                                                
=== RUN   TestKicExistingNetwork
kic_custom_network_test.go:150: (dbg) Run:  docker network ls --format {{.Name}}
kic_custom_network_test.go:93: (dbg) Run:  out/minikube-linux-arm64 start -p existing-network-030017 --network=existing-network
kic_custom_network_test.go:93: (dbg) Done: out/minikube-linux-arm64 start -p existing-network-030017 --network=existing-network: (36.018754s)
helpers_test.go:175: Cleaning up "existing-network-030017" profile ...
helpers_test.go:178: (dbg) Run:  out/minikube-linux-arm64 delete -p existing-network-030017
helpers_test.go:178: (dbg) Done: out/minikube-linux-arm64 delete -p existing-network-030017: (2.042013709s)
--- PASS: TestKicExistingNetwork (38.23s)

                                                
                                    
TestKicCustomSubnet (35.5s)

                                                
                                                
=== RUN   TestKicCustomSubnet
kic_custom_network_test.go:112: (dbg) Run:  out/minikube-linux-arm64 start -p custom-subnet-222500 --subnet=192.168.60.0/24
E1201 19:09:24.386852  258301 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/addons-488129/client.crt: no such file or directory
E1201 19:09:37.131145  258301 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/ingress-addon-legacy-853196/client.crt: no such file or directory
E1201 19:09:37.136393  258301 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/ingress-addon-legacy-853196/client.crt: no such file or directory
E1201 19:09:37.146609  258301 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/ingress-addon-legacy-853196/client.crt: no such file or directory
E1201 19:09:37.166840  258301 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/ingress-addon-legacy-853196/client.crt: no such file or directory
E1201 19:09:37.207071  258301 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/ingress-addon-legacy-853196/client.crt: no such file or directory
E1201 19:09:37.287349  258301 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/ingress-addon-legacy-853196/client.crt: no such file or directory
E1201 19:09:37.447657  258301 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/ingress-addon-legacy-853196/client.crt: no such file or directory
E1201 19:09:37.768150  258301 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/ingress-addon-legacy-853196/client.crt: no such file or directory
E1201 19:09:38.409089  258301 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/ingress-addon-legacy-853196/client.crt: no such file or directory
E1201 19:09:39.689663  258301 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/ingress-addon-legacy-853196/client.crt: no such file or directory
E1201 19:09:42.252359  258301 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/ingress-addon-legacy-853196/client.crt: no such file or directory
kic_custom_network_test.go:112: (dbg) Done: out/minikube-linux-arm64 start -p custom-subnet-222500 --subnet=192.168.60.0/24: (33.286199274s)
kic_custom_network_test.go:161: (dbg) Run:  docker network inspect custom-subnet-222500 --format "{{(index .IPAM.Config 0).Subnet}}"
helpers_test.go:175: Cleaning up "custom-subnet-222500" profile ...
helpers_test.go:178: (dbg) Run:  out/minikube-linux-arm64 delete -p custom-subnet-222500
helpers_test.go:178: (dbg) Done: out/minikube-linux-arm64 delete -p custom-subnet-222500: (2.19374172s)
--- PASS: TestKicCustomSubnet (35.50s)
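[editor's note] The subnet assertion above relies on docker's Go-template inspection of the network's IPAM config. A hedged sketch reproducing that check from Go, with the network name and expected subnet taken from the log and the docker CLI assumed to be on PATH:

// Sketch: verify a KIC network's subnet the same way the test does, via
// `docker network inspect` with a Go template.
package main

import (
	"fmt"
	"log"
	"os/exec"
	"strings"
)

func main() {
	const network = "custom-subnet-222500" // name from the log above
	out, err := exec.Command("docker", "network", "inspect", network,
		"--format", "{{(index .IPAM.Config 0).Subnet}}").Output()
	if err != nil {
		log.Fatal(err)
	}
	subnet := strings.TrimSpace(string(out))
	if subnet != "192.168.60.0/24" {
		log.Fatalf("unexpected subnet %q", subnet)
	}
	fmt.Println("subnet matches:", subnet)
}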

                                                
                                    
TestKicStaticIP (37.76s)

                                                
                                                
=== RUN   TestKicStaticIP
kic_custom_network_test.go:132: (dbg) Run:  out/minikube-linux-arm64 start -p static-ip-176115 --static-ip=192.168.200.200
E1201 19:09:47.373402  258301 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/ingress-addon-legacy-853196/client.crt: no such file or directory
E1201 19:09:57.613618  258301 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/ingress-addon-legacy-853196/client.crt: no such file or directory
E1201 19:10:18.093925  258301 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/ingress-addon-legacy-853196/client.crt: no such file or directory
E1201 19:10:19.452448  258301 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/functional-616785/client.crt: no such file or directory
kic_custom_network_test.go:132: (dbg) Done: out/minikube-linux-arm64 start -p static-ip-176115 --static-ip=192.168.200.200: (35.331135921s)
kic_custom_network_test.go:138: (dbg) Run:  out/minikube-linux-arm64 -p static-ip-176115 ip
helpers_test.go:175: Cleaning up "static-ip-176115" profile ...
helpers_test.go:178: (dbg) Run:  out/minikube-linux-arm64 delete -p static-ip-176115
helpers_test.go:178: (dbg) Done: out/minikube-linux-arm64 delete -p static-ip-176115: (2.234409452s)
--- PASS: TestKicStaticIP (37.76s)

                                                
                                    
TestMainNoArgs (0.07s)

                                                
                                                
=== RUN   TestMainNoArgs
main_test.go:68: (dbg) Run:  out/minikube-linux-arm64
--- PASS: TestMainNoArgs (0.07s)

                                                
                                    
TestMinikubeProfile (66.2s)

                                                
                                                
=== RUN   TestMinikubeProfile
minikube_profile_test.go:44: (dbg) Run:  out/minikube-linux-arm64 start -p first-049120 --driver=docker  --container-runtime=containerd
E1201 19:10:47.134210  258301 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/functional-616785/client.crt: no such file or directory
minikube_profile_test.go:44: (dbg) Done: out/minikube-linux-arm64 start -p first-049120 --driver=docker  --container-runtime=containerd: (30.222112292s)
minikube_profile_test.go:44: (dbg) Run:  out/minikube-linux-arm64 start -p second-051672 --driver=docker  --container-runtime=containerd
E1201 19:10:59.054948  258301 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/ingress-addon-legacy-853196/client.crt: no such file or directory
minikube_profile_test.go:44: (dbg) Done: out/minikube-linux-arm64 start -p second-051672 --driver=docker  --container-runtime=containerd: (30.241803069s)
minikube_profile_test.go:51: (dbg) Run:  out/minikube-linux-arm64 profile first-049120
minikube_profile_test.go:55: (dbg) Run:  out/minikube-linux-arm64 profile list -ojson
minikube_profile_test.go:51: (dbg) Run:  out/minikube-linux-arm64 profile second-051672
minikube_profile_test.go:55: (dbg) Run:  out/minikube-linux-arm64 profile list -ojson
helpers_test.go:175: Cleaning up "second-051672" profile ...
helpers_test.go:178: (dbg) Run:  out/minikube-linux-arm64 delete -p second-051672
helpers_test.go:178: (dbg) Done: out/minikube-linux-arm64 delete -p second-051672: (2.022563946s)
helpers_test.go:175: Cleaning up "first-049120" profile ...
helpers_test.go:178: (dbg) Run:  out/minikube-linux-arm64 delete -p first-049120
helpers_test.go:178: (dbg) Done: out/minikube-linux-arm64 delete -p first-049120: (2.349310509s)
--- PASS: TestMinikubeProfile (66.20s)

                                                
                                    
TestMountStart/serial/StartWithMountFirst (9.65s)

                                                
                                                
=== RUN   TestMountStart/serial/StartWithMountFirst
mount_start_test.go:98: (dbg) Run:  out/minikube-linux-arm64 start -p mount-start-1-786263 --memory=2048 --mount --mount-gid 0 --mount-msize 6543 --mount-port 46464 --mount-uid 0 --no-kubernetes --driver=docker  --container-runtime=containerd
mount_start_test.go:98: (dbg) Done: out/minikube-linux-arm64 start -p mount-start-1-786263 --memory=2048 --mount --mount-gid 0 --mount-msize 6543 --mount-port 46464 --mount-uid 0 --no-kubernetes --driver=docker  --container-runtime=containerd: (8.65159825s)
--- PASS: TestMountStart/serial/StartWithMountFirst (9.65s)

                                                
                                    
TestMountStart/serial/VerifyMountFirst (0.3s)

                                                
                                                
=== RUN   TestMountStart/serial/VerifyMountFirst
mount_start_test.go:114: (dbg) Run:  out/minikube-linux-arm64 -p mount-start-1-786263 ssh -- ls /minikube-host
--- PASS: TestMountStart/serial/VerifyMountFirst (0.30s)

                                                
                                    
TestMountStart/serial/StartWithMountSecond (7.67s)

                                                
                                                
=== RUN   TestMountStart/serial/StartWithMountSecond
mount_start_test.go:98: (dbg) Run:  out/minikube-linux-arm64 start -p mount-start-2-788003 --memory=2048 --mount --mount-gid 0 --mount-msize 6543 --mount-port 46465 --mount-uid 0 --no-kubernetes --driver=docker  --container-runtime=containerd
mount_start_test.go:98: (dbg) Done: out/minikube-linux-arm64 start -p mount-start-2-788003 --memory=2048 --mount --mount-gid 0 --mount-msize 6543 --mount-port 46465 --mount-uid 0 --no-kubernetes --driver=docker  --container-runtime=containerd: (6.672348234s)
--- PASS: TestMountStart/serial/StartWithMountSecond (7.67s)

                                                
                                    
TestMountStart/serial/VerifyMountSecond (0.31s)

                                                
                                                
=== RUN   TestMountStart/serial/VerifyMountSecond
mount_start_test.go:114: (dbg) Run:  out/minikube-linux-arm64 -p mount-start-2-788003 ssh -- ls /minikube-host
--- PASS: TestMountStart/serial/VerifyMountSecond (0.31s)

                                                
                                    
TestMountStart/serial/DeleteFirst (1.71s)

                                                
                                                
=== RUN   TestMountStart/serial/DeleteFirst
pause_test.go:132: (dbg) Run:  out/minikube-linux-arm64 delete -p mount-start-1-786263 --alsologtostderr -v=5
pause_test.go:132: (dbg) Done: out/minikube-linux-arm64 delete -p mount-start-1-786263 --alsologtostderr -v=5: (1.714210382s)
--- PASS: TestMountStart/serial/DeleteFirst (1.71s)

                                                
                                    
TestMountStart/serial/VerifyMountPostDelete (0.3s)

                                                
                                                
=== RUN   TestMountStart/serial/VerifyMountPostDelete
mount_start_test.go:114: (dbg) Run:  out/minikube-linux-arm64 -p mount-start-2-788003 ssh -- ls /minikube-host
--- PASS: TestMountStart/serial/VerifyMountPostDelete (0.30s)

                                                
                                    
TestMountStart/serial/Stop (1.24s)

                                                
                                                
=== RUN   TestMountStart/serial/Stop
mount_start_test.go:155: (dbg) Run:  out/minikube-linux-arm64 stop -p mount-start-2-788003
mount_start_test.go:155: (dbg) Done: out/minikube-linux-arm64 stop -p mount-start-2-788003: (1.244373367s)
--- PASS: TestMountStart/serial/Stop (1.24s)

                                                
                                    
TestMountStart/serial/RestartStopped (7.71s)

                                                
                                                
=== RUN   TestMountStart/serial/RestartStopped
mount_start_test.go:166: (dbg) Run:  out/minikube-linux-arm64 start -p mount-start-2-788003
mount_start_test.go:166: (dbg) Done: out/minikube-linux-arm64 start -p mount-start-2-788003: (6.710917439s)
--- PASS: TestMountStart/serial/RestartStopped (7.71s)

                                                
                                    
TestMountStart/serial/VerifyMountPostStop (0.42s)

                                                
                                                
=== RUN   TestMountStart/serial/VerifyMountPostStop
mount_start_test.go:114: (dbg) Run:  out/minikube-linux-arm64 -p mount-start-2-788003 ssh -- ls /minikube-host
--- PASS: TestMountStart/serial/VerifyMountPostStop (0.42s)

                                                
                                    
TestMultiNode/serial/FreshStart2Nodes (105.92s)

                                                
                                                
=== RUN   TestMultiNode/serial/FreshStart2Nodes
multinode_test.go:86: (dbg) Run:  out/minikube-linux-arm64 start -p multinode-651909 --wait=true --memory=2200 --nodes=2 -v=8 --alsologtostderr --driver=docker  --container-runtime=containerd
E1201 19:12:20.975605  258301 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/ingress-addon-legacy-853196/client.crt: no such file or directory
multinode_test.go:86: (dbg) Done: out/minikube-linux-arm64 start -p multinode-651909 --wait=true --memory=2200 --nodes=2 -v=8 --alsologtostderr --driver=docker  --container-runtime=containerd: (1m45.321062343s)
multinode_test.go:92: (dbg) Run:  out/minikube-linux-arm64 -p multinode-651909 status --alsologtostderr
--- PASS: TestMultiNode/serial/FreshStart2Nodes (105.92s)

                                                
                                    
TestMultiNode/serial/DeployApp2Nodes (4.96s)

                                                
                                                
=== RUN   TestMultiNode/serial/DeployApp2Nodes
multinode_test.go:509: (dbg) Run:  out/minikube-linux-arm64 kubectl -p multinode-651909 -- apply -f ./testdata/multinodes/multinode-pod-dns-test.yaml
multinode_test.go:514: (dbg) Run:  out/minikube-linux-arm64 kubectl -p multinode-651909 -- rollout status deployment/busybox
multinode_test.go:514: (dbg) Done: out/minikube-linux-arm64 kubectl -p multinode-651909 -- rollout status deployment/busybox: (2.681325095s)
multinode_test.go:521: (dbg) Run:  out/minikube-linux-arm64 kubectl -p multinode-651909 -- get pods -o jsonpath='{.items[*].status.podIP}'
multinode_test.go:544: (dbg) Run:  out/minikube-linux-arm64 kubectl -p multinode-651909 -- get pods -o jsonpath='{.items[*].metadata.name}'
multinode_test.go:552: (dbg) Run:  out/minikube-linux-arm64 kubectl -p multinode-651909 -- exec busybox-5bc68d56bd-2txnb -- nslookup kubernetes.io
multinode_test.go:552: (dbg) Run:  out/minikube-linux-arm64 kubectl -p multinode-651909 -- exec busybox-5bc68d56bd-cb8sv -- nslookup kubernetes.io
multinode_test.go:562: (dbg) Run:  out/minikube-linux-arm64 kubectl -p multinode-651909 -- exec busybox-5bc68d56bd-2txnb -- nslookup kubernetes.default
multinode_test.go:562: (dbg) Run:  out/minikube-linux-arm64 kubectl -p multinode-651909 -- exec busybox-5bc68d56bd-cb8sv -- nslookup kubernetes.default
multinode_test.go:570: (dbg) Run:  out/minikube-linux-arm64 kubectl -p multinode-651909 -- exec busybox-5bc68d56bd-2txnb -- nslookup kubernetes.default.svc.cluster.local
multinode_test.go:570: (dbg) Run:  out/minikube-linux-arm64 kubectl -p multinode-651909 -- exec busybox-5bc68d56bd-cb8sv -- nslookup kubernetes.default.svc.cluster.local
--- PASS: TestMultiNode/serial/DeployApp2Nodes (4.96s)

                                                
                                    
TestMultiNode/serial/PingHostFrom2Pods (1.22s)

                                                
                                                
=== RUN   TestMultiNode/serial/PingHostFrom2Pods
multinode_test.go:580: (dbg) Run:  out/minikube-linux-arm64 kubectl -p multinode-651909 -- get pods -o jsonpath='{.items[*].metadata.name}'
multinode_test.go:588: (dbg) Run:  out/minikube-linux-arm64 kubectl -p multinode-651909 -- exec busybox-5bc68d56bd-2txnb -- sh -c "nslookup host.minikube.internal | awk 'NR==5' | cut -d' ' -f3"
multinode_test.go:599: (dbg) Run:  out/minikube-linux-arm64 kubectl -p multinode-651909 -- exec busybox-5bc68d56bd-2txnb -- sh -c "ping -c 1 192.168.58.1"
multinode_test.go:588: (dbg) Run:  out/minikube-linux-arm64 kubectl -p multinode-651909 -- exec busybox-5bc68d56bd-cb8sv -- sh -c "nslookup host.minikube.internal | awk 'NR==5' | cut -d' ' -f3"
multinode_test.go:599: (dbg) Run:  out/minikube-linux-arm64 kubectl -p multinode-651909 -- exec busybox-5bc68d56bd-cb8sv -- sh -c "ping -c 1 192.168.58.1"
--- PASS: TestMultiNode/serial/PingHostFrom2Pods (1.22s)
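[editor's note] The in-pod pipeline above (`nslookup host.minikube.internal | awk 'NR==5' | cut -d' ' -f3`) takes the fifth line of busybox nslookup output and extracts its third space-separated field, i.e. the resolved host IP that is then pinged. A small sketch of that extraction; the sample output is hypothetical and only illustrates the line/field positions the pipeline assumes:

// Sketch: the same line/field selection the shell pipeline performs.
package main

import (
	"fmt"
	"strings"
)

// hostIPFromNslookup mirrors: nslookup ... | awk 'NR==5' | cut -d' ' -f3
func hostIPFromNslookup(out string) string {
	lines := strings.Split(out, "\n")
	if len(lines) < 5 {
		return ""
	}
	fields := strings.Split(lines[4], " ") // NR==5 is the fifth line (1-based)
	if len(fields) < 3 {
		return ""
	}
	return fields[2] // -f3 is the third field (1-based)
}

func main() {
	// Hypothetical busybox-style nslookup output; the real test captures this
	// by exec'ing nslookup inside the busybox pods listed above.
	sample := "Server:    10.96.0.10\nAddress 1: 10.96.0.10\n\nName:      host.minikube.internal\nAddress 1: 192.168.58.1\n"
	fmt.Println(hostIPFromNslookup(sample))
}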

                                                
                                    
TestMultiNode/serial/AddNode (21.47s)

                                                
                                                
=== RUN   TestMultiNode/serial/AddNode
multinode_test.go:111: (dbg) Run:  out/minikube-linux-arm64 node add -p multinode-651909 -v 3 --alsologtostderr
multinode_test.go:111: (dbg) Done: out/minikube-linux-arm64 node add -p multinode-651909 -v 3 --alsologtostderr: (20.68076581s)
multinode_test.go:117: (dbg) Run:  out/minikube-linux-arm64 -p multinode-651909 status --alsologtostderr
--- PASS: TestMultiNode/serial/AddNode (21.47s)

                                                
                                    
TestMultiNode/serial/MultiNodeLabels (0.1s)

                                                
                                                
=== RUN   TestMultiNode/serial/MultiNodeLabels
multinode_test.go:211: (dbg) Run:  kubectl --context multinode-651909 get nodes -o "jsonpath=[{range .items[*]}{.metadata.labels},{end}]"
--- PASS: TestMultiNode/serial/MultiNodeLabels (0.10s)

                                                
                                    
TestMultiNode/serial/ProfileList (0.39s)

                                                
                                                
=== RUN   TestMultiNode/serial/ProfileList
multinode_test.go:133: (dbg) Run:  out/minikube-linux-arm64 profile list --output json
--- PASS: TestMultiNode/serial/ProfileList (0.39s)

                                                
                                    
TestMultiNode/serial/CopyFile (11.9s)

                                                
                                                
=== RUN   TestMultiNode/serial/CopyFile
multinode_test.go:174: (dbg) Run:  out/minikube-linux-arm64 -p multinode-651909 status --output json --alsologtostderr
helpers_test.go:556: (dbg) Run:  out/minikube-linux-arm64 -p multinode-651909 cp testdata/cp-test.txt multinode-651909:/home/docker/cp-test.txt
helpers_test.go:534: (dbg) Run:  out/minikube-linux-arm64 -p multinode-651909 ssh -n multinode-651909 "sudo cat /home/docker/cp-test.txt"
helpers_test.go:556: (dbg) Run:  out/minikube-linux-arm64 -p multinode-651909 cp multinode-651909:/home/docker/cp-test.txt /tmp/TestMultiNodeserialCopyFile2522025678/001/cp-test_multinode-651909.txt
helpers_test.go:534: (dbg) Run:  out/minikube-linux-arm64 -p multinode-651909 ssh -n multinode-651909 "sudo cat /home/docker/cp-test.txt"
helpers_test.go:556: (dbg) Run:  out/minikube-linux-arm64 -p multinode-651909 cp multinode-651909:/home/docker/cp-test.txt multinode-651909-m02:/home/docker/cp-test_multinode-651909_multinode-651909-m02.txt
helpers_test.go:534: (dbg) Run:  out/minikube-linux-arm64 -p multinode-651909 ssh -n multinode-651909 "sudo cat /home/docker/cp-test.txt"
helpers_test.go:534: (dbg) Run:  out/minikube-linux-arm64 -p multinode-651909 ssh -n multinode-651909-m02 "sudo cat /home/docker/cp-test_multinode-651909_multinode-651909-m02.txt"
helpers_test.go:556: (dbg) Run:  out/minikube-linux-arm64 -p multinode-651909 cp multinode-651909:/home/docker/cp-test.txt multinode-651909-m03:/home/docker/cp-test_multinode-651909_multinode-651909-m03.txt
helpers_test.go:534: (dbg) Run:  out/minikube-linux-arm64 -p multinode-651909 ssh -n multinode-651909 "sudo cat /home/docker/cp-test.txt"
helpers_test.go:534: (dbg) Run:  out/minikube-linux-arm64 -p multinode-651909 ssh -n multinode-651909-m03 "sudo cat /home/docker/cp-test_multinode-651909_multinode-651909-m03.txt"
helpers_test.go:556: (dbg) Run:  out/minikube-linux-arm64 -p multinode-651909 cp testdata/cp-test.txt multinode-651909-m02:/home/docker/cp-test.txt
helpers_test.go:534: (dbg) Run:  out/minikube-linux-arm64 -p multinode-651909 ssh -n multinode-651909-m02 "sudo cat /home/docker/cp-test.txt"
helpers_test.go:556: (dbg) Run:  out/minikube-linux-arm64 -p multinode-651909 cp multinode-651909-m02:/home/docker/cp-test.txt /tmp/TestMultiNodeserialCopyFile2522025678/001/cp-test_multinode-651909-m02.txt
helpers_test.go:534: (dbg) Run:  out/minikube-linux-arm64 -p multinode-651909 ssh -n multinode-651909-m02 "sudo cat /home/docker/cp-test.txt"
helpers_test.go:556: (dbg) Run:  out/minikube-linux-arm64 -p multinode-651909 cp multinode-651909-m02:/home/docker/cp-test.txt multinode-651909:/home/docker/cp-test_multinode-651909-m02_multinode-651909.txt
helpers_test.go:534: (dbg) Run:  out/minikube-linux-arm64 -p multinode-651909 ssh -n multinode-651909-m02 "sudo cat /home/docker/cp-test.txt"
helpers_test.go:534: (dbg) Run:  out/minikube-linux-arm64 -p multinode-651909 ssh -n multinode-651909 "sudo cat /home/docker/cp-test_multinode-651909-m02_multinode-651909.txt"
helpers_test.go:556: (dbg) Run:  out/minikube-linux-arm64 -p multinode-651909 cp multinode-651909-m02:/home/docker/cp-test.txt multinode-651909-m03:/home/docker/cp-test_multinode-651909-m02_multinode-651909-m03.txt
helpers_test.go:534: (dbg) Run:  out/minikube-linux-arm64 -p multinode-651909 ssh -n multinode-651909-m02 "sudo cat /home/docker/cp-test.txt"
helpers_test.go:534: (dbg) Run:  out/minikube-linux-arm64 -p multinode-651909 ssh -n multinode-651909-m03 "sudo cat /home/docker/cp-test_multinode-651909-m02_multinode-651909-m03.txt"
E1201 19:14:24.386047  258301 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/addons-488129/client.crt: no such file or directory
helpers_test.go:556: (dbg) Run:  out/minikube-linux-arm64 -p multinode-651909 cp testdata/cp-test.txt multinode-651909-m03:/home/docker/cp-test.txt
helpers_test.go:534: (dbg) Run:  out/minikube-linux-arm64 -p multinode-651909 ssh -n multinode-651909-m03 "sudo cat /home/docker/cp-test.txt"
helpers_test.go:556: (dbg) Run:  out/minikube-linux-arm64 -p multinode-651909 cp multinode-651909-m03:/home/docker/cp-test.txt /tmp/TestMultiNodeserialCopyFile2522025678/001/cp-test_multinode-651909-m03.txt
helpers_test.go:534: (dbg) Run:  out/minikube-linux-arm64 -p multinode-651909 ssh -n multinode-651909-m03 "sudo cat /home/docker/cp-test.txt"
helpers_test.go:556: (dbg) Run:  out/minikube-linux-arm64 -p multinode-651909 cp multinode-651909-m03:/home/docker/cp-test.txt multinode-651909:/home/docker/cp-test_multinode-651909-m03_multinode-651909.txt
helpers_test.go:534: (dbg) Run:  out/minikube-linux-arm64 -p multinode-651909 ssh -n multinode-651909-m03 "sudo cat /home/docker/cp-test.txt"
helpers_test.go:534: (dbg) Run:  out/minikube-linux-arm64 -p multinode-651909 ssh -n multinode-651909 "sudo cat /home/docker/cp-test_multinode-651909-m03_multinode-651909.txt"
helpers_test.go:556: (dbg) Run:  out/minikube-linux-arm64 -p multinode-651909 cp multinode-651909-m03:/home/docker/cp-test.txt multinode-651909-m02:/home/docker/cp-test_multinode-651909-m03_multinode-651909-m02.txt
helpers_test.go:534: (dbg) Run:  out/minikube-linux-arm64 -p multinode-651909 ssh -n multinode-651909-m03 "sudo cat /home/docker/cp-test.txt"
helpers_test.go:534: (dbg) Run:  out/minikube-linux-arm64 -p multinode-651909 ssh -n multinode-651909-m02 "sudo cat /home/docker/cp-test_multinode-651909-m03_multinode-651909-m02.txt"
--- PASS: TestMultiNode/serial/CopyFile (11.90s)
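[editor's note] Every copy check above is the same round trip: `minikube cp` a file onto a node, then `minikube ssh -n <node> "sudo cat ..."` to read it back. A compact sketch of one such round trip; the binary path, profile, and node names come from the log, while the byte-for-byte comparison is an assumption about what the helper verifies:

// Sketch: one cp/ssh round trip in the style of the CopyFile checks above.
package main

import (
	"bytes"
	"log"
	"os"
	"os/exec"
)

func main() {
	const (
		minikube = "out/minikube-linux-arm64" // binary path from the log
		profile  = "multinode-651909"         // profile name from the log
		node     = "multinode-651909-m02"
	)
	// Copy the local test file onto the node.
	if out, err := exec.Command(minikube, "-p", profile, "cp",
		"testdata/cp-test.txt", node+":/home/docker/cp-test.txt").CombinedOutput(); err != nil {
		log.Fatalf("cp failed: %v\n%s", err, out)
	}
	// Read it back over SSH and compare with the local copy.
	remote, err := exec.Command(minikube, "-p", profile, "ssh", "-n", node,
		"sudo cat /home/docker/cp-test.txt").Output()
	if err != nil {
		log.Fatal(err)
	}
	local, err := os.ReadFile("testdata/cp-test.txt")
	if err != nil {
		log.Fatal(err)
	}
	if !bytes.Equal(bytes.TrimSpace(remote), bytes.TrimSpace(local)) {
		log.Fatal("remote contents do not match local testdata/cp-test.txt")
	}
	log.Println("round trip OK")
}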

                                                
                                    
TestMultiNode/serial/StopNode (2.53s)

                                                
                                                
=== RUN   TestMultiNode/serial/StopNode
multinode_test.go:238: (dbg) Run:  out/minikube-linux-arm64 -p multinode-651909 node stop m03
multinode_test.go:238: (dbg) Done: out/minikube-linux-arm64 -p multinode-651909 node stop m03: (1.266644783s)
multinode_test.go:244: (dbg) Run:  out/minikube-linux-arm64 -p multinode-651909 status
multinode_test.go:244: (dbg) Non-zero exit: out/minikube-linux-arm64 -p multinode-651909 status: exit status 7 (632.90415ms)

                                                
                                                
-- stdout --
	multinode-651909
	type: Control Plane
	host: Running
	kubelet: Running
	apiserver: Running
	kubeconfig: Configured
	
	multinode-651909-m02
	type: Worker
	host: Running
	kubelet: Running
	
	multinode-651909-m03
	type: Worker
	host: Stopped
	kubelet: Stopped
	

                                                
                                                
-- /stdout --
multinode_test.go:251: (dbg) Run:  out/minikube-linux-arm64 -p multinode-651909 status --alsologtostderr
multinode_test.go:251: (dbg) Non-zero exit: out/minikube-linux-arm64 -p multinode-651909 status --alsologtostderr: exit status 7 (626.747342ms)

                                                
                                                
-- stdout --
	multinode-651909
	type: Control Plane
	host: Running
	kubelet: Running
	apiserver: Running
	kubeconfig: Configured
	
	multinode-651909-m02
	type: Worker
	host: Running
	kubelet: Running
	
	multinode-651909-m03
	type: Worker
	host: Stopped
	kubelet: Stopped
	

                                                
                                                
-- /stdout --
** stderr ** 
	I1201 19:14:30.225442  339614 out.go:296] Setting OutFile to fd 1 ...
	I1201 19:14:30.225660  339614 out.go:343] TERM=,COLORTERM=, which probably does not support color
	I1201 19:14:30.225687  339614 out.go:309] Setting ErrFile to fd 2...
	I1201 19:14:30.225705  339614 out.go:343] TERM=,COLORTERM=, which probably does not support color
	I1201 19:14:30.226027  339614 root.go:338] Updating PATH: /home/jenkins/minikube-integration/17703-252966/.minikube/bin
	I1201 19:14:30.226268  339614 out.go:303] Setting JSON to false
	I1201 19:14:30.226424  339614 mustload.go:65] Loading cluster: multinode-651909
	I1201 19:14:30.226539  339614 notify.go:220] Checking for updates...
	I1201 19:14:30.226996  339614 config.go:182] Loaded profile config "multinode-651909": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.28.4
	I1201 19:14:30.227034  339614 status.go:255] checking status of multinode-651909 ...
	I1201 19:14:30.227790  339614 cli_runner.go:164] Run: docker container inspect multinode-651909 --format={{.State.Status}}
	I1201 19:14:30.248628  339614 status.go:330] multinode-651909 host status = "Running" (err=<nil>)
	I1201 19:14:30.248673  339614 host.go:66] Checking if "multinode-651909" exists ...
	I1201 19:14:30.248980  339614 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" multinode-651909
	I1201 19:14:30.281802  339614 host.go:66] Checking if "multinode-651909" exists ...
	I1201 19:14:30.282099  339614 ssh_runner.go:195] Run: sh -c "df -h /var | awk 'NR==2{print $5}'"
	I1201 19:14:30.282142  339614 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" multinode-651909
	I1201 19:14:30.304102  339614 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33163 SSHKeyPath:/home/jenkins/minikube-integration/17703-252966/.minikube/machines/multinode-651909/id_rsa Username:docker}
	I1201 19:14:30.411339  339614 ssh_runner.go:195] Run: systemctl --version
	I1201 19:14:30.417516  339614 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet
	I1201 19:14:30.431909  339614 cli_runner.go:164] Run: docker system info --format "{{json .}}"
	I1201 19:14:30.509205  339614 info.go:266] docker info: {ID:EOU5:DNGX:XN6V:L2FZ:UXRM:5TWK:EVUR:KC2F:GT7Z:Y4O4:GB77:5PD3 Containers:3 ContainersRunning:2 ContainersPaused:0 ContainersStopped:1 Images:3 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local logentries splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:true BridgeNfIP6Tables:true Debug:false NFd:40 OomKillDisable:true NGoroutines:55 SystemTime:2023-12-01 19:14:30.499024843 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1050-aws OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Archi
tecture:aarch64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:2 MemTotal:8215040000 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ip-172-31-31-251 Labels:[] ExperimentalBuild:false ServerVersion:24.0.7 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:d8f198a4ed8892c764191ef7b3b06d8a2eeb5c7f Expected:d8f198a4ed8892c764191ef7b3b06d8a2eeb5c7f} RuncCommit:{ID:v1.1.10-0-g18a0cb0 Expected:v1.1.10-0-g18a0cb0} InitCommit:{ID:de40ad0 Expected:de40ad0} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:<nil>
ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.11.2] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.21.0]] Warnings:<nil>}}
	I1201 19:14:30.509878  339614 kubeconfig.go:92] found "multinode-651909" server: "https://192.168.58.2:8443"
	I1201 19:14:30.509909  339614 api_server.go:166] Checking apiserver status ...
	I1201 19:14:30.509954  339614 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
	I1201 19:14:30.524859  339614 ssh_runner.go:195] Run: sudo egrep ^[0-9]+:freezer: /proc/1262/cgroup
	I1201 19:14:30.537351  339614 api_server.go:182] apiserver freezer: "12:freezer:/docker/d5fa3c6a6874c37c647d80f2518cbf5da92afa0e2acb6fc6e294ab7b8f8faa0c/kubepods/burstable/pod42edb9c85998e5e53dd9df3fc0ee4b17/ee630b72ca03773df1b7fd82b5238b9c8bbeb0a40f1c4c4a25f7dc7ee6cc8a68"
	I1201 19:14:30.537488  339614 ssh_runner.go:195] Run: sudo cat /sys/fs/cgroup/freezer/docker/d5fa3c6a6874c37c647d80f2518cbf5da92afa0e2acb6fc6e294ab7b8f8faa0c/kubepods/burstable/pod42edb9c85998e5e53dd9df3fc0ee4b17/ee630b72ca03773df1b7fd82b5238b9c8bbeb0a40f1c4c4a25f7dc7ee6cc8a68/freezer.state
	I1201 19:14:30.548336  339614 api_server.go:204] freezer state: "THAWED"
	I1201 19:14:30.548367  339614 api_server.go:253] Checking apiserver healthz at https://192.168.58.2:8443/healthz ...
	I1201 19:14:30.557794  339614 api_server.go:279] https://192.168.58.2:8443/healthz returned 200:
	ok
	I1201 19:14:30.557827  339614 status.go:421] multinode-651909 apiserver status = Running (err=<nil>)
	I1201 19:14:30.557840  339614 status.go:257] multinode-651909 status: &{Name:multinode-651909 Host:Running Kubelet:Running APIServer:Running Kubeconfig:Configured Worker:false TimeToStop: DockerEnv: PodManEnv:}
	I1201 19:14:30.557856  339614 status.go:255] checking status of multinode-651909-m02 ...
	I1201 19:14:30.558179  339614 cli_runner.go:164] Run: docker container inspect multinode-651909-m02 --format={{.State.Status}}
	I1201 19:14:30.576778  339614 status.go:330] multinode-651909-m02 host status = "Running" (err=<nil>)
	I1201 19:14:30.576836  339614 host.go:66] Checking if "multinode-651909-m02" exists ...
	I1201 19:14:30.577138  339614 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" multinode-651909-m02
	I1201 19:14:30.596162  339614 host.go:66] Checking if "multinode-651909-m02" exists ...
	I1201 19:14:30.596667  339614 ssh_runner.go:195] Run: sh -c "df -h /var | awk 'NR==2{print $5}'"
	I1201 19:14:30.596730  339614 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" multinode-651909-m02
	I1201 19:14:30.620004  339614 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33168 SSHKeyPath:/home/jenkins/minikube-integration/17703-252966/.minikube/machines/multinode-651909-m02/id_rsa Username:docker}
	I1201 19:14:30.723408  339614 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet
	I1201 19:14:30.738360  339614 status.go:257] multinode-651909-m02 status: &{Name:multinode-651909-m02 Host:Running Kubelet:Running APIServer:Irrelevant Kubeconfig:Irrelevant Worker:true TimeToStop: DockerEnv: PodManEnv:}
	I1201 19:14:30.738415  339614 status.go:255] checking status of multinode-651909-m03 ...
	I1201 19:14:30.738739  339614 cli_runner.go:164] Run: docker container inspect multinode-651909-m03 --format={{.State.Status}}
	I1201 19:14:30.758882  339614 status.go:330] multinode-651909-m03 host status = "Stopped" (err=<nil>)
	I1201 19:14:30.758903  339614 status.go:343] host is not running, skipping remaining checks
	I1201 19:14:30.758911  339614 status.go:257] multinode-651909-m03 status: &{Name:multinode-651909-m03 Host:Stopped Kubelet:Stopped APIServer:Stopped Kubeconfig:Stopped Worker:true TimeToStop: DockerEnv: PodManEnv:}

                                                
                                                
** /stderr **
--- PASS: TestMultiNode/serial/StopNode (2.53s)
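For reference, the node stop/status flow exercised above can be repeated by hand. A minimal sketch, assuming the multinode-651909 profile from this run and the binary at out/minikube-linux-arm64; as the output above shows, status exits with code 7 while any node in the profile is stopped.

    # stop only the m03 worker; the control plane keeps running
    out/minikube-linux-arm64 -p multinode-651909 node stop m03

    # status lists the stopped node and returns exit code 7
    out/minikube-linux-arm64 -p multinode-651909 status --alsologtostderr
    echo "status exit code: $?"    # 7 while m03 is stopped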

                                                
                                    
TestMultiNode/serial/StartAfterStop (12.97s)

                                                
                                                
=== RUN   TestMultiNode/serial/StartAfterStop
multinode_test.go:272: (dbg) Run:  docker version -f {{.Server.Version}}
multinode_test.go:282: (dbg) Run:  out/minikube-linux-arm64 -p multinode-651909 node start m03 --alsologtostderr
E1201 19:14:37.130943  258301 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/ingress-addon-legacy-853196/client.crt: no such file or directory
multinode_test.go:282: (dbg) Done: out/minikube-linux-arm64 -p multinode-651909 node start m03 --alsologtostderr: (12.042599241s)
multinode_test.go:289: (dbg) Run:  out/minikube-linux-arm64 -p multinode-651909 status
multinode_test.go:303: (dbg) Run:  kubectl get nodes
--- PASS: TestMultiNode/serial/StartAfterStop (12.97s)
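The restart path above is the inverse operation; a sketch with the same profile name taken from this run:

    # bring the stopped worker back and wait for it to rejoin
    out/minikube-linux-arm64 -p multinode-651909 node start m03 --alsologtostderr

    # verify the profile and the Kubernetes node list both look healthy again
    out/minikube-linux-arm64 -p multinode-651909 status
    kubectl get nodes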

                                                
                                    
TestMultiNode/serial/RestartKeepsNodes (122.12s)

                                                
                                                
=== RUN   TestMultiNode/serial/RestartKeepsNodes
multinode_test.go:311: (dbg) Run:  out/minikube-linux-arm64 node list -p multinode-651909
multinode_test.go:318: (dbg) Run:  out/minikube-linux-arm64 stop -p multinode-651909
E1201 19:15:04.816203  258301 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/ingress-addon-legacy-853196/client.crt: no such file or directory
multinode_test.go:318: (dbg) Done: out/minikube-linux-arm64 stop -p multinode-651909: (25.511503719s)
multinode_test.go:323: (dbg) Run:  out/minikube-linux-arm64 start -p multinode-651909 --wait=true -v=8 --alsologtostderr
E1201 19:15:19.452223  258301 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/functional-616785/client.crt: no such file or directory
E1201 19:15:47.433276  258301 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/addons-488129/client.crt: no such file or directory
multinode_test.go:323: (dbg) Done: out/minikube-linux-arm64 start -p multinode-651909 --wait=true -v=8 --alsologtostderr: (1m36.425803614s)
multinode_test.go:328: (dbg) Run:  out/minikube-linux-arm64 node list -p multinode-651909
--- PASS: TestMultiNode/serial/RestartKeepsNodes (122.12s)
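The full stop/restart cycle this test verifies can be sketched as follows (profile name from this run; the temporary files and the diff are illustrative only). The property being checked is that the node list before and after the restart is identical.

    out/minikube-linux-arm64 node list -p multinode-651909 > /tmp/nodes-before.txt
    out/minikube-linux-arm64 stop  -p multinode-651909
    out/minikube-linux-arm64 start -p multinode-651909 --wait=true -v=8 --alsologtostderr
    out/minikube-linux-arm64 node list -p multinode-651909 > /tmp/nodes-after.txt
    diff /tmp/nodes-before.txt /tmp/nodes-after.txt    # no output expected if all nodes were kept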

                                                
                                    
TestMultiNode/serial/DeleteNode (5.22s)

                                                
                                                
=== RUN   TestMultiNode/serial/DeleteNode
multinode_test.go:422: (dbg) Run:  out/minikube-linux-arm64 -p multinode-651909 node delete m03
multinode_test.go:422: (dbg) Done: out/minikube-linux-arm64 -p multinode-651909 node delete m03: (4.438657203s)
multinode_test.go:428: (dbg) Run:  out/minikube-linux-arm64 -p multinode-651909 status --alsologtostderr
multinode_test.go:442: (dbg) Run:  docker volume ls
multinode_test.go:452: (dbg) Run:  kubectl get nodes
multinode_test.go:460: (dbg) Run:  kubectl get nodes -o "go-template='{{range .items}}{{range .status.conditions}}{{if eq .type "Ready"}} {{.status}}{{"\n"}}{{end}}{{end}}{{end}}'"
--- PASS: TestMultiNode/serial/DeleteNode (5.22s)
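Deleting a node and confirming the cluster no longer carries it, as exercised above. The go-template is the one used by the test, quoted here for a POSIX shell; the docker volume check mirrors the test's leftover-resource check.

    out/minikube-linux-arm64 -p multinode-651909 node delete m03
    out/minikube-linux-arm64 -p multinode-651909 status --alsologtostderr

    # remaining nodes should report Ready, and no stray volume should be left behind
    kubectl get nodes -o go-template='{{range .items}}{{range .status.conditions}}{{if eq .type "Ready"}} {{.status}}{{"\n"}}{{end}}{{end}}{{end}}'
    docker volume ls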

                                                
                                    
TestMultiNode/serial/StopMultiNode (24.47s)

                                                
                                                
=== RUN   TestMultiNode/serial/StopMultiNode
multinode_test.go:342: (dbg) Run:  out/minikube-linux-arm64 -p multinode-651909 stop
multinode_test.go:342: (dbg) Done: out/minikube-linux-arm64 -p multinode-651909 stop: (24.254044128s)
multinode_test.go:348: (dbg) Run:  out/minikube-linux-arm64 -p multinode-651909 status
multinode_test.go:348: (dbg) Non-zero exit: out/minikube-linux-arm64 -p multinode-651909 status: exit status 7 (107.457481ms)

                                                
                                                
-- stdout --
	multinode-651909
	type: Control Plane
	host: Stopped
	kubelet: Stopped
	apiserver: Stopped
	kubeconfig: Stopped
	
	multinode-651909-m02
	type: Worker
	host: Stopped
	kubelet: Stopped
	

                                                
                                                
-- /stdout --
multinode_test.go:355: (dbg) Run:  out/minikube-linux-arm64 -p multinode-651909 status --alsologtostderr
multinode_test.go:355: (dbg) Non-zero exit: out/minikube-linux-arm64 -p multinode-651909 status --alsologtostderr: exit status 7 (111.299923ms)

                                                
                                                
-- stdout --
	multinode-651909
	type: Control Plane
	host: Stopped
	kubelet: Stopped
	apiserver: Stopped
	kubeconfig: Stopped
	
	multinode-651909-m02
	type: Worker
	host: Stopped
	kubelet: Stopped
	

                                                
                                                
-- /stdout --
** stderr ** 
	I1201 19:17:15.510931  348481 out.go:296] Setting OutFile to fd 1 ...
	I1201 19:17:15.511080  348481 out.go:343] TERM=,COLORTERM=, which probably does not support color
	I1201 19:17:15.511090  348481 out.go:309] Setting ErrFile to fd 2...
	I1201 19:17:15.511096  348481 out.go:343] TERM=,COLORTERM=, which probably does not support color
	I1201 19:17:15.511375  348481 root.go:338] Updating PATH: /home/jenkins/minikube-integration/17703-252966/.minikube/bin
	I1201 19:17:15.511556  348481 out.go:303] Setting JSON to false
	I1201 19:17:15.512605  348481 mustload.go:65] Loading cluster: multinode-651909
	I1201 19:17:15.512666  348481 notify.go:220] Checking for updates...
	I1201 19:17:15.513073  348481 config.go:182] Loaded profile config "multinode-651909": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.28.4
	I1201 19:17:15.513085  348481 status.go:255] checking status of multinode-651909 ...
	I1201 19:17:15.514501  348481 cli_runner.go:164] Run: docker container inspect multinode-651909 --format={{.State.Status}}
	I1201 19:17:15.533450  348481 status.go:330] multinode-651909 host status = "Stopped" (err=<nil>)
	I1201 19:17:15.533474  348481 status.go:343] host is not running, skipping remaining checks
	I1201 19:17:15.533481  348481 status.go:257] multinode-651909 status: &{Name:multinode-651909 Host:Stopped Kubelet:Stopped APIServer:Stopped Kubeconfig:Stopped Worker:false TimeToStop: DockerEnv: PodManEnv:}
	I1201 19:17:15.533516  348481 status.go:255] checking status of multinode-651909-m02 ...
	I1201 19:17:15.533820  348481 cli_runner.go:164] Run: docker container inspect multinode-651909-m02 --format={{.State.Status}}
	I1201 19:17:15.552848  348481 status.go:330] multinode-651909-m02 host status = "Stopped" (err=<nil>)
	I1201 19:17:15.552873  348481 status.go:343] host is not running, skipping remaining checks
	I1201 19:17:15.552882  348481 status.go:257] multinode-651909-m02 status: &{Name:multinode-651909-m02 Host:Stopped Kubelet:Stopped APIServer:Stopped Kubeconfig:Stopped Worker:true TimeToStop: DockerEnv: PodManEnv:}

                                                
                                                
** /stderr **
--- PASS: TestMultiNode/serial/StopMultiNode (24.47s)
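Stopping the whole profile and reading the result from the exit code, as observed in this run (0 when everything is running, 7 when hosts are stopped). A sketch:

    out/minikube-linux-arm64 -p multinode-651909 stop

    if out/minikube-linux-arm64 -p multinode-651909 status; then
        echo "all nodes running"
    else
        rc=$?
        echo "status exit code $rc (7 = one or more nodes stopped in this run)"
    fi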

                                                
                                    
TestMultiNode/serial/RestartMultiNode (81.81s)

                                                
                                                
=== RUN   TestMultiNode/serial/RestartMultiNode
multinode_test.go:372: (dbg) Run:  docker version -f {{.Server.Version}}
multinode_test.go:382: (dbg) Run:  out/minikube-linux-arm64 start -p multinode-651909 --wait=true -v=8 --alsologtostderr --driver=docker  --container-runtime=containerd
multinode_test.go:382: (dbg) Done: out/minikube-linux-arm64 start -p multinode-651909 --wait=true -v=8 --alsologtostderr --driver=docker  --container-runtime=containerd: (1m20.965785387s)
multinode_test.go:388: (dbg) Run:  out/minikube-linux-arm64 -p multinode-651909 status --alsologtostderr
multinode_test.go:402: (dbg) Run:  kubectl get nodes
multinode_test.go:410: (dbg) Run:  kubectl get nodes -o "go-template='{{range .items}}{{range .status.conditions}}{{if eq .type "Ready"}} {{.status}}{{"\n"}}{{end}}{{end}}{{end}}'"
--- PASS: TestMultiNode/serial/RestartMultiNode (81.81s)

                                                
                                    
TestMultiNode/serial/ValidateNameConflict (37.33s)

                                                
                                                
=== RUN   TestMultiNode/serial/ValidateNameConflict
multinode_test.go:471: (dbg) Run:  out/minikube-linux-arm64 node list -p multinode-651909
multinode_test.go:480: (dbg) Run:  out/minikube-linux-arm64 start -p multinode-651909-m02 --driver=docker  --container-runtime=containerd
multinode_test.go:480: (dbg) Non-zero exit: out/minikube-linux-arm64 start -p multinode-651909-m02 --driver=docker  --container-runtime=containerd: exit status 14 (94.306704ms)

                                                
                                                
-- stdout --
	* [multinode-651909-m02] minikube v1.32.0 on Ubuntu 20.04 (arm64)
	  - MINIKUBE_LOCATION=17703
	  - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
	  - KUBECONFIG=/home/jenkins/minikube-integration/17703-252966/kubeconfig
	  - MINIKUBE_HOME=/home/jenkins/minikube-integration/17703-252966/.minikube
	  - MINIKUBE_BIN=out/minikube-linux-arm64
	  - MINIKUBE_FORCE_SYSTEMD=
	
	

                                                
                                                
-- /stdout --
** stderr ** 
	! Profile name 'multinode-651909-m02' is duplicated with machine name 'multinode-651909-m02' in profile 'multinode-651909'
	X Exiting due to MK_USAGE: Profile name should be unique

                                                
                                                
** /stderr **
multinode_test.go:488: (dbg) Run:  out/minikube-linux-arm64 start -p multinode-651909-m03 --driver=docker  --container-runtime=containerd
multinode_test.go:488: (dbg) Done: out/minikube-linux-arm64 start -p multinode-651909-m03 --driver=docker  --container-runtime=containerd: (34.477217464s)
multinode_test.go:495: (dbg) Run:  out/minikube-linux-arm64 node add -p multinode-651909
multinode_test.go:495: (dbg) Non-zero exit: out/minikube-linux-arm64 node add -p multinode-651909: exit status 80 (628.516045ms)

                                                
                                                
-- stdout --
	* Adding node m03 to cluster multinode-651909
	
	

                                                
                                                
-- /stdout --
** stderr ** 
	X Exiting due to GUEST_NODE_ADD: failed to add node: Node multinode-651909-m03 already exists in multinode-651909-m03 profile
	* 
	╭─────────────────────────────────────────────────────────────────────────────────────────────╮
	│                                                                                             │
	│    * If the above advice does not help, please let us know:                                 │
	│      https://github.com/kubernetes/minikube/issues/new/choose                               │
	│                                                                                             │
	│    * Please run `minikube logs --file=logs.txt` and attach logs.txt to the GitHub issue.    │
	│    * Please also attach the following file to the GitHub issue:                             │
	│    * - /tmp/minikube_node_040ea7097fd6ed71e65be9a474587f81f0ccd21d_0.log                    │
	│                                                                                             │
	╰─────────────────────────────────────────────────────────────────────────────────────────────╯

                                                
                                                
** /stderr **
multinode_test.go:500: (dbg) Run:  out/minikube-linux-arm64 delete -p multinode-651909-m03
multinode_test.go:500: (dbg) Done: out/minikube-linux-arm64 delete -p multinode-651909-m03: (2.055836545s)
--- PASS: TestMultiNode/serial/ValidateNameConflict (37.33s)
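The two name-conflict checks above map to distinct exit codes; a sketch reusing the profile names from this run:

    # a new profile may not reuse an existing machine name -> MK_USAGE, exit 14
    out/minikube-linux-arm64 start -p multinode-651909-m02 --driver=docker --container-runtime=containerd
    echo $?    # 14

    # adding a node whose generated name collides with another profile -> GUEST_NODE_ADD, exit 80
    out/minikube-linux-arm64 start -p multinode-651909-m03 --driver=docker --container-runtime=containerd
    out/minikube-linux-arm64 node add -p multinode-651909
    echo $?    # 80

    # clean up the helper profile
    out/minikube-linux-arm64 delete -p multinode-651909-m03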

                                                
                                    
TestPreload (185.42s)

                                                
                                                
=== RUN   TestPreload
preload_test.go:44: (dbg) Run:  out/minikube-linux-arm64 start -p test-preload-548695 --memory=2200 --alsologtostderr --wait=true --preload=false --driver=docker  --container-runtime=containerd --kubernetes-version=v1.24.4
E1201 19:19:24.386767  258301 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/addons-488129/client.crt: no such file or directory
E1201 19:19:37.130134  258301 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/ingress-addon-legacy-853196/client.crt: no such file or directory
E1201 19:20:19.452299  258301 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/functional-616785/client.crt: no such file or directory
preload_test.go:44: (dbg) Done: out/minikube-linux-arm64 start -p test-preload-548695 --memory=2200 --alsologtostderr --wait=true --preload=false --driver=docker  --container-runtime=containerd --kubernetes-version=v1.24.4: (1m20.425260367s)
preload_test.go:52: (dbg) Run:  out/minikube-linux-arm64 -p test-preload-548695 image pull gcr.io/k8s-minikube/busybox
preload_test.go:52: (dbg) Done: out/minikube-linux-arm64 -p test-preload-548695 image pull gcr.io/k8s-minikube/busybox: (1.372191277s)
preload_test.go:58: (dbg) Run:  out/minikube-linux-arm64 stop -p test-preload-548695
preload_test.go:58: (dbg) Done: out/minikube-linux-arm64 stop -p test-preload-548695: (12.216759775s)
preload_test.go:66: (dbg) Run:  out/minikube-linux-arm64 start -p test-preload-548695 --memory=2200 --alsologtostderr -v=1 --wait=true --driver=docker  --container-runtime=containerd
E1201 19:21:42.494714  258301 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/functional-616785/client.crt: no such file or directory
preload_test.go:66: (dbg) Done: out/minikube-linux-arm64 start -p test-preload-548695 --memory=2200 --alsologtostderr -v=1 --wait=true --driver=docker  --container-runtime=containerd: (1m28.716905257s)
preload_test.go:71: (dbg) Run:  out/minikube-linux-arm64 -p test-preload-548695 image list
helpers_test.go:175: Cleaning up "test-preload-548695" profile ...
helpers_test.go:178: (dbg) Run:  out/minikube-linux-arm64 delete -p test-preload-548695
helpers_test.go:178: (dbg) Done: out/minikube-linux-arm64 delete -p test-preload-548695: (2.414227414s)
--- PASS: TestPreload (185.42s)
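The preload scenario above boils down to: create a cluster without the preloaded image tarball, pull an extra image, stop, restart, and check the image survived. A sketch with the version and profile from this run; the final grep is illustrative.

    out/minikube-linux-arm64 start -p test-preload-548695 --memory=2200 --wait=true \
        --preload=false --driver=docker --container-runtime=containerd --kubernetes-version=v1.24.4

    # pull an extra image into the node, then restart the cluster
    out/minikube-linux-arm64 -p test-preload-548695 image pull gcr.io/k8s-minikube/busybox
    out/minikube-linux-arm64 stop  -p test-preload-548695
    out/minikube-linux-arm64 start -p test-preload-548695 --memory=2200 --wait=true \
        --driver=docker --container-runtime=containerd

    # the busybox image should still be listed after the restart
    out/minikube-linux-arm64 -p test-preload-548695 image list | grep busybox
    out/minikube-linux-arm64 delete -p test-preload-548695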

                                                
                                    
TestScheduledStopUnix (109.09s)

                                                
                                                
=== RUN   TestScheduledStopUnix
scheduled_stop_test.go:128: (dbg) Run:  out/minikube-linux-arm64 start -p scheduled-stop-916379 --memory=2048 --driver=docker  --container-runtime=containerd
scheduled_stop_test.go:128: (dbg) Done: out/minikube-linux-arm64 start -p scheduled-stop-916379 --memory=2048 --driver=docker  --container-runtime=containerd: (32.04581957s)
scheduled_stop_test.go:137: (dbg) Run:  out/minikube-linux-arm64 stop -p scheduled-stop-916379 --schedule 5m
scheduled_stop_test.go:191: (dbg) Run:  out/minikube-linux-arm64 status --format={{.TimeToStop}} -p scheduled-stop-916379 -n scheduled-stop-916379
scheduled_stop_test.go:169: signal error was:  <nil>
scheduled_stop_test.go:137: (dbg) Run:  out/minikube-linux-arm64 stop -p scheduled-stop-916379 --schedule 15s
scheduled_stop_test.go:169: signal error was:  os: process already finished
scheduled_stop_test.go:137: (dbg) Run:  out/minikube-linux-arm64 stop -p scheduled-stop-916379 --cancel-scheduled
scheduled_stop_test.go:176: (dbg) Run:  out/minikube-linux-arm64 status --format={{.Host}} -p scheduled-stop-916379 -n scheduled-stop-916379
scheduled_stop_test.go:205: (dbg) Run:  out/minikube-linux-arm64 status -p scheduled-stop-916379
scheduled_stop_test.go:137: (dbg) Run:  out/minikube-linux-arm64 stop -p scheduled-stop-916379 --schedule 15s
scheduled_stop_test.go:169: signal error was:  os: process already finished
scheduled_stop_test.go:205: (dbg) Run:  out/minikube-linux-arm64 status -p scheduled-stop-916379
scheduled_stop_test.go:205: (dbg) Non-zero exit: out/minikube-linux-arm64 status -p scheduled-stop-916379: exit status 7 (110.684932ms)

                                                
                                                
-- stdout --
	scheduled-stop-916379
	type: Control Plane
	host: Stopped
	kubelet: Stopped
	apiserver: Stopped
	kubeconfig: Stopped
	

                                                
                                                
-- /stdout --
scheduled_stop_test.go:176: (dbg) Run:  out/minikube-linux-arm64 status --format={{.Host}} -p scheduled-stop-916379 -n scheduled-stop-916379
scheduled_stop_test.go:176: (dbg) Non-zero exit: out/minikube-linux-arm64 status --format={{.Host}} -p scheduled-stop-916379 -n scheduled-stop-916379: exit status 7 (91.574324ms)

                                                
                                                
-- stdout --
	Stopped

                                                
                                                
-- /stdout --
scheduled_stop_test.go:176: status error: exit status 7 (may be ok)
helpers_test.go:175: Cleaning up "scheduled-stop-916379" profile ...
helpers_test.go:178: (dbg) Run:  out/minikube-linux-arm64 delete -p scheduled-stop-916379
helpers_test.go:178: (dbg) Done: out/minikube-linux-arm64 delete -p scheduled-stop-916379: (5.15137565s)
--- PASS: TestScheduledStopUnix (109.09s)
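The scheduled-stop controls used above, sketched end to end (profile name from this run; the sleep is illustrative, just long enough for the 15s schedule to fire):

    # schedule a stop five minutes out, inspect the countdown, then cancel it
    out/minikube-linux-arm64 stop -p scheduled-stop-916379 --schedule 5m
    out/minikube-linux-arm64 status --format={{.TimeToStop}} -p scheduled-stop-916379 -n scheduled-stop-916379
    out/minikube-linux-arm64 stop -p scheduled-stop-916379 --cancel-scheduled

    # schedule a short stop and wait for it to fire; afterwards status exits 7
    out/minikube-linux-arm64 stop -p scheduled-stop-916379 --schedule 15s
    sleep 30
    out/minikube-linux-arm64 status --format={{.Host}} -p scheduled-stop-916379 -n scheduled-stop-916379    # Stopped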

                                                
                                    
TestInsufficientStorage (10.55s)

                                                
                                                
=== RUN   TestInsufficientStorage
status_test.go:50: (dbg) Run:  out/minikube-linux-arm64 start -p insufficient-storage-122085 --memory=2048 --output=json --wait=true --driver=docker  --container-runtime=containerd
status_test.go:50: (dbg) Non-zero exit: out/minikube-linux-arm64 start -p insufficient-storage-122085 --memory=2048 --output=json --wait=true --driver=docker  --container-runtime=containerd: exit status 26 (7.916744518s)

                                                
                                                
-- stdout --
	{"specversion":"1.0","id":"4feb56cf-4173-4f05-9b1a-8a50011ee0c2","source":"https://minikube.sigs.k8s.io/","type":"io.k8s.sigs.minikube.step","datacontenttype":"application/json","data":{"currentstep":"0","message":"[insufficient-storage-122085] minikube v1.32.0 on Ubuntu 20.04 (arm64)","name":"Initial Minikube Setup","totalsteps":"19"}}
	{"specversion":"1.0","id":"3d119ad9-85a2-4d1b-bdf8-7a975a22ce26","source":"https://minikube.sigs.k8s.io/","type":"io.k8s.sigs.minikube.info","datacontenttype":"application/json","data":{"message":"MINIKUBE_LOCATION=17703"}}
	{"specversion":"1.0","id":"2e7f1e3f-f131-4913-b483-2e475e2cb1b1","source":"https://minikube.sigs.k8s.io/","type":"io.k8s.sigs.minikube.info","datacontenttype":"application/json","data":{"message":"MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true"}}
	{"specversion":"1.0","id":"b74c8f2c-2f17-4403-9042-ea60047866c0","source":"https://minikube.sigs.k8s.io/","type":"io.k8s.sigs.minikube.info","datacontenttype":"application/json","data":{"message":"KUBECONFIG=/home/jenkins/minikube-integration/17703-252966/kubeconfig"}}
	{"specversion":"1.0","id":"6e29ff7c-e158-4372-b5b7-9413b29eec24","source":"https://minikube.sigs.k8s.io/","type":"io.k8s.sigs.minikube.info","datacontenttype":"application/json","data":{"message":"MINIKUBE_HOME=/home/jenkins/minikube-integration/17703-252966/.minikube"}}
	{"specversion":"1.0","id":"580ce309-97df-4639-afd5-cce0d18390d2","source":"https://minikube.sigs.k8s.io/","type":"io.k8s.sigs.minikube.info","datacontenttype":"application/json","data":{"message":"MINIKUBE_BIN=out/minikube-linux-arm64"}}
	{"specversion":"1.0","id":"389009a0-8557-4bf3-b19f-186a928f1914","source":"https://minikube.sigs.k8s.io/","type":"io.k8s.sigs.minikube.info","datacontenttype":"application/json","data":{"message":"MINIKUBE_FORCE_SYSTEMD="}}
	{"specversion":"1.0","id":"95911317-a517-4160-912c-18a2c2728ab4","source":"https://minikube.sigs.k8s.io/","type":"io.k8s.sigs.minikube.info","datacontenttype":"application/json","data":{"message":"MINIKUBE_TEST_STORAGE_CAPACITY=100"}}
	{"specversion":"1.0","id":"2ad93a5a-e561-4e6a-b5f6-7a1ef5c3f72e","source":"https://minikube.sigs.k8s.io/","type":"io.k8s.sigs.minikube.info","datacontenttype":"application/json","data":{"message":"MINIKUBE_TEST_AVAILABLE_STORAGE=19"}}
	{"specversion":"1.0","id":"0a1a5bf4-5e1f-428e-b609-50280364234e","source":"https://minikube.sigs.k8s.io/","type":"io.k8s.sigs.minikube.step","datacontenttype":"application/json","data":{"currentstep":"1","message":"Using the docker driver based on user configuration","name":"Selecting Driver","totalsteps":"19"}}
	{"specversion":"1.0","id":"39f73972-268d-4c18-b4e9-d8bf653a248e","source":"https://minikube.sigs.k8s.io/","type":"io.k8s.sigs.minikube.info","datacontenttype":"application/json","data":{"message":"Using Docker driver with root privileges"}}
	{"specversion":"1.0","id":"8000442d-845c-4233-8d51-2f11b526732e","source":"https://minikube.sigs.k8s.io/","type":"io.k8s.sigs.minikube.step","datacontenttype":"application/json","data":{"currentstep":"3","message":"Starting control plane node insufficient-storage-122085 in cluster insufficient-storage-122085","name":"Starting Node","totalsteps":"19"}}
	{"specversion":"1.0","id":"8df0e2fd-8ac3-4560-b1bf-4b6ebaf87ac9","source":"https://minikube.sigs.k8s.io/","type":"io.k8s.sigs.minikube.step","datacontenttype":"application/json","data":{"currentstep":"5","message":"Pulling base image ...","name":"Pulling Base Image","totalsteps":"19"}}
	{"specversion":"1.0","id":"5f30ad36-27c9-4eb4-a942-0bc0964afef6","source":"https://minikube.sigs.k8s.io/","type":"io.k8s.sigs.minikube.step","datacontenttype":"application/json","data":{"currentstep":"8","message":"Creating docker container (CPUs=2, Memory=2048MB) ...","name":"Creating Container","totalsteps":"19"}}
	{"specversion":"1.0","id":"60792cef-0513-4e5b-96df-21ae93334c8f","source":"https://minikube.sigs.k8s.io/","type":"io.k8s.sigs.minikube.error","datacontenttype":"application/json","data":{"advice":"Try one or more of the following to free up space on the device:\n\t\n\t\t\t1. Run \"docker system prune\" to remove unused Docker data (optionally with \"-a\")\n\t\t\t2. Increase the storage allocated to Docker for Desktop by clicking on:\n\t\t\t\tDocker icon \u003e Preferences \u003e Resources \u003e Disk Image Size\n\t\t\t3. Run \"minikube ssh -- docker system prune\" if using the Docker container runtime","exitcode":"26","issues":"https://github.com/kubernetes/minikube/issues/9024","message":"Docker is out of disk space! (/var is at 100%% of capacity). You can pass '--force' to skip this check.","name":"RSRC_DOCKER_STORAGE","url":""}}

                                                
                                                
-- /stdout --
status_test.go:76: (dbg) Run:  out/minikube-linux-arm64 status -p insufficient-storage-122085 --output=json --layout=cluster
status_test.go:76: (dbg) Non-zero exit: out/minikube-linux-arm64 status -p insufficient-storage-122085 --output=json --layout=cluster: exit status 7 (333.051713ms)

                                                
                                                
-- stdout --
	{"Name":"insufficient-storage-122085","StatusCode":507,"StatusName":"InsufficientStorage","StatusDetail":"/var is almost out of disk space","Step":"Creating Container","StepDetail":"Creating docker container (CPUs=2, Memory=2048MB) ...","BinaryVersion":"v1.32.0","Components":{"kubeconfig":{"Name":"kubeconfig","StatusCode":500,"StatusName":"Error"}},"Nodes":[{"Name":"insufficient-storage-122085","StatusCode":507,"StatusName":"InsufficientStorage","Components":{"apiserver":{"Name":"apiserver","StatusCode":405,"StatusName":"Stopped"},"kubelet":{"Name":"kubelet","StatusCode":405,"StatusName":"Stopped"}}}]}

                                                
                                                
-- /stdout --
** stderr ** 
	E1201 19:24:21.563425  366031 status.go:415] kubeconfig endpoint: extract IP: "insufficient-storage-122085" does not appear in /home/jenkins/minikube-integration/17703-252966/kubeconfig

                                                
                                                
** /stderr **
status_test.go:76: (dbg) Run:  out/minikube-linux-arm64 status -p insufficient-storage-122085 --output=json --layout=cluster
status_test.go:76: (dbg) Non-zero exit: out/minikube-linux-arm64 status -p insufficient-storage-122085 --output=json --layout=cluster: exit status 7 (334.075808ms)

                                                
                                                
-- stdout --
	{"Name":"insufficient-storage-122085","StatusCode":507,"StatusName":"InsufficientStorage","StatusDetail":"/var is almost out of disk space","BinaryVersion":"v1.32.0","Components":{"kubeconfig":{"Name":"kubeconfig","StatusCode":500,"StatusName":"Error"}},"Nodes":[{"Name":"insufficient-storage-122085","StatusCode":507,"StatusName":"InsufficientStorage","Components":{"apiserver":{"Name":"apiserver","StatusCode":405,"StatusName":"Stopped"},"kubelet":{"Name":"kubelet","StatusCode":405,"StatusName":"Stopped"}}}]}

                                                
                                                
-- /stdout --
** stderr ** 
	E1201 19:24:21.898673  366083 status.go:415] kubeconfig endpoint: extract IP: "insufficient-storage-122085" does not appear in /home/jenkins/minikube-integration/17703-252966/kubeconfig
	E1201 19:24:21.911127  366083 status.go:559] unable to read event log: stat: stat /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/insufficient-storage-122085/events.json: no such file or directory

                                                
                                                
** /stderr **
helpers_test.go:175: Cleaning up "insufficient-storage-122085" profile ...
helpers_test.go:178: (dbg) Run:  out/minikube-linux-arm64 delete -p insufficient-storage-122085
helpers_test.go:178: (dbg) Done: out/minikube-linux-arm64 delete -p insufficient-storage-122085: (1.963973863s)
--- PASS: TestInsufficientStorage (10.55s)
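The storage preflight failure above is driven by the two MINIKUBE_TEST_* values visible in the JSON events; presumably the test harness sets them as environment variables to fake the disk numbers, so the start aborts with exit 26 (RSRC_DOCKER_STORAGE) and status reports code 507. A sketch under that assumption:

    # test-only overrides, values as shown in the log above
    export MINIKUBE_TEST_STORAGE_CAPACITY=100
    export MINIKUBE_TEST_AVAILABLE_STORAGE=19

    out/minikube-linux-arm64 start -p insufficient-storage-122085 --memory=2048 --output=json \
        --wait=true --driver=docker --container-runtime=containerd
    echo $?    # 26

    # cluster status is reported as InsufficientStorage (code 507)
    out/minikube-linux-arm64 status -p insufficient-storage-122085 --output=json --layout=cluster
    out/minikube-linux-arm64 delete -p insufficient-storage-122085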

                                                
                                    
TestRunningBinaryUpgrade (87.83s)

                                                
                                                
=== RUN   TestRunningBinaryUpgrade
=== PAUSE TestRunningBinaryUpgrade

                                                
                                                

                                                
                                                
=== CONT  TestRunningBinaryUpgrade
version_upgrade_test.go:133: (dbg) Run:  /tmp/minikube-v1.26.0.1969813380.exe start -p running-upgrade-736353 --memory=2200 --vm-driver=docker  --container-runtime=containerd
version_upgrade_test.go:133: (dbg) Done: /tmp/minikube-v1.26.0.1969813380.exe start -p running-upgrade-736353 --memory=2200 --vm-driver=docker  --container-runtime=containerd: (44.180418686s)
version_upgrade_test.go:143: (dbg) Run:  out/minikube-linux-arm64 start -p running-upgrade-736353 --memory=2200 --alsologtostderr -v=1 --driver=docker  --container-runtime=containerd
version_upgrade_test.go:143: (dbg) Done: out/minikube-linux-arm64 start -p running-upgrade-736353 --memory=2200 --alsologtostderr -v=1 --driver=docker  --container-runtime=containerd: (38.691783139s)
helpers_test.go:175: Cleaning up "running-upgrade-736353" profile ...
helpers_test.go:178: (dbg) Run:  out/minikube-linux-arm64 delete -p running-upgrade-736353
helpers_test.go:178: (dbg) Done: out/minikube-linux-arm64 delete -p running-upgrade-736353: (3.344368098s)
--- PASS: TestRunningBinaryUpgrade (87.83s)
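The running-binary upgrade is: create a cluster with an older released minikube, then run the binary under test against the same, still running, profile. A sketch; the old binary path is whatever the test downloaded to /tmp in this run.

    # start a cluster with the old release (v1.26.0 here)
    /tmp/minikube-v1.26.0.1969813380.exe start -p running-upgrade-736353 --memory=2200 \
        --vm-driver=docker --container-runtime=containerd

    # upgrade in place with the binary under test, without stopping first
    out/minikube-linux-arm64 start -p running-upgrade-736353 --memory=2200 \
        --alsologtostderr -v=1 --driver=docker --container-runtime=containerd
    out/minikube-linux-arm64 delete -p running-upgrade-736353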

                                                
                                    
TestKubernetesUpgrade (140.63s)

                                                
                                                
=== RUN   TestKubernetesUpgrade
=== PAUSE TestKubernetesUpgrade

                                                
                                                

                                                
                                                
=== CONT  TestKubernetesUpgrade
version_upgrade_test.go:235: (dbg) Run:  out/minikube-linux-arm64 start -p kubernetes-upgrade-548139 --memory=2200 --kubernetes-version=v1.16.0 --alsologtostderr -v=1 --driver=docker  --container-runtime=containerd
version_upgrade_test.go:235: (dbg) Done: out/minikube-linux-arm64 start -p kubernetes-upgrade-548139 --memory=2200 --kubernetes-version=v1.16.0 --alsologtostderr -v=1 --driver=docker  --container-runtime=containerd: (1m11.774399466s)
version_upgrade_test.go:240: (dbg) Run:  out/minikube-linux-arm64 stop -p kubernetes-upgrade-548139
version_upgrade_test.go:240: (dbg) Done: out/minikube-linux-arm64 stop -p kubernetes-upgrade-548139: (1.421176899s)
version_upgrade_test.go:245: (dbg) Run:  out/minikube-linux-arm64 -p kubernetes-upgrade-548139 status --format={{.Host}}
version_upgrade_test.go:245: (dbg) Non-zero exit: out/minikube-linux-arm64 -p kubernetes-upgrade-548139 status --format={{.Host}}: exit status 7 (104.441275ms)

                                                
                                                
-- stdout --
	Stopped

                                                
                                                
-- /stdout --
version_upgrade_test.go:247: status error: exit status 7 (may be ok)
version_upgrade_test.go:256: (dbg) Run:  out/minikube-linux-arm64 start -p kubernetes-upgrade-548139 --memory=2200 --kubernetes-version=v1.29.0-rc.1 --alsologtostderr -v=1 --driver=docker  --container-runtime=containerd
version_upgrade_test.go:256: (dbg) Done: out/minikube-linux-arm64 start -p kubernetes-upgrade-548139 --memory=2200 --kubernetes-version=v1.29.0-rc.1 --alsologtostderr -v=1 --driver=docker  --container-runtime=containerd: (33.97916811s)
version_upgrade_test.go:261: (dbg) Run:  kubectl --context kubernetes-upgrade-548139 version --output=json
version_upgrade_test.go:280: Attempting to downgrade Kubernetes (should fail)
version_upgrade_test.go:282: (dbg) Run:  out/minikube-linux-arm64 start -p kubernetes-upgrade-548139 --memory=2200 --kubernetes-version=v1.16.0 --driver=docker  --container-runtime=containerd
version_upgrade_test.go:282: (dbg) Non-zero exit: out/minikube-linux-arm64 start -p kubernetes-upgrade-548139 --memory=2200 --kubernetes-version=v1.16.0 --driver=docker  --container-runtime=containerd: exit status 106 (98.080018ms)

                                                
                                                
-- stdout --
	* [kubernetes-upgrade-548139] minikube v1.32.0 on Ubuntu 20.04 (arm64)
	  - MINIKUBE_LOCATION=17703
	  - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
	  - KUBECONFIG=/home/jenkins/minikube-integration/17703-252966/kubeconfig
	  - MINIKUBE_HOME=/home/jenkins/minikube-integration/17703-252966/.minikube
	  - MINIKUBE_BIN=out/minikube-linux-arm64
	  - MINIKUBE_FORCE_SYSTEMD=
	
	

                                                
                                                
-- /stdout --
** stderr ** 
	X Exiting due to K8S_DOWNGRADE_UNSUPPORTED: Unable to safely downgrade existing Kubernetes v1.29.0-rc.1 cluster to v1.16.0
	* Suggestion: 
	
	    1) Recreate the cluster with Kubernetes 1.16.0, by running:
	    
	    minikube delete -p kubernetes-upgrade-548139
	    minikube start -p kubernetes-upgrade-548139 --kubernetes-version=v1.16.0
	    
	    2) Create a second cluster with Kubernetes 1.16.0, by running:
	    
	    minikube start -p kubernetes-upgrade-5481392 --kubernetes-version=v1.16.0
	    
	    3) Use the existing cluster at version Kubernetes 1.29.0-rc.1, by running:
	    
	    minikube start -p kubernetes-upgrade-548139 --kubernetes-version=v1.29.0-rc.1
	    

                                                
                                                
** /stderr **
version_upgrade_test.go:286: Attempting restart after unsuccessful downgrade
version_upgrade_test.go:288: (dbg) Run:  out/minikube-linux-arm64 start -p kubernetes-upgrade-548139 --memory=2200 --kubernetes-version=v1.29.0-rc.1 --alsologtostderr -v=1 --driver=docker  --container-runtime=containerd
version_upgrade_test.go:288: (dbg) Done: out/minikube-linux-arm64 start -p kubernetes-upgrade-548139 --memory=2200 --kubernetes-version=v1.29.0-rc.1 --alsologtostderr -v=1 --driver=docker  --container-runtime=containerd: (30.318475598s)
helpers_test.go:175: Cleaning up "kubernetes-upgrade-548139" profile ...
helpers_test.go:178: (dbg) Run:  out/minikube-linux-arm64 delete -p kubernetes-upgrade-548139
helpers_test.go:178: (dbg) Done: out/minikube-linux-arm64 delete -p kubernetes-upgrade-548139: (2.840269442s)
--- PASS: TestKubernetesUpgrade (140.63s)
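The upgrade/downgrade sequence above, plus the recovery path the error output itself suggests, as a sketch (versions and profile from this run; exit 106 corresponds to K8S_DOWNGRADE_UNSUPPORTED):

    # upgrade path: old version, stop, start again with the new version
    out/minikube-linux-arm64 start -p kubernetes-upgrade-548139 --memory=2200 \
        --kubernetes-version=v1.16.0 --driver=docker --container-runtime=containerd
    out/minikube-linux-arm64 stop  -p kubernetes-upgrade-548139
    out/minikube-linux-arm64 start -p kubernetes-upgrade-548139 --memory=2200 \
        --kubernetes-version=v1.29.0-rc.1 --driver=docker --container-runtime=containerd

    # downgrading the existing cluster is refused with exit code 106
    out/minikube-linux-arm64 start -p kubernetes-upgrade-548139 --memory=2200 \
        --kubernetes-version=v1.16.0 --driver=docker --container-runtime=containerd || echo "refused: $?"

    # to actually go back to v1.16.0, delete and recreate, as the error message advises
    out/minikube-linux-arm64 delete -p kubernetes-upgrade-548139
    out/minikube-linux-arm64 start  -p kubernetes-upgrade-548139 --kubernetes-version=v1.16.0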

                                                
                                    
TestMissingContainerUpgrade (172.51s)

                                                
                                                
=== RUN   TestMissingContainerUpgrade
=== PAUSE TestMissingContainerUpgrade

                                                
                                                

                                                
                                                
=== CONT  TestMissingContainerUpgrade
E1201 19:24:24.386056  258301 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/addons-488129/client.crt: no such file or directory
version_upgrade_test.go:322: (dbg) Run:  /tmp/minikube-v1.26.0.4290788748.exe start -p missing-upgrade-255027 --memory=2200 --driver=docker  --container-runtime=containerd
E1201 19:24:37.130805  258301 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/ingress-addon-legacy-853196/client.crt: no such file or directory
E1201 19:25:19.452580  258301 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/functional-616785/client.crt: no such file or directory
version_upgrade_test.go:322: (dbg) Done: /tmp/minikube-v1.26.0.4290788748.exe start -p missing-upgrade-255027 --memory=2200 --driver=docker  --container-runtime=containerd: (1m15.049297448s)
version_upgrade_test.go:331: (dbg) Run:  docker stop missing-upgrade-255027
version_upgrade_test.go:331: (dbg) Done: docker stop missing-upgrade-255027: (10.314476694s)
version_upgrade_test.go:336: (dbg) Run:  docker rm missing-upgrade-255027
version_upgrade_test.go:342: (dbg) Run:  out/minikube-linux-arm64 start -p missing-upgrade-255027 --memory=2200 --alsologtostderr -v=1 --driver=docker  --container-runtime=containerd
E1201 19:26:00.182925  258301 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/ingress-addon-legacy-853196/client.crt: no such file or directory
version_upgrade_test.go:342: (dbg) Done: out/minikube-linux-arm64 start -p missing-upgrade-255027 --memory=2200 --alsologtostderr -v=1 --driver=docker  --container-runtime=containerd: (1m22.889637046s)
helpers_test.go:175: Cleaning up "missing-upgrade-255027" profile ...
helpers_test.go:178: (dbg) Run:  out/minikube-linux-arm64 delete -p missing-upgrade-255027
helpers_test.go:178: (dbg) Done: out/minikube-linux-arm64 delete -p missing-upgrade-255027: (2.552573343s)
--- PASS: TestMissingContainerUpgrade (172.51s)
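This variant removes the cluster's docker container out from under the old profile before upgrading; the binary under test is expected to recreate it. A sketch with the names from this run:

    /tmp/minikube-v1.26.0.4290788748.exe start -p missing-upgrade-255027 --memory=2200 \
        --driver=docker --container-runtime=containerd

    # simulate the container going missing
    docker stop missing-upgrade-255027
    docker rm   missing-upgrade-255027

    # the new binary should rebuild the container for the existing profile
    out/minikube-linux-arm64 start -p missing-upgrade-255027 --memory=2200 \
        --alsologtostderr -v=1 --driver=docker --container-runtime=containerd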

                                                
                                    
TestPause/serial/Start (98.8s)

                                                
                                                
=== RUN   TestPause/serial/Start
pause_test.go:80: (dbg) Run:  out/minikube-linux-arm64 start -p pause-489967 --memory=2048 --install-addons=false --wait=all --driver=docker  --container-runtime=containerd
pause_test.go:80: (dbg) Done: out/minikube-linux-arm64 start -p pause-489967 --memory=2048 --install-addons=false --wait=all --driver=docker  --container-runtime=containerd: (1m38.801753031s)
--- PASS: TestPause/serial/Start (98.80s)

                                                
                                    
TestPause/serial/SecondStartNoReconfiguration (6.82s)

                                                
                                                
=== RUN   TestPause/serial/SecondStartNoReconfiguration
pause_test.go:92: (dbg) Run:  out/minikube-linux-arm64 start -p pause-489967 --alsologtostderr -v=1 --driver=docker  --container-runtime=containerd
pause_test.go:92: (dbg) Done: out/minikube-linux-arm64 start -p pause-489967 --alsologtostderr -v=1 --driver=docker  --container-runtime=containerd: (6.809046909s)
--- PASS: TestPause/serial/SecondStartNoReconfiguration (6.82s)

                                                
                                    
TestPause/serial/Pause (0.87s)

                                                
                                                
=== RUN   TestPause/serial/Pause
pause_test.go:110: (dbg) Run:  out/minikube-linux-arm64 pause -p pause-489967 --alsologtostderr -v=5
--- PASS: TestPause/serial/Pause (0.87s)

                                                
                                    
TestPause/serial/VerifyStatus (0.4s)

                                                
                                                
=== RUN   TestPause/serial/VerifyStatus
status_test.go:76: (dbg) Run:  out/minikube-linux-arm64 status -p pause-489967 --output=json --layout=cluster
status_test.go:76: (dbg) Non-zero exit: out/minikube-linux-arm64 status -p pause-489967 --output=json --layout=cluster: exit status 2 (396.215704ms)

                                                
                                                
-- stdout --
	{"Name":"pause-489967","StatusCode":418,"StatusName":"Paused","Step":"Done","StepDetail":"* Paused 7 containers in: kube-system, kubernetes-dashboard, storage-gluster, istio-operator","BinaryVersion":"v1.32.0","Components":{"kubeconfig":{"Name":"kubeconfig","StatusCode":200,"StatusName":"OK"}},"Nodes":[{"Name":"pause-489967","StatusCode":200,"StatusName":"OK","Components":{"apiserver":{"Name":"apiserver","StatusCode":418,"StatusName":"Paused"},"kubelet":{"Name":"kubelet","StatusCode":405,"StatusName":"Stopped"}}}]}

                                                
                                                
-- /stdout --
--- PASS: TestPause/serial/VerifyStatus (0.40s)

                                                
                                    
TestPause/serial/Unpause (1.17s)

                                                
                                                
=== RUN   TestPause/serial/Unpause
pause_test.go:121: (dbg) Run:  out/minikube-linux-arm64 unpause -p pause-489967 --alsologtostderr -v=5
pause_test.go:121: (dbg) Done: out/minikube-linux-arm64 unpause -p pause-489967 --alsologtostderr -v=5: (1.17066381s)
--- PASS: TestPause/serial/Unpause (1.17s)

                                                
                                    
TestPause/serial/PauseAgain (1.15s)

                                                
                                                
=== RUN   TestPause/serial/PauseAgain
pause_test.go:110: (dbg) Run:  out/minikube-linux-arm64 pause -p pause-489967 --alsologtostderr -v=5
pause_test.go:110: (dbg) Done: out/minikube-linux-arm64 pause -p pause-489967 --alsologtostderr -v=5: (1.154774653s)
--- PASS: TestPause/serial/PauseAgain (1.15s)

                                                
                                    
TestPause/serial/DeletePaused (5.21s)

                                                
                                                
=== RUN   TestPause/serial/DeletePaused
pause_test.go:132: (dbg) Run:  out/minikube-linux-arm64 delete -p pause-489967 --alsologtostderr -v=5
pause_test.go:132: (dbg) Done: out/minikube-linux-arm64 delete -p pause-489967 --alsologtostderr -v=5: (5.212481573s)
--- PASS: TestPause/serial/DeletePaused (5.21s)

                                                
                                    
TestPause/serial/VerifyDeletedResources (1.87s)

                                                
                                                
=== RUN   TestPause/serial/VerifyDeletedResources
pause_test.go:142: (dbg) Run:  out/minikube-linux-arm64 profile list --output json
pause_test.go:142: (dbg) Done: out/minikube-linux-arm64 profile list --output json: (1.792295172s)
pause_test.go:168: (dbg) Run:  docker ps -a
pause_test.go:173: (dbg) Run:  docker volume inspect pause-489967
pause_test.go:173: (dbg) Non-zero exit: docker volume inspect pause-489967: exit status 1 (20.499681ms)

                                                
                                                
-- stdout --
	[]

                                                
                                                
-- /stdout --
** stderr ** 
	Error response from daemon: get pause-489967: no such volume

                                                
                                                
** /stderr **
pause_test.go:178: (dbg) Run:  docker network ls
--- PASS: TestPause/serial/VerifyDeletedResources (1.87s)
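The pause lifecycle exercised by this group, condensed into one sketch (profile from this run). Note that while the cluster is paused, status exits 2 and reports StatusCode 418 ("Paused"); after deletion the docker volume is gone.

    out/minikube-linux-arm64 pause   -p pause-489967 --alsologtostderr -v=5
    out/minikube-linux-arm64 status  -p pause-489967 --output=json --layout=cluster    # exit 2, Paused
    out/minikube-linux-arm64 unpause -p pause-489967 --alsologtostderr -v=5
    out/minikube-linux-arm64 delete  -p pause-489967 --alsologtostderr -v=5

    # after deletion the volume is gone and the profile no longer appears
    docker volume inspect pause-489967    # "no such volume"
    out/minikube-linux-arm64 profile list --output json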

                                                
                                    
TestStoppedBinaryUpgrade/Setup (2.03s)

                                                
                                                
=== RUN   TestStoppedBinaryUpgrade/Setup
--- PASS: TestStoppedBinaryUpgrade/Setup (2.03s)

                                                
                                    
TestStoppedBinaryUpgrade/Upgrade (85.47s)

                                                
                                                
=== RUN   TestStoppedBinaryUpgrade/Upgrade
version_upgrade_test.go:196: (dbg) Run:  /tmp/minikube-v1.26.0.3552351393.exe start -p stopped-upgrade-653161 --memory=2200 --vm-driver=docker  --container-runtime=containerd
version_upgrade_test.go:196: (dbg) Done: /tmp/minikube-v1.26.0.3552351393.exe start -p stopped-upgrade-653161 --memory=2200 --vm-driver=docker  --container-runtime=containerd: (45.638347825s)
version_upgrade_test.go:205: (dbg) Run:  /tmp/minikube-v1.26.0.3552351393.exe -p stopped-upgrade-653161 stop
version_upgrade_test.go:205: (dbg) Done: /tmp/minikube-v1.26.0.3552351393.exe -p stopped-upgrade-653161 stop: (1.417038182s)
version_upgrade_test.go:211: (dbg) Run:  out/minikube-linux-arm64 start -p stopped-upgrade-653161 --memory=2200 --alsologtostderr -v=1 --driver=docker  --container-runtime=containerd
version_upgrade_test.go:211: (dbg) Done: out/minikube-linux-arm64 start -p stopped-upgrade-653161 --memory=2200 --alsologtostderr -v=1 --driver=docker  --container-runtime=containerd: (38.410652864s)
--- PASS: TestStoppedBinaryUpgrade/Upgrade (85.47s)

                                                
                                    
TestStoppedBinaryUpgrade/MinikubeLogs (1.63s)

                                                
                                                
=== RUN   TestStoppedBinaryUpgrade/MinikubeLogs
version_upgrade_test.go:219: (dbg) Run:  out/minikube-linux-arm64 logs -p stopped-upgrade-653161
version_upgrade_test.go:219: (dbg) Done: out/minikube-linux-arm64 logs -p stopped-upgrade-653161: (1.628236155s)
--- PASS: TestStoppedBinaryUpgrade/MinikubeLogs (1.63s)

                                                
                                    
TestNoKubernetes/serial/StartNoK8sWithVersion (0.1s)

                                                
                                                
=== RUN   TestNoKubernetes/serial/StartNoK8sWithVersion
no_kubernetes_test.go:83: (dbg) Run:  out/minikube-linux-arm64 start -p NoKubernetes-053646 --no-kubernetes --kubernetes-version=1.20 --driver=docker  --container-runtime=containerd
no_kubernetes_test.go:83: (dbg) Non-zero exit: out/minikube-linux-arm64 start -p NoKubernetes-053646 --no-kubernetes --kubernetes-version=1.20 --driver=docker  --container-runtime=containerd: exit status 14 (103.481642ms)

                                                
                                                
-- stdout --
	* [NoKubernetes-053646] minikube v1.32.0 on Ubuntu 20.04 (arm64)
	  - MINIKUBE_LOCATION=17703
	  - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
	  - KUBECONFIG=/home/jenkins/minikube-integration/17703-252966/kubeconfig
	  - MINIKUBE_HOME=/home/jenkins/minikube-integration/17703-252966/.minikube
	  - MINIKUBE_BIN=out/minikube-linux-arm64
	  - MINIKUBE_FORCE_SYSTEMD=
	
	

                                                
                                                
-- /stdout --
** stderr ** 
	X Exiting due to MK_USAGE: cannot specify --kubernetes-version with --no-kubernetes,
	to unset a global config run:
	
	$ minikube config unset kubernetes-version

                                                
                                                
** /stderr **
--- PASS: TestNoKubernetes/serial/StartNoK8sWithVersion (0.10s)

                                                
                                    
TestNoKubernetes/serial/StartWithK8s (41.53s)

                                                
                                                
=== RUN   TestNoKubernetes/serial/StartWithK8s
no_kubernetes_test.go:95: (dbg) Run:  out/minikube-linux-arm64 start -p NoKubernetes-053646 --driver=docker  --container-runtime=containerd
E1201 19:29:37.130131  258301 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/ingress-addon-legacy-853196/client.crt: no such file or directory
no_kubernetes_test.go:95: (dbg) Done: out/minikube-linux-arm64 start -p NoKubernetes-053646 --driver=docker  --container-runtime=containerd: (40.977414597s)
no_kubernetes_test.go:200: (dbg) Run:  out/minikube-linux-arm64 -p NoKubernetes-053646 status -o json
--- PASS: TestNoKubernetes/serial/StartWithK8s (41.53s)

                                                
                                    
TestNoKubernetes/serial/StartWithStopK8s (19.66s)

                                                
                                                
=== RUN   TestNoKubernetes/serial/StartWithStopK8s
no_kubernetes_test.go:112: (dbg) Run:  out/minikube-linux-arm64 start -p NoKubernetes-053646 --no-kubernetes --driver=docker  --container-runtime=containerd
no_kubernetes_test.go:112: (dbg) Done: out/minikube-linux-arm64 start -p NoKubernetes-053646 --no-kubernetes --driver=docker  --container-runtime=containerd: (17.060757096s)
no_kubernetes_test.go:200: (dbg) Run:  out/minikube-linux-arm64 -p NoKubernetes-053646 status -o json
no_kubernetes_test.go:200: (dbg) Non-zero exit: out/minikube-linux-arm64 -p NoKubernetes-053646 status -o json: exit status 2 (535.583348ms)

                                                
                                                
-- stdout --
	{"Name":"NoKubernetes-053646","Host":"Running","Kubelet":"Stopped","APIServer":"Stopped","Kubeconfig":"Configured","Worker":false}

                                                
                                                
-- /stdout --
no_kubernetes_test.go:124: (dbg) Run:  out/minikube-linux-arm64 delete -p NoKubernetes-053646
no_kubernetes_test.go:124: (dbg) Done: out/minikube-linux-arm64 delete -p NoKubernetes-053646: (2.065313249s)
--- PASS: TestNoKubernetes/serial/StartWithStopK8s (19.66s)
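The --no-kubernetes behaviour shown in this group, sketched (profile from this run): combining it with --kubernetes-version is rejected with exit 14, while on its own it brings up the node with the kubelet and apiserver stopped, so status exits 2, as observed above.

    # invalid combination -> MK_USAGE, exit 14
    out/minikube-linux-arm64 start -p NoKubernetes-053646 --no-kubernetes --kubernetes-version=1.20 \
        --driver=docker --container-runtime=containerd || echo "rejected: $?"

    # valid: a node without Kubernetes; status -o json shows Kubelet/APIServer Stopped
    out/minikube-linux-arm64 start -p NoKubernetes-053646 --no-kubernetes \
        --driver=docker --container-runtime=containerd
    out/minikube-linux-arm64 -p NoKubernetes-053646 status -o json
    out/minikube-linux-arm64 delete -p NoKubernetes-053646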

                                                
                                    
TestNetworkPlugins/group/false (4.8s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/false
net_test.go:246: (dbg) Run:  out/minikube-linux-arm64 start -p false-730581 --memory=2048 --alsologtostderr --cni=false --driver=docker  --container-runtime=containerd
net_test.go:246: (dbg) Non-zero exit: out/minikube-linux-arm64 start -p false-730581 --memory=2048 --alsologtostderr --cni=false --driver=docker  --container-runtime=containerd: exit status 14 (309.707787ms)

                                                
                                                
-- stdout --
	* [false-730581] minikube v1.32.0 on Ubuntu 20.04 (arm64)
	  - MINIKUBE_LOCATION=17703
	  - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
	  - KUBECONFIG=/home/jenkins/minikube-integration/17703-252966/kubeconfig
	  - MINIKUBE_HOME=/home/jenkins/minikube-integration/17703-252966/.minikube
	  - MINIKUBE_BIN=out/minikube-linux-arm64
	  - MINIKUBE_FORCE_SYSTEMD=
	* Using the docker driver based on user configuration
	
	

                                                
                                                
-- /stdout --
** stderr ** 
	I1201 19:30:14.917542  396231 out.go:296] Setting OutFile to fd 1 ...
	I1201 19:30:14.917755  396231 out.go:343] TERM=,COLORTERM=, which probably does not support color
	I1201 19:30:14.917766  396231 out.go:309] Setting ErrFile to fd 2...
	I1201 19:30:14.917774  396231 out.go:343] TERM=,COLORTERM=, which probably does not support color
	I1201 19:30:14.918098  396231 root.go:338] Updating PATH: /home/jenkins/minikube-integration/17703-252966/.minikube/bin
	I1201 19:30:14.918560  396231 out.go:303] Setting JSON to false
	I1201 19:30:14.919728  396231 start.go:128] hostinfo: {"hostname":"ip-172-31-31-251","uptime":7961,"bootTime":1701451054,"procs":354,"os":"linux","platform":"ubuntu","platformFamily":"debian","platformVersion":"20.04","kernelVersion":"5.15.0-1050-aws","kernelArch":"aarch64","virtualizationSystem":"","virtualizationRole":"","hostId":"982e3628-3742-4b3e-bb63-ac1b07660ec7"}
	I1201 19:30:14.919805  396231 start.go:138] virtualization:  
	I1201 19:30:14.923081  396231 out.go:177] * [false-730581] minikube v1.32.0 on Ubuntu 20.04 (arm64)
	I1201 19:30:14.925004  396231 out.go:177]   - MINIKUBE_LOCATION=17703
	I1201 19:30:14.925060  396231 notify.go:220] Checking for updates...
	I1201 19:30:14.928828  396231 out.go:177]   - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
	I1201 19:30:14.931061  396231 out.go:177]   - KUBECONFIG=/home/jenkins/minikube-integration/17703-252966/kubeconfig
	I1201 19:30:14.936583  396231 out.go:177]   - MINIKUBE_HOME=/home/jenkins/minikube-integration/17703-252966/.minikube
	I1201 19:30:14.938547  396231 out.go:177]   - MINIKUBE_BIN=out/minikube-linux-arm64
	I1201 19:30:14.940650  396231 out.go:177]   - MINIKUBE_FORCE_SYSTEMD=
	I1201 19:30:14.943182  396231 config.go:182] Loaded profile config "NoKubernetes-053646": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v0.0.0
	I1201 19:30:14.943366  396231 driver.go:392] Setting default libvirt URI to qemu:///system
	I1201 19:30:14.982992  396231 docker.go:122] docker version: linux-24.0.7:Docker Engine - Community
	I1201 19:30:14.983111  396231 cli_runner.go:164] Run: docker system info --format "{{json .}}"
	I1201 19:30:15.098135  396231 info.go:266] docker info: {ID:EOU5:DNGX:XN6V:L2FZ:UXRM:5TWK:EVUR:KC2F:GT7Z:Y4O4:GB77:5PD3 Containers:1 ContainersRunning:1 ContainersPaused:0 ContainersStopped:0 Images:4 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local logentries splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:true BridgeNfIP6Tables:true Debug:false NFd:31 OomKillDisable:true NGoroutines:45 SystemTime:2023-12-01 19:30:15.085782859 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1050-aws OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Archi
tecture:aarch64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:2 MemTotal:8215040000 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ip-172-31-31-251 Labels:[] ExperimentalBuild:false ServerVersion:24.0.7 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:d8f198a4ed8892c764191ef7b3b06d8a2eeb5c7f Expected:d8f198a4ed8892c764191ef7b3b06d8a2eeb5c7f} RuncCommit:{ID:v1.1.10-0-g18a0cb0 Expected:v1.1.10-0-g18a0cb0} InitCommit:{ID:de40ad0 Expected:de40ad0} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:<nil>
ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.11.2] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.21.0]] Warnings:<nil>}}
	I1201 19:30:15.098286  396231 docker.go:295] overlay module found
	I1201 19:30:15.100717  396231 out.go:177] * Using the docker driver based on user configuration
	I1201 19:30:15.102514  396231 start.go:298] selected driver: docker
	I1201 19:30:15.102549  396231 start.go:902] validating driver "docker" against <nil>
	I1201 19:30:15.102564  396231 start.go:913] status for docker: {Installed:true Healthy:true Running:false NeedsImprovement:false Error:<nil> Reason: Fix: Doc: Version:}
	I1201 19:30:15.105589  396231 out.go:177] 
	W1201 19:30:15.107565  396231 out.go:239] X Exiting due to MK_USAGE: The "containerd" container runtime requires CNI
	X Exiting due to MK_USAGE: The "containerd" container runtime requires CNI
	I1201 19:30:15.109725  396231 out.go:177] 

                                                
                                                
** /stderr **
net_test.go:88: 
----------------------- debugLogs start: false-730581 [pass: true] --------------------------------
>>> netcat: nslookup kubernetes.default:
Error in configuration: context was not found for specified context: false-730581

                                                
                                                

                                                
                                                
>>> netcat: nslookup debug kubernetes.default a-records:
Error in configuration: context was not found for specified context: false-730581

                                                
                                                

                                                
                                                
>>> netcat: dig search kubernetes.default:
Error in configuration: context was not found for specified context: false-730581

                                                
                                                

                                                
                                                
>>> netcat: dig @10.96.0.10 kubernetes.default.svc.cluster.local udp/53:
Error in configuration: context was not found for specified context: false-730581

                                                
                                                

                                                
                                                
>>> netcat: dig @10.96.0.10 kubernetes.default.svc.cluster.local tcp/53:
Error in configuration: context was not found for specified context: false-730581

                                                
                                                

                                                
                                                
>>> netcat: nc 10.96.0.10 udp/53:
Error in configuration: context was not found for specified context: false-730581

                                                
                                                

                                                
                                                
>>> netcat: nc 10.96.0.10 tcp/53:
Error in configuration: context was not found for specified context: false-730581

                                                
                                                

                                                
                                                
>>> netcat: /etc/nsswitch.conf:
Error in configuration: context was not found for specified context: false-730581

                                                
                                                

                                                
                                                
>>> netcat: /etc/hosts:
Error in configuration: context was not found for specified context: false-730581

                                                
                                                

                                                
                                                
>>> netcat: /etc/resolv.conf:
Error in configuration: context was not found for specified context: false-730581

                                                
                                                

                                                
                                                
>>> host: /etc/nsswitch.conf:
* Profile "false-730581" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p false-730581"

                                                
                                                

                                                
                                                
>>> host: /etc/hosts:
* Profile "false-730581" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p false-730581"

                                                
                                                

                                                
                                                
>>> host: /etc/resolv.conf:
* Profile "false-730581" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p false-730581"

                                                
                                                

                                                
                                                
>>> k8s: nodes, services, endpoints, daemon sets, deployments and pods, :
Error in configuration: context was not found for specified context: false-730581

                                                
                                                

                                                
                                                
>>> host: crictl pods:
* Profile "false-730581" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p false-730581"

                                                
                                                

                                                
                                                
>>> host: crictl containers:
* Profile "false-730581" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p false-730581"

                                                
                                                

                                                
                                                
>>> k8s: describe netcat deployment:
error: context "false-730581" does not exist

                                                
                                                

                                                
                                                
>>> k8s: describe netcat pod(s):
error: context "false-730581" does not exist

                                                
                                                

                                                
                                                
>>> k8s: netcat logs:
error: context "false-730581" does not exist

                                                
                                                

                                                
                                                
>>> k8s: describe coredns deployment:
error: context "false-730581" does not exist

                                                
                                                

                                                
                                                
>>> k8s: describe coredns pods:
error: context "false-730581" does not exist

                                                
                                                

                                                
                                                
>>> k8s: coredns logs:
error: context "false-730581" does not exist

                                                
                                                

                                                
                                                
>>> k8s: describe api server pod(s):
error: context "false-730581" does not exist

                                                
                                                

                                                
                                                
>>> k8s: api server logs:
error: context "false-730581" does not exist

                                                
                                                

                                                
                                                
>>> host: /etc/cni:
* Profile "false-730581" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p false-730581"

                                                
                                                

                                                
                                                
>>> host: ip a s:
* Profile "false-730581" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p false-730581"

                                                
                                                

                                                
                                                
>>> host: ip r s:
* Profile "false-730581" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p false-730581"

                                                
                                                

                                                
                                                
>>> host: iptables-save:
* Profile "false-730581" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p false-730581"

                                                
                                                

                                                
                                                
>>> host: iptables table nat:
* Profile "false-730581" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p false-730581"

                                                
                                                

                                                
                                                
>>> k8s: describe kube-proxy daemon set:
error: context "false-730581" does not exist

                                                
                                                

                                                
                                                
>>> k8s: describe kube-proxy pod(s):
error: context "false-730581" does not exist

                                                
                                                

                                                
                                                
>>> k8s: kube-proxy logs:
error: context "false-730581" does not exist

                                                
                                                

                                                
                                                
>>> host: kubelet daemon status:
* Profile "false-730581" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p false-730581"

                                                
                                                

                                                
                                                
>>> host: kubelet daemon config:
* Profile "false-730581" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p false-730581"

                                                
                                                

                                                
                                                
>>> k8s: kubelet logs:
* Profile "false-730581" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p false-730581"

                                                
                                                

                                                
                                                
>>> host: /etc/kubernetes/kubelet.conf:
* Profile "false-730581" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p false-730581"

                                                
                                                

                                                
                                                
>>> host: /var/lib/kubelet/config.yaml:
* Profile "false-730581" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p false-730581"

                                                
                                                

                                                
                                                
>>> k8s: kubectl config:
apiVersion: v1
clusters:
- cluster:
    certificate-authority: /home/jenkins/minikube-integration/17703-252966/.minikube/ca.crt
    extensions:
    - extension:
        last-update: Fri, 01 Dec 2023 19:30:11 UTC
        provider: minikube.sigs.k8s.io
        version: v1.32.0
      name: cluster_info
    server: https://192.168.76.2:8443
  name: NoKubernetes-053646
contexts:
- context:
    cluster: NoKubernetes-053646
    extensions:
    - extension:
        last-update: Fri, 01 Dec 2023 19:30:11 UTC
        provider: minikube.sigs.k8s.io
        version: v1.32.0
      name: context_info
    namespace: default
    user: NoKubernetes-053646
  name: NoKubernetes-053646
current-context: NoKubernetes-053646
kind: Config
preferences: {}
users:
- name: NoKubernetes-053646
  user:
    client-certificate: /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/NoKubernetes-053646/client.crt
    client-key: /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/NoKubernetes-053646/client.key

                                                
                                                

                                                
                                                
>>> k8s: cms:
Error in configuration: context was not found for specified context: false-730581

                                                
                                                

                                                
                                                
>>> host: docker daemon status:
* Profile "false-730581" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p false-730581"

                                                
                                                

                                                
                                                
>>> host: docker daemon config:
* Profile "false-730581" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p false-730581"

                                                
                                                

                                                
                                                
>>> host: /etc/docker/daemon.json:
* Profile "false-730581" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p false-730581"

                                                
                                                

                                                
                                                
>>> host: docker system info:
* Profile "false-730581" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p false-730581"

                                                
                                                

                                                
                                                
>>> host: cri-docker daemon status:
* Profile "false-730581" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p false-730581"

                                                
                                                

                                                
                                                
>>> host: cri-docker daemon config:
* Profile "false-730581" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p false-730581"

                                                
                                                

                                                
                                                
>>> host: /etc/systemd/system/cri-docker.service.d/10-cni.conf:
* Profile "false-730581" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p false-730581"

                                                
                                                

                                                
                                                
>>> host: /usr/lib/systemd/system/cri-docker.service:
* Profile "false-730581" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p false-730581"

                                                
                                                

                                                
                                                
>>> host: cri-dockerd version:
* Profile "false-730581" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p false-730581"

                                                
                                                

                                                
                                                
>>> host: containerd daemon status:
* Profile "false-730581" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p false-730581"

                                                
                                                

                                                
                                                
>>> host: containerd daemon config:
* Profile "false-730581" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p false-730581"

                                                
                                                

                                                
                                                
>>> host: /lib/systemd/system/containerd.service:
* Profile "false-730581" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p false-730581"

                                                
                                                

                                                
                                                
>>> host: /etc/containerd/config.toml:
* Profile "false-730581" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p false-730581"

                                                
                                                

                                                
                                                
>>> host: containerd config dump:
* Profile "false-730581" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p false-730581"

                                                
                                                

                                                
                                                
>>> host: crio daemon status:
* Profile "false-730581" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p false-730581"

                                                
                                                

                                                
                                                
>>> host: crio daemon config:
* Profile "false-730581" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p false-730581"

                                                
                                                

                                                
                                                
>>> host: /etc/crio:
* Profile "false-730581" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p false-730581"

                                                
                                                

                                                
                                                
>>> host: crio config:
* Profile "false-730581" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p false-730581"

                                                
                                                
----------------------- debugLogs end: false-730581 [took: 4.30947094s] --------------------------------
helpers_test.go:175: Cleaning up "false-730581" profile ...
helpers_test.go:178: (dbg) Run:  out/minikube-linux-arm64 delete -p false-730581
E1201 19:30:19.452008  258301 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/functional-616785/client.crt: no such file or directory
--- PASS: TestNetworkPlugins/group/false (4.80s)
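Note: TestNetworkPlugins/group/false also passes by failing fast: with --container-runtime=containerd, minikube refuses --cni=false and exits with status 14 (MK_USAGE, "The \"containerd\" container runtime requires CNI"), which is what the test expects before it dumps the debug logs above. Below is a hedged Go sketch of checking that exit code; the command line is copied from the log and only standard-library calls are used.

package main

import (
	"errors"
	"fmt"
	"os/exec"
)

func main() {
	cmd := exec.Command("out/minikube-linux-arm64", "start", "-p", "false-730581",
		"--memory=2048", "--alsologtostderr", "--cni=false",
		"--driver=docker", "--container-runtime=containerd")
	err := cmd.Run()
	var exitErr *exec.ExitError
	if errors.As(err, &exitErr) {
		// Exit code 14 corresponds to the MK_USAGE rejection seen in the stderr above.
		fmt.Println("minikube start exited with code", exitErr.ExitCode())
	}
}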

                                                
                                    
TestNoKubernetes/serial/Start (9.79s)

                                                
                                                
=== RUN   TestNoKubernetes/serial/Start
no_kubernetes_test.go:136: (dbg) Run:  out/minikube-linux-arm64 start -p NoKubernetes-053646 --no-kubernetes --driver=docker  --container-runtime=containerd
no_kubernetes_test.go:136: (dbg) Done: out/minikube-linux-arm64 start -p NoKubernetes-053646 --no-kubernetes --driver=docker  --container-runtime=containerd: (9.787091344s)
--- PASS: TestNoKubernetes/serial/Start (9.79s)

                                                
                                    
TestNoKubernetes/serial/VerifyK8sNotRunning (0.41s)

                                                
                                                
=== RUN   TestNoKubernetes/serial/VerifyK8sNotRunning
no_kubernetes_test.go:147: (dbg) Run:  out/minikube-linux-arm64 ssh -p NoKubernetes-053646 "sudo systemctl is-active --quiet service kubelet"
no_kubernetes_test.go:147: (dbg) Non-zero exit: out/minikube-linux-arm64 ssh -p NoKubernetes-053646 "sudo systemctl is-active --quiet service kubelet": exit status 1 (411.587943ms)

                                                
                                                
** stderr ** 
	ssh: Process exited with status 3

                                                
                                                
** /stderr **
--- PASS: TestNoKubernetes/serial/VerifyK8sNotRunning (0.41s)
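Note: the non-zero exit above is the signal this test wants: systemctl is-active --quiet exits 0 only when the unit is active, so status 3 over SSH confirms the kubelet is not running in the --no-kubernetes profile. A minimal sketch of the same check, assuming the binary path and profile name shown in this report:

package main

import (
	"fmt"
	"os/exec"
)

func main() {
	cmd := exec.Command("out/minikube-linux-arm64", "ssh", "-p", "NoKubernetes-053646",
		"sudo systemctl is-active --quiet service kubelet")
	if err := cmd.Run(); err != nil {
		fmt.Println("kubelet is inactive, as expected for --no-kubernetes:", err)
		return
	}
	fmt.Println("unexpected: kubelet reports active")
}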

                                                
                                    
TestNoKubernetes/serial/ProfileList (1.21s)

                                                
                                                
=== RUN   TestNoKubernetes/serial/ProfileList
no_kubernetes_test.go:169: (dbg) Run:  out/minikube-linux-arm64 profile list
no_kubernetes_test.go:179: (dbg) Run:  out/minikube-linux-arm64 profile list --output=json
--- PASS: TestNoKubernetes/serial/ProfileList (1.21s)

                                                
                                    
TestNoKubernetes/serial/Stop (1.34s)

                                                
                                                
=== RUN   TestNoKubernetes/serial/Stop
no_kubernetes_test.go:158: (dbg) Run:  out/minikube-linux-arm64 stop -p NoKubernetes-053646
no_kubernetes_test.go:158: (dbg) Done: out/minikube-linux-arm64 stop -p NoKubernetes-053646: (1.336159836s)
--- PASS: TestNoKubernetes/serial/Stop (1.34s)

                                                
                                    
TestNoKubernetes/serial/StartNoArgs (7.86s)

                                                
                                                
=== RUN   TestNoKubernetes/serial/StartNoArgs
no_kubernetes_test.go:191: (dbg) Run:  out/minikube-linux-arm64 start -p NoKubernetes-053646 --driver=docker  --container-runtime=containerd
no_kubernetes_test.go:191: (dbg) Done: out/minikube-linux-arm64 start -p NoKubernetes-053646 --driver=docker  --container-runtime=containerd: (7.855245063s)
--- PASS: TestNoKubernetes/serial/StartNoArgs (7.86s)

                                                
                                    
TestNoKubernetes/serial/VerifyK8sNotRunningSecond (0.39s)

                                                
                                                
=== RUN   TestNoKubernetes/serial/VerifyK8sNotRunningSecond
no_kubernetes_test.go:147: (dbg) Run:  out/minikube-linux-arm64 ssh -p NoKubernetes-053646 "sudo systemctl is-active --quiet service kubelet"
no_kubernetes_test.go:147: (dbg) Non-zero exit: out/minikube-linux-arm64 ssh -p NoKubernetes-053646 "sudo systemctl is-active --quiet service kubelet": exit status 1 (393.993868ms)

                                                
                                                
** stderr ** 
	ssh: Process exited with status 3

                                                
                                                
** /stderr **
--- PASS: TestNoKubernetes/serial/VerifyK8sNotRunningSecond (0.39s)

                                                
                                    
TestStartStop/group/old-k8s-version/serial/FirstStart (119.92s)

                                                
                                                
=== RUN   TestStartStop/group/old-k8s-version/serial/FirstStart
start_stop_delete_test.go:186: (dbg) Run:  out/minikube-linux-arm64 start -p old-k8s-version-231557 --memory=2200 --alsologtostderr --wait=true --kvm-network=default --kvm-qemu-uri=qemu:///system --disable-driver-mounts --keep-context=false --driver=docker  --container-runtime=containerd --kubernetes-version=v1.16.0
E1201 19:32:27.434037  258301 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/addons-488129/client.crt: no such file or directory
start_stop_delete_test.go:186: (dbg) Done: out/minikube-linux-arm64 start -p old-k8s-version-231557 --memory=2200 --alsologtostderr --wait=true --kvm-network=default --kvm-qemu-uri=qemu:///system --disable-driver-mounts --keep-context=false --driver=docker  --container-runtime=containerd --kubernetes-version=v1.16.0: (1m59.923155177s)
--- PASS: TestStartStop/group/old-k8s-version/serial/FirstStart (119.92s)

                                                
                                    
TestStartStop/group/default-k8s-diff-port/serial/FirstStart (93.35s)

                                                
                                                
=== RUN   TestStartStop/group/default-k8s-diff-port/serial/FirstStart
start_stop_delete_test.go:186: (dbg) Run:  out/minikube-linux-arm64 start -p default-k8s-diff-port-658051 --memory=2200 --alsologtostderr --wait=true --apiserver-port=8444 --driver=docker  --container-runtime=containerd --kubernetes-version=v1.28.4
start_stop_delete_test.go:186: (dbg) Done: out/minikube-linux-arm64 start -p default-k8s-diff-port-658051 --memory=2200 --alsologtostderr --wait=true --apiserver-port=8444 --driver=docker  --container-runtime=containerd --kubernetes-version=v1.28.4: (1m33.34714549s)
--- PASS: TestStartStop/group/default-k8s-diff-port/serial/FirstStart (93.35s)

                                                
                                    
TestStartStop/group/old-k8s-version/serial/DeployApp (8.64s)

                                                
                                                
=== RUN   TestStartStop/group/old-k8s-version/serial/DeployApp
start_stop_delete_test.go:196: (dbg) Run:  kubectl --context old-k8s-version-231557 create -f testdata/busybox.yaml
start_stop_delete_test.go:196: (dbg) TestStartStop/group/old-k8s-version/serial/DeployApp: waiting 8m0s for pods matching "integration-test=busybox" in namespace "default" ...
helpers_test.go:344: "busybox" [205d7eab-499a-47ba-ba1b-bb2f60d6bcf7] Pending / Ready:ContainersNotReady (containers with unready status: [busybox]) / ContainersReady:ContainersNotReady (containers with unready status: [busybox])
helpers_test.go:344: "busybox" [205d7eab-499a-47ba-ba1b-bb2f60d6bcf7] Running
start_stop_delete_test.go:196: (dbg) TestStartStop/group/old-k8s-version/serial/DeployApp: integration-test=busybox healthy within 8.039284484s
start_stop_delete_test.go:196: (dbg) Run:  kubectl --context old-k8s-version-231557 exec busybox -- /bin/sh -c "ulimit -n"
--- PASS: TestStartStop/group/old-k8s-version/serial/DeployApp (8.64s)

                                                
                                    
TestStartStop/group/old-k8s-version/serial/EnableAddonWhileActive (1.18s)

                                                
                                                
=== RUN   TestStartStop/group/old-k8s-version/serial/EnableAddonWhileActive
start_stop_delete_test.go:205: (dbg) Run:  out/minikube-linux-arm64 addons enable metrics-server -p old-k8s-version-231557 --images=MetricsServer=registry.k8s.io/echoserver:1.4 --registries=MetricsServer=fake.domain
start_stop_delete_test.go:215: (dbg) Run:  kubectl --context old-k8s-version-231557 describe deploy/metrics-server -n kube-system
--- PASS: TestStartStop/group/old-k8s-version/serial/EnableAddonWhileActive (1.18s)

                                                
                                    
TestStartStop/group/old-k8s-version/serial/Stop (12.51s)

                                                
                                                
=== RUN   TestStartStop/group/old-k8s-version/serial/Stop
start_stop_delete_test.go:228: (dbg) Run:  out/minikube-linux-arm64 stop -p old-k8s-version-231557 --alsologtostderr -v=3
E1201 19:34:24.386714  258301 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/addons-488129/client.crt: no such file or directory
start_stop_delete_test.go:228: (dbg) Done: out/minikube-linux-arm64 stop -p old-k8s-version-231557 --alsologtostderr -v=3: (12.51135864s)
--- PASS: TestStartStop/group/old-k8s-version/serial/Stop (12.51s)

                                                
                                    
TestStartStop/group/old-k8s-version/serial/EnableAddonAfterStop (0.28s)

                                                
                                                
=== RUN   TestStartStop/group/old-k8s-version/serial/EnableAddonAfterStop
start_stop_delete_test.go:239: (dbg) Run:  out/minikube-linux-arm64 status --format={{.Host}} -p old-k8s-version-231557 -n old-k8s-version-231557
start_stop_delete_test.go:239: (dbg) Non-zero exit: out/minikube-linux-arm64 status --format={{.Host}} -p old-k8s-version-231557 -n old-k8s-version-231557: exit status 7 (113.322439ms)

                                                
                                                
-- stdout --
	Stopped

                                                
                                                
-- /stdout --
start_stop_delete_test.go:239: status error: exit status 7 (may be ok)
start_stop_delete_test.go:246: (dbg) Run:  out/minikube-linux-arm64 addons enable dashboard -p old-k8s-version-231557 --images=MetricsScraper=registry.k8s.io/echoserver:1.4
--- PASS: TestStartStop/group/old-k8s-version/serial/EnableAddonAfterStop (0.28s)
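Note: exit status 7 from minikube status together with a Host value of "Stopped" is the normal state right after the Stop step, which is why the harness marks it "(may be ok)" and proceeds to enable the dashboard addon. A hedged Go sketch that tolerates that case, reusing the profile name from this report; the exact exit-code semantics are not asserted beyond what the log shows.

package main

import (
	"fmt"
	"os/exec"
	"strings"
)

func main() {
	out, err := exec.Command("out/minikube-linux-arm64", "status", "--format={{.Host}}",
		"-p", "old-k8s-version-231557", "-n", "old-k8s-version-231557").Output()
	host := strings.TrimSpace(string(out))
	if err != nil && host == "Stopped" {
		// A non-zero exit (status 7 in the log above) is acceptable while the host is down.
		fmt.Println("host stopped; safe to run: addons enable dashboard")
		return
	}
	fmt.Println("host state:", host, "err:", err)
}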

                                                
                                    
TestStartStop/group/old-k8s-version/serial/SecondStart (652.2s)

                                                
                                                
=== RUN   TestStartStop/group/old-k8s-version/serial/SecondStart
start_stop_delete_test.go:256: (dbg) Run:  out/minikube-linux-arm64 start -p old-k8s-version-231557 --memory=2200 --alsologtostderr --wait=true --kvm-network=default --kvm-qemu-uri=qemu:///system --disable-driver-mounts --keep-context=false --driver=docker  --container-runtime=containerd --kubernetes-version=v1.16.0
E1201 19:34:37.131102  258301 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/ingress-addon-legacy-853196/client.crt: no such file or directory
E1201 19:35:19.452117  258301 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/functional-616785/client.crt: no such file or directory
start_stop_delete_test.go:256: (dbg) Done: out/minikube-linux-arm64 start -p old-k8s-version-231557 --memory=2200 --alsologtostderr --wait=true --kvm-network=default --kvm-qemu-uri=qemu:///system --disable-driver-mounts --keep-context=false --driver=docker  --container-runtime=containerd --kubernetes-version=v1.16.0: (10m51.76996108s)
start_stop_delete_test.go:262: (dbg) Run:  out/minikube-linux-arm64 status --format={{.Host}} -p old-k8s-version-231557 -n old-k8s-version-231557
--- PASS: TestStartStop/group/old-k8s-version/serial/SecondStart (652.20s)

                                                
                                    
TestStartStop/group/default-k8s-diff-port/serial/DeployApp (8.53s)

                                                
                                                
=== RUN   TestStartStop/group/default-k8s-diff-port/serial/DeployApp
start_stop_delete_test.go:196: (dbg) Run:  kubectl --context default-k8s-diff-port-658051 create -f testdata/busybox.yaml
start_stop_delete_test.go:196: (dbg) TestStartStop/group/default-k8s-diff-port/serial/DeployApp: waiting 8m0s for pods matching "integration-test=busybox" in namespace "default" ...
helpers_test.go:344: "busybox" [d5828524-d2d3-44c5-821c-518fb9837985] Pending / Ready:ContainersNotReady (containers with unready status: [busybox]) / ContainersReady:ContainersNotReady (containers with unready status: [busybox])
helpers_test.go:344: "busybox" [d5828524-d2d3-44c5-821c-518fb9837985] Running
start_stop_delete_test.go:196: (dbg) TestStartStop/group/default-k8s-diff-port/serial/DeployApp: integration-test=busybox healthy within 8.034819576s
start_stop_delete_test.go:196: (dbg) Run:  kubectl --context default-k8s-diff-port-658051 exec busybox -- /bin/sh -c "ulimit -n"
--- PASS: TestStartStop/group/default-k8s-diff-port/serial/DeployApp (8.53s)

                                                
                                    
TestStartStop/group/default-k8s-diff-port/serial/EnableAddonWhileActive (1.3s)

                                                
                                                
=== RUN   TestStartStop/group/default-k8s-diff-port/serial/EnableAddonWhileActive
start_stop_delete_test.go:205: (dbg) Run:  out/minikube-linux-arm64 addons enable metrics-server -p default-k8s-diff-port-658051 --images=MetricsServer=registry.k8s.io/echoserver:1.4 --registries=MetricsServer=fake.domain
start_stop_delete_test.go:205: (dbg) Done: out/minikube-linux-arm64 addons enable metrics-server -p default-k8s-diff-port-658051 --images=MetricsServer=registry.k8s.io/echoserver:1.4 --registries=MetricsServer=fake.domain: (1.158379444s)
start_stop_delete_test.go:215: (dbg) Run:  kubectl --context default-k8s-diff-port-658051 describe deploy/metrics-server -n kube-system
--- PASS: TestStartStop/group/default-k8s-diff-port/serial/EnableAddonWhileActive (1.30s)

                                                
                                    
TestStartStop/group/default-k8s-diff-port/serial/Stop (12.26s)

                                                
                                                
=== RUN   TestStartStop/group/default-k8s-diff-port/serial/Stop
start_stop_delete_test.go:228: (dbg) Run:  out/minikube-linux-arm64 stop -p default-k8s-diff-port-658051 --alsologtostderr -v=3
start_stop_delete_test.go:228: (dbg) Done: out/minikube-linux-arm64 stop -p default-k8s-diff-port-658051 --alsologtostderr -v=3: (12.258600897s)
--- PASS: TestStartStop/group/default-k8s-diff-port/serial/Stop (12.26s)

                                                
                                    
TestStartStop/group/default-k8s-diff-port/serial/EnableAddonAfterStop (0.25s)

                                                
                                                
=== RUN   TestStartStop/group/default-k8s-diff-port/serial/EnableAddonAfterStop
start_stop_delete_test.go:239: (dbg) Run:  out/minikube-linux-arm64 status --format={{.Host}} -p default-k8s-diff-port-658051 -n default-k8s-diff-port-658051
start_stop_delete_test.go:239: (dbg) Non-zero exit: out/minikube-linux-arm64 status --format={{.Host}} -p default-k8s-diff-port-658051 -n default-k8s-diff-port-658051: exit status 7 (113.095458ms)

                                                
                                                
-- stdout --
	Stopped

                                                
                                                
-- /stdout --
start_stop_delete_test.go:239: status error: exit status 7 (may be ok)
start_stop_delete_test.go:246: (dbg) Run:  out/minikube-linux-arm64 addons enable dashboard -p default-k8s-diff-port-658051 --images=MetricsScraper=registry.k8s.io/echoserver:1.4
--- PASS: TestStartStop/group/default-k8s-diff-port/serial/EnableAddonAfterStop (0.25s)

                                                
                                    
TestStartStop/group/default-k8s-diff-port/serial/SecondStart (338.72s)

                                                
                                                
=== RUN   TestStartStop/group/default-k8s-diff-port/serial/SecondStart
start_stop_delete_test.go:256: (dbg) Run:  out/minikube-linux-arm64 start -p default-k8s-diff-port-658051 --memory=2200 --alsologtostderr --wait=true --apiserver-port=8444 --driver=docker  --container-runtime=containerd --kubernetes-version=v1.28.4
E1201 19:38:22.495545  258301 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/functional-616785/client.crt: no such file or directory
E1201 19:39:24.386216  258301 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/addons-488129/client.crt: no such file or directory
E1201 19:39:37.130426  258301 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/ingress-addon-legacy-853196/client.crt: no such file or directory
E1201 19:40:19.451858  258301 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/functional-616785/client.crt: no such file or directory
start_stop_delete_test.go:256: (dbg) Done: out/minikube-linux-arm64 start -p default-k8s-diff-port-658051 --memory=2200 --alsologtostderr --wait=true --apiserver-port=8444 --driver=docker  --container-runtime=containerd --kubernetes-version=v1.28.4: (5m38.267027195s)
start_stop_delete_test.go:262: (dbg) Run:  out/minikube-linux-arm64 status --format={{.Host}} -p default-k8s-diff-port-658051 -n default-k8s-diff-port-658051
--- PASS: TestStartStop/group/default-k8s-diff-port/serial/SecondStart (338.72s)

                                                
                                    
TestStartStop/group/default-k8s-diff-port/serial/UserAppExistsAfterStop (13.03s)

                                                
                                                
=== RUN   TestStartStop/group/default-k8s-diff-port/serial/UserAppExistsAfterStop
start_stop_delete_test.go:274: (dbg) TestStartStop/group/default-k8s-diff-port/serial/UserAppExistsAfterStop: waiting 9m0s for pods matching "k8s-app=kubernetes-dashboard" in namespace "kubernetes-dashboard" ...
helpers_test.go:344: "kubernetes-dashboard-8694d4445c-585vp" [58b04c1e-1b8b-4a55-a626-1dd0dc48ab7f] Pending / Ready:ContainersNotReady (containers with unready status: [kubernetes-dashboard]) / ContainersReady:ContainersNotReady (containers with unready status: [kubernetes-dashboard])
helpers_test.go:344: "kubernetes-dashboard-8694d4445c-585vp" [58b04c1e-1b8b-4a55-a626-1dd0dc48ab7f] Running
start_stop_delete_test.go:274: (dbg) TestStartStop/group/default-k8s-diff-port/serial/UserAppExistsAfterStop: k8s-app=kubernetes-dashboard healthy within 13.027968503s
--- PASS: TestStartStop/group/default-k8s-diff-port/serial/UserAppExistsAfterStop (13.03s)

                                                
                                    
TestStartStop/group/default-k8s-diff-port/serial/AddonExistsAfterStop (5.12s)

                                                
                                                
=== RUN   TestStartStop/group/default-k8s-diff-port/serial/AddonExistsAfterStop
start_stop_delete_test.go:287: (dbg) TestStartStop/group/default-k8s-diff-port/serial/AddonExistsAfterStop: waiting 9m0s for pods matching "k8s-app=kubernetes-dashboard" in namespace "kubernetes-dashboard" ...
helpers_test.go:344: "kubernetes-dashboard-8694d4445c-585vp" [58b04c1e-1b8b-4a55-a626-1dd0dc48ab7f] Running
start_stop_delete_test.go:287: (dbg) TestStartStop/group/default-k8s-diff-port/serial/AddonExistsAfterStop: k8s-app=kubernetes-dashboard healthy within 5.012362575s
start_stop_delete_test.go:291: (dbg) Run:  kubectl --context default-k8s-diff-port-658051 describe deploy/dashboard-metrics-scraper -n kubernetes-dashboard
--- PASS: TestStartStop/group/default-k8s-diff-port/serial/AddonExistsAfterStop (5.12s)

                                                
                                    
TestStartStop/group/default-k8s-diff-port/serial/VerifyKubernetesImages (0.32s)

                                                
                                                
=== RUN   TestStartStop/group/default-k8s-diff-port/serial/VerifyKubernetesImages
start_stop_delete_test.go:304: (dbg) Run:  out/minikube-linux-arm64 -p default-k8s-diff-port-658051 image list --format=json
start_stop_delete_test.go:304: Found non-minikube image: gcr.io/k8s-minikube/busybox:1.28.4-glibc
start_stop_delete_test.go:304: Found non-minikube image: kindest/kindnetd:v20230809-80a64d96
--- PASS: TestStartStop/group/default-k8s-diff-port/serial/VerifyKubernetesImages (0.32s)

                                                
                                    
TestStartStop/group/default-k8s-diff-port/serial/Pause (3.64s)

                                                
                                                
=== RUN   TestStartStop/group/default-k8s-diff-port/serial/Pause
start_stop_delete_test.go:311: (dbg) Run:  out/minikube-linux-arm64 pause -p default-k8s-diff-port-658051 --alsologtostderr -v=1
start_stop_delete_test.go:311: (dbg) Run:  out/minikube-linux-arm64 status --format={{.APIServer}} -p default-k8s-diff-port-658051 -n default-k8s-diff-port-658051
start_stop_delete_test.go:311: (dbg) Non-zero exit: out/minikube-linux-arm64 status --format={{.APIServer}} -p default-k8s-diff-port-658051 -n default-k8s-diff-port-658051: exit status 2 (380.775234ms)

                                                
                                                
-- stdout --
	Paused

                                                
                                                
-- /stdout --
start_stop_delete_test.go:311: status error: exit status 2 (may be ok)
start_stop_delete_test.go:311: (dbg) Run:  out/minikube-linux-arm64 status --format={{.Kubelet}} -p default-k8s-diff-port-658051 -n default-k8s-diff-port-658051
start_stop_delete_test.go:311: (dbg) Non-zero exit: out/minikube-linux-arm64 status --format={{.Kubelet}} -p default-k8s-diff-port-658051 -n default-k8s-diff-port-658051: exit status 2 (392.606262ms)

                                                
                                                
-- stdout --
	Stopped

                                                
                                                
-- /stdout --
start_stop_delete_test.go:311: status error: exit status 2 (may be ok)
start_stop_delete_test.go:311: (dbg) Run:  out/minikube-linux-arm64 unpause -p default-k8s-diff-port-658051 --alsologtostderr -v=1
start_stop_delete_test.go:311: (dbg) Run:  out/minikube-linux-arm64 status --format={{.APIServer}} -p default-k8s-diff-port-658051 -n default-k8s-diff-port-658051
start_stop_delete_test.go:311: (dbg) Run:  out/minikube-linux-arm64 status --format={{.Kubelet}} -p default-k8s-diff-port-658051 -n default-k8s-diff-port-658051
--- PASS: TestStartStop/group/default-k8s-diff-port/serial/Pause (3.64s)
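Note: the Pause test treats exit status 2 from the two status probes as acceptable because a paused profile is expected to report APIServer "Paused" and Kubelet "Stopped"; after unpause both probes succeed again. A rough Go sketch of that pause/probe/unpause sequence, with the profile name taken from the log and errors deliberately tolerated on the probes:

package main

import (
	"fmt"
	"os/exec"
)

// probe prints one status field for the profile; the error is ignored because
// the status command exits non-zero while the component is paused or stopped.
func probe(field string) {
	out, _ := exec.Command("out/minikube-linux-arm64", "status",
		"--format={{."+field+"}}", "-p", "default-k8s-diff-port-658051",
		"-n", "default-k8s-diff-port-658051").Output()
	fmt.Printf("%s: %s", field, out)
}

func main() {
	_ = exec.Command("out/minikube-linux-arm64", "pause", "-p", "default-k8s-diff-port-658051").Run()
	probe("APIServer") // "Paused" in the log above
	probe("Kubelet")   // "Stopped" in the log above
	_ = exec.Command("out/minikube-linux-arm64", "unpause", "-p", "default-k8s-diff-port-658051").Run()
	probe("APIServer")
	probe("Kubelet")
}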

                                                
                                    
TestStartStop/group/embed-certs/serial/FirstStart (59.57s)

                                                
                                                
=== RUN   TestStartStop/group/embed-certs/serial/FirstStart
start_stop_delete_test.go:186: (dbg) Run:  out/minikube-linux-arm64 start -p embed-certs-603192 --memory=2200 --alsologtostderr --wait=true --embed-certs --driver=docker  --container-runtime=containerd --kubernetes-version=v1.28.4
E1201 19:42:40.183173  258301 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/ingress-addon-legacy-853196/client.crt: no such file or directory
start_stop_delete_test.go:186: (dbg) Done: out/minikube-linux-arm64 start -p embed-certs-603192 --memory=2200 --alsologtostderr --wait=true --embed-certs --driver=docker  --container-runtime=containerd --kubernetes-version=v1.28.4: (59.568838142s)
--- PASS: TestStartStop/group/embed-certs/serial/FirstStart (59.57s)

                                                
                                    
TestStartStop/group/embed-certs/serial/DeployApp (8.48s)

                                                
                                                
=== RUN   TestStartStop/group/embed-certs/serial/DeployApp
start_stop_delete_test.go:196: (dbg) Run:  kubectl --context embed-certs-603192 create -f testdata/busybox.yaml
start_stop_delete_test.go:196: (dbg) TestStartStop/group/embed-certs/serial/DeployApp: waiting 8m0s for pods matching "integration-test=busybox" in namespace "default" ...
helpers_test.go:344: "busybox" [d00bbdea-4d13-40c4-a420-09b5f3773f1b] Pending / Ready:ContainersNotReady (containers with unready status: [busybox]) / ContainersReady:ContainersNotReady (containers with unready status: [busybox])
helpers_test.go:344: "busybox" [d00bbdea-4d13-40c4-a420-09b5f3773f1b] Running
start_stop_delete_test.go:196: (dbg) TestStartStop/group/embed-certs/serial/DeployApp: integration-test=busybox healthy within 8.033770863s
start_stop_delete_test.go:196: (dbg) Run:  kubectl --context embed-certs-603192 exec busybox -- /bin/sh -c "ulimit -n"
--- PASS: TestStartStop/group/embed-certs/serial/DeployApp (8.48s)

                                                
                                    
TestStartStop/group/embed-certs/serial/EnableAddonWhileActive (1.25s)

                                                
                                                
=== RUN   TestStartStop/group/embed-certs/serial/EnableAddonWhileActive
start_stop_delete_test.go:205: (dbg) Run:  out/minikube-linux-arm64 addons enable metrics-server -p embed-certs-603192 --images=MetricsServer=registry.k8s.io/echoserver:1.4 --registries=MetricsServer=fake.domain
start_stop_delete_test.go:205: (dbg) Done: out/minikube-linux-arm64 addons enable metrics-server -p embed-certs-603192 --images=MetricsServer=registry.k8s.io/echoserver:1.4 --registries=MetricsServer=fake.domain: (1.132155557s)
start_stop_delete_test.go:215: (dbg) Run:  kubectl --context embed-certs-603192 describe deploy/metrics-server -n kube-system
--- PASS: TestStartStop/group/embed-certs/serial/EnableAddonWhileActive (1.25s)

                                                
                                    
TestStartStop/group/embed-certs/serial/Stop (12.27s)

                                                
                                                
=== RUN   TestStartStop/group/embed-certs/serial/Stop
start_stop_delete_test.go:228: (dbg) Run:  out/minikube-linux-arm64 stop -p embed-certs-603192 --alsologtostderr -v=3
start_stop_delete_test.go:228: (dbg) Done: out/minikube-linux-arm64 stop -p embed-certs-603192 --alsologtostderr -v=3: (12.272709972s)
--- PASS: TestStartStop/group/embed-certs/serial/Stop (12.27s)

                                                
                                    
TestStartStop/group/embed-certs/serial/EnableAddonAfterStop (0.24s)

                                                
                                                
=== RUN   TestStartStop/group/embed-certs/serial/EnableAddonAfterStop
start_stop_delete_test.go:239: (dbg) Run:  out/minikube-linux-arm64 status --format={{.Host}} -p embed-certs-603192 -n embed-certs-603192
start_stop_delete_test.go:239: (dbg) Non-zero exit: out/minikube-linux-arm64 status --format={{.Host}} -p embed-certs-603192 -n embed-certs-603192: exit status 7 (103.25564ms)

                                                
                                                
-- stdout --
	Stopped

                                                
                                                
-- /stdout --
start_stop_delete_test.go:239: status error: exit status 7 (may be ok)
start_stop_delete_test.go:246: (dbg) Run:  out/minikube-linux-arm64 addons enable dashboard -p embed-certs-603192 --images=MetricsScraper=registry.k8s.io/echoserver:1.4
--- PASS: TestStartStop/group/embed-certs/serial/EnableAddonAfterStop (0.24s)

                                                
                                    
TestStartStop/group/embed-certs/serial/SecondStart (334.54s)

                                                
                                                
=== RUN   TestStartStop/group/embed-certs/serial/SecondStart
start_stop_delete_test.go:256: (dbg) Run:  out/minikube-linux-arm64 start -p embed-certs-603192 --memory=2200 --alsologtostderr --wait=true --embed-certs --driver=docker  --container-runtime=containerd --kubernetes-version=v1.28.4
E1201 19:44:24.385773  258301 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/addons-488129/client.crt: no such file or directory
E1201 19:44:37.130358  258301 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/ingress-addon-legacy-853196/client.crt: no such file or directory
E1201 19:45:19.452666  258301 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/functional-616785/client.crt: no such file or directory
start_stop_delete_test.go:256: (dbg) Done: out/minikube-linux-arm64 start -p embed-certs-603192 --memory=2200 --alsologtostderr --wait=true --embed-certs --driver=docker  --container-runtime=containerd --kubernetes-version=v1.28.4: (5m34.022270085s)
start_stop_delete_test.go:262: (dbg) Run:  out/minikube-linux-arm64 status --format={{.Host}} -p embed-certs-603192 -n embed-certs-603192
--- PASS: TestStartStop/group/embed-certs/serial/SecondStart (334.54s)

                                                
                                    
TestStartStop/group/old-k8s-version/serial/UserAppExistsAfterStop (5.03s)

                                                
                                                
=== RUN   TestStartStop/group/old-k8s-version/serial/UserAppExistsAfterStop
start_stop_delete_test.go:274: (dbg) TestStartStop/group/old-k8s-version/serial/UserAppExistsAfterStop: waiting 9m0s for pods matching "k8s-app=kubernetes-dashboard" in namespace "kubernetes-dashboard" ...
helpers_test.go:344: "kubernetes-dashboard-84b68f675b-cp2pt" [6b7f5de8-9586-4070-bcf8-e59f599f05bd] Running
start_stop_delete_test.go:274: (dbg) TestStartStop/group/old-k8s-version/serial/UserAppExistsAfterStop: k8s-app=kubernetes-dashboard healthy within 5.024870744s
--- PASS: TestStartStop/group/old-k8s-version/serial/UserAppExistsAfterStop (5.03s)

                                                
                                    
TestStartStop/group/old-k8s-version/serial/AddonExistsAfterStop (5.11s)

                                                
                                                
=== RUN   TestStartStop/group/old-k8s-version/serial/AddonExistsAfterStop
start_stop_delete_test.go:287: (dbg) TestStartStop/group/old-k8s-version/serial/AddonExistsAfterStop: waiting 9m0s for pods matching "k8s-app=kubernetes-dashboard" in namespace "kubernetes-dashboard" ...
helpers_test.go:344: "kubernetes-dashboard-84b68f675b-cp2pt" [6b7f5de8-9586-4070-bcf8-e59f599f05bd] Running
start_stop_delete_test.go:287: (dbg) TestStartStop/group/old-k8s-version/serial/AddonExistsAfterStop: k8s-app=kubernetes-dashboard healthy within 5.00899263s
start_stop_delete_test.go:291: (dbg) Run:  kubectl --context old-k8s-version-231557 describe deploy/dashboard-metrics-scraper -n kubernetes-dashboard
--- PASS: TestStartStop/group/old-k8s-version/serial/AddonExistsAfterStop (5.11s)

                                                
                                    
TestStartStop/group/old-k8s-version/serial/VerifyKubernetesImages (0.28s)

                                                
                                                
=== RUN   TestStartStop/group/old-k8s-version/serial/VerifyKubernetesImages
start_stop_delete_test.go:304: (dbg) Run:  out/minikube-linux-arm64 -p old-k8s-version-231557 image list --format=json
start_stop_delete_test.go:304: Found non-minikube image: gcr.io/k8s-minikube/busybox:1.28.4-glibc
start_stop_delete_test.go:304: Found non-minikube image: kindest/kindnetd:v20210326-1e038dc5
start_stop_delete_test.go:304: Found non-minikube image: kindest/kindnetd:v20230809-80a64d96
--- PASS: TestStartStop/group/old-k8s-version/serial/VerifyKubernetesImages (0.28s)
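
This step only lists the images present in the node and reports anything outside the expected minikube/Kubernetes set; the kindnetd and busybox images flagged above were pulled by earlier tests in this run. The listing can be reproduced directly (profile name is a placeholder; the JSON schema is whatever the installed minikube emits and is not reproduced here):

    minikube -p my-profile image list --format=json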

                                                
                                    
TestStartStop/group/old-k8s-version/serial/Pause (3.7s)

                                                
                                                
=== RUN   TestStartStop/group/old-k8s-version/serial/Pause
start_stop_delete_test.go:311: (dbg) Run:  out/minikube-linux-arm64 pause -p old-k8s-version-231557 --alsologtostderr -v=1
start_stop_delete_test.go:311: (dbg) Run:  out/minikube-linux-arm64 status --format={{.APIServer}} -p old-k8s-version-231557 -n old-k8s-version-231557
start_stop_delete_test.go:311: (dbg) Non-zero exit: out/minikube-linux-arm64 status --format={{.APIServer}} -p old-k8s-version-231557 -n old-k8s-version-231557: exit status 2 (395.302163ms)

                                                
                                                
-- stdout --
	Paused

                                                
                                                
-- /stdout --
start_stop_delete_test.go:311: status error: exit status 2 (may be ok)
start_stop_delete_test.go:311: (dbg) Run:  out/minikube-linux-arm64 status --format={{.Kubelet}} -p old-k8s-version-231557 -n old-k8s-version-231557
start_stop_delete_test.go:311: (dbg) Non-zero exit: out/minikube-linux-arm64 status --format={{.Kubelet}} -p old-k8s-version-231557 -n old-k8s-version-231557: exit status 2 (386.303016ms)

                                                
                                                
-- stdout --
	Stopped

                                                
                                                
-- /stdout --
start_stop_delete_test.go:311: status error: exit status 2 (may be ok)
start_stop_delete_test.go:311: (dbg) Run:  out/minikube-linux-arm64 unpause -p old-k8s-version-231557 --alsologtostderr -v=1
start_stop_delete_test.go:311: (dbg) Run:  out/minikube-linux-arm64 status --format={{.APIServer}} -p old-k8s-version-231557 -n old-k8s-version-231557
start_stop_delete_test.go:311: (dbg) Run:  out/minikube-linux-arm64 status --format={{.Kubelet}} -p old-k8s-version-231557 -n old-k8s-version-231557
--- PASS: TestStartStop/group/old-k8s-version/serial/Pause (3.70s)
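
The pause check is a fixed cycle: pause, confirm that `status` reports the API server as Paused and the kubelet as Stopped (both queries exit with code 2, which the test tolerates), then unpause and confirm both queries succeed again. Roughly, with a placeholder profile name:

    minikube pause -p my-profile --alsologtostderr -v=1
    minikube status --format='{{.APIServer}}' -p my-profile   # "Paused", exits 2
    minikube status --format='{{.Kubelet}}' -p my-profile     # "Stopped", exits 2
    minikube unpause -p my-profile --alsologtostderr -v=1
    minikube status --format='{{.APIServer}}' -p my-profile   # exits 0 again once unpaused
    minikube status --format='{{.Kubelet}}' -p my-profile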

                                                
                                    
TestStartStop/group/no-preload/serial/FirstStart (70.59s)

                                                
                                                
=== RUN   TestStartStop/group/no-preload/serial/FirstStart
start_stop_delete_test.go:186: (dbg) Run:  out/minikube-linux-arm64 start -p no-preload-253613 --memory=2200 --alsologtostderr --wait=true --preload=false --driver=docker  --container-runtime=containerd --kubernetes-version=v1.29.0-rc.1
E1201 19:45:43.059167  258301 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/default-k8s-diff-port-658051/client.crt: no such file or directory
E1201 19:45:43.064864  258301 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/default-k8s-diff-port-658051/client.crt: no such file or directory
E1201 19:45:43.075116  258301 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/default-k8s-diff-port-658051/client.crt: no such file or directory
E1201 19:45:43.095365  258301 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/default-k8s-diff-port-658051/client.crt: no such file or directory
E1201 19:45:43.143084  258301 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/default-k8s-diff-port-658051/client.crt: no such file or directory
E1201 19:45:43.223316  258301 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/default-k8s-diff-port-658051/client.crt: no such file or directory
E1201 19:45:43.383668  258301 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/default-k8s-diff-port-658051/client.crt: no such file or directory
E1201 19:45:43.706954  258301 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/default-k8s-diff-port-658051/client.crt: no such file or directory
E1201 19:45:44.347392  258301 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/default-k8s-diff-port-658051/client.crt: no such file or directory
E1201 19:45:45.627828  258301 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/default-k8s-diff-port-658051/client.crt: no such file or directory
E1201 19:45:48.188096  258301 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/default-k8s-diff-port-658051/client.crt: no such file or directory
E1201 19:45:53.309194  258301 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/default-k8s-diff-port-658051/client.crt: no such file or directory
E1201 19:46:03.549400  258301 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/default-k8s-diff-port-658051/client.crt: no such file or directory
E1201 19:46:24.030256  258301 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/default-k8s-diff-port-658051/client.crt: no such file or directory
start_stop_delete_test.go:186: (dbg) Done: out/minikube-linux-arm64 start -p no-preload-253613 --memory=2200 --alsologtostderr --wait=true --preload=false --driver=docker  --container-runtime=containerd --kubernetes-version=v1.29.0-rc.1: (1m10.590090914s)
--- PASS: TestStartStop/group/no-preload/serial/FirstStart (70.59s)
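
The no-preload profile starts the same way as the others except for `--preload=false`, so the cluster comes up without minikube's preloaded image tarball and the images for the requested Kubernetes version are pulled individually instead. The flag set used above, trimmed to the essentials and with a placeholder profile name:

    minikube start -p my-profile --memory=2200 --wait=true \
      --preload=false \
      --driver=docker --container-runtime=containerd \
      --kubernetes-version=v1.29.0-rc.1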

                                                
                                    
TestStartStop/group/no-preload/serial/DeployApp (10.02s)

                                                
                                                
=== RUN   TestStartStop/group/no-preload/serial/DeployApp
start_stop_delete_test.go:196: (dbg) Run:  kubectl --context no-preload-253613 create -f testdata/busybox.yaml
start_stop_delete_test.go:196: (dbg) TestStartStop/group/no-preload/serial/DeployApp: waiting 8m0s for pods matching "integration-test=busybox" in namespace "default" ...
helpers_test.go:344: "busybox" [c03adad8-0e67-460c-8c3a-1a491db0862b] Pending
helpers_test.go:344: "busybox" [c03adad8-0e67-460c-8c3a-1a491db0862b] Pending / Ready:ContainersNotReady (containers with unready status: [busybox]) / ContainersReady:ContainersNotReady (containers with unready status: [busybox])
helpers_test.go:344: "busybox" [c03adad8-0e67-460c-8c3a-1a491db0862b] Running
start_stop_delete_test.go:196: (dbg) TestStartStop/group/no-preload/serial/DeployApp: integration-test=busybox healthy within 9.034678394s
start_stop_delete_test.go:196: (dbg) Run:  kubectl --context no-preload-253613 exec busybox -- /bin/sh -c "ulimit -n"
--- PASS: TestStartStop/group/no-preload/serial/DeployApp (10.02s)
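
The deploy check creates a pod from testdata/busybox.yaml, waits for the integration-test=busybox selector to report Running, then runs `ulimit -n` inside the pod to prove exec works. The actual manifest is not included in this report; the sketch below only illustrates a pod that would satisfy the same selector and exec check (the image and command are assumptions, the image tag simply matching the busybox image listed by the image checks elsewhere in this run):

    kubectl --context my-profile apply -f - <<'EOF'
    apiVersion: v1
    kind: Pod
    metadata:
      name: busybox
      labels:
        integration-test: busybox
    spec:
      containers:
      - name: busybox
        image: gcr.io/k8s-minikube/busybox:1.28.4-glibc   # assumed for illustration
        command: ["sleep", "3600"]
    EOF
    kubectl --context my-profile wait --for=condition=Ready pod -l integration-test=busybox --timeout=8m
    kubectl --context my-profile exec busybox -- /bin/sh -c "ulimit -n"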

                                                
                                    
TestStartStop/group/no-preload/serial/EnableAddonWhileActive (1.23s)

                                                
                                                
=== RUN   TestStartStop/group/no-preload/serial/EnableAddonWhileActive
start_stop_delete_test.go:205: (dbg) Run:  out/minikube-linux-arm64 addons enable metrics-server -p no-preload-253613 --images=MetricsServer=registry.k8s.io/echoserver:1.4 --registries=MetricsServer=fake.domain
start_stop_delete_test.go:205: (dbg) Done: out/minikube-linux-arm64 addons enable metrics-server -p no-preload-253613 --images=MetricsServer=registry.k8s.io/echoserver:1.4 --registries=MetricsServer=fake.domain: (1.107748159s)
start_stop_delete_test.go:215: (dbg) Run:  kubectl --context no-preload-253613 describe deploy/metrics-server -n kube-system
--- PASS: TestStartStop/group/no-preload/serial/EnableAddonWhileActive (1.23s)

                                                
                                    
TestStartStop/group/no-preload/serial/Stop (12.32s)

                                                
                                                
=== RUN   TestStartStop/group/no-preload/serial/Stop
start_stop_delete_test.go:228: (dbg) Run:  out/minikube-linux-arm64 stop -p no-preload-253613 --alsologtostderr -v=3
E1201 19:47:04.990875  258301 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/default-k8s-diff-port-658051/client.crt: no such file or directory
start_stop_delete_test.go:228: (dbg) Done: out/minikube-linux-arm64 stop -p no-preload-253613 --alsologtostderr -v=3: (12.319618958s)
--- PASS: TestStartStop/group/no-preload/serial/Stop (12.32s)

                                                
                                    
TestStartStop/group/no-preload/serial/EnableAddonAfterStop (0.25s)

                                                
                                                
=== RUN   TestStartStop/group/no-preload/serial/EnableAddonAfterStop
start_stop_delete_test.go:239: (dbg) Run:  out/minikube-linux-arm64 status --format={{.Host}} -p no-preload-253613 -n no-preload-253613
start_stop_delete_test.go:239: (dbg) Non-zero exit: out/minikube-linux-arm64 status --format={{.Host}} -p no-preload-253613 -n no-preload-253613: exit status 7 (102.117418ms)

                                                
                                                
-- stdout --
	Stopped

                                                
                                                
-- /stdout --
start_stop_delete_test.go:239: status error: exit status 7 (may be ok)
start_stop_delete_test.go:246: (dbg) Run:  out/minikube-linux-arm64 addons enable dashboard -p no-preload-253613 --images=MetricsScraper=registry.k8s.io/echoserver:1.4
--- PASS: TestStartStop/group/no-preload/serial/EnableAddonAfterStop (0.25s)

                                                
                                    
TestStartStop/group/no-preload/serial/SecondStart (317.87s)

                                                
                                                
=== RUN   TestStartStop/group/no-preload/serial/SecondStart
start_stop_delete_test.go:256: (dbg) Run:  out/minikube-linux-arm64 start -p no-preload-253613 --memory=2200 --alsologtostderr --wait=true --preload=false --driver=docker  --container-runtime=containerd --kubernetes-version=v1.29.0-rc.1
E1201 19:48:26.911097  258301 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/default-k8s-diff-port-658051/client.crt: no such file or directory
start_stop_delete_test.go:256: (dbg) Done: out/minikube-linux-arm64 start -p no-preload-253613 --memory=2200 --alsologtostderr --wait=true --preload=false --driver=docker  --container-runtime=containerd --kubernetes-version=v1.29.0-rc.1: (5m17.27562118s)
start_stop_delete_test.go:262: (dbg) Run:  out/minikube-linux-arm64 status --format={{.Host}} -p no-preload-253613 -n no-preload-253613
--- PASS: TestStartStop/group/no-preload/serial/SecondStart (317.87s)

                                                
                                    
TestStartStop/group/embed-certs/serial/UserAppExistsAfterStop (15.03s)

                                                
                                                
=== RUN   TestStartStop/group/embed-certs/serial/UserAppExistsAfterStop
start_stop_delete_test.go:274: (dbg) TestStartStop/group/embed-certs/serial/UserAppExistsAfterStop: waiting 9m0s for pods matching "k8s-app=kubernetes-dashboard" in namespace "kubernetes-dashboard" ...
helpers_test.go:344: "kubernetes-dashboard-8694d4445c-gbxct" [daf0ad8a-e453-414f-b8c4-87844445eed6] Pending / Ready:ContainersNotReady (containers with unready status: [kubernetes-dashboard]) / ContainersReady:ContainersNotReady (containers with unready status: [kubernetes-dashboard])
E1201 19:49:07.434331  258301 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/addons-488129/client.crt: no such file or directory
E1201 19:49:10.086350  258301 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/old-k8s-version-231557/client.crt: no such file or directory
E1201 19:49:10.091639  258301 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/old-k8s-version-231557/client.crt: no such file or directory
E1201 19:49:10.101967  258301 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/old-k8s-version-231557/client.crt: no such file or directory
E1201 19:49:10.122263  258301 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/old-k8s-version-231557/client.crt: no such file or directory
E1201 19:49:10.162907  258301 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/old-k8s-version-231557/client.crt: no such file or directory
E1201 19:49:10.243338  258301 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/old-k8s-version-231557/client.crt: no such file or directory
E1201 19:49:10.407886  258301 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/old-k8s-version-231557/client.crt: no such file or directory
E1201 19:49:10.728363  258301 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/old-k8s-version-231557/client.crt: no such file or directory
E1201 19:49:11.369102  258301 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/old-k8s-version-231557/client.crt: no such file or directory
E1201 19:49:12.649930  258301 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/old-k8s-version-231557/client.crt: no such file or directory
helpers_test.go:344: "kubernetes-dashboard-8694d4445c-gbxct" [daf0ad8a-e453-414f-b8c4-87844445eed6] Running
E1201 19:49:15.210639  258301 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/old-k8s-version-231557/client.crt: no such file or directory
E1201 19:49:20.331091  258301 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/old-k8s-version-231557/client.crt: no such file or directory
start_stop_delete_test.go:274: (dbg) TestStartStop/group/embed-certs/serial/UserAppExistsAfterStop: k8s-app=kubernetes-dashboard healthy within 15.026246597s
--- PASS: TestStartStop/group/embed-certs/serial/UserAppExistsAfterStop (15.03s)

                                                
                                    
TestStartStop/group/embed-certs/serial/AddonExistsAfterStop (5.11s)

                                                
                                                
=== RUN   TestStartStop/group/embed-certs/serial/AddonExistsAfterStop
start_stop_delete_test.go:287: (dbg) TestStartStop/group/embed-certs/serial/AddonExistsAfterStop: waiting 9m0s for pods matching "k8s-app=kubernetes-dashboard" in namespace "kubernetes-dashboard" ...
helpers_test.go:344: "kubernetes-dashboard-8694d4445c-gbxct" [daf0ad8a-e453-414f-b8c4-87844445eed6] Running
E1201 19:49:24.386427  258301 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/addons-488129/client.crt: no such file or directory
start_stop_delete_test.go:287: (dbg) TestStartStop/group/embed-certs/serial/AddonExistsAfterStop: k8s-app=kubernetes-dashboard healthy within 5.010975599s
start_stop_delete_test.go:291: (dbg) Run:  kubectl --context embed-certs-603192 describe deploy/dashboard-metrics-scraper -n kubernetes-dashboard
--- PASS: TestStartStop/group/embed-certs/serial/AddonExistsAfterStop (5.11s)

                                                
                                    
TestStartStop/group/embed-certs/serial/VerifyKubernetesImages (0.33s)

                                                
                                                
=== RUN   TestStartStop/group/embed-certs/serial/VerifyKubernetesImages
start_stop_delete_test.go:304: (dbg) Run:  out/minikube-linux-arm64 -p embed-certs-603192 image list --format=json
start_stop_delete_test.go:304: Found non-minikube image: kindest/kindnetd:v20230809-80a64d96
start_stop_delete_test.go:304: Found non-minikube image: gcr.io/k8s-minikube/busybox:1.28.4-glibc
--- PASS: TestStartStop/group/embed-certs/serial/VerifyKubernetesImages (0.33s)

                                                
                                    
TestStartStop/group/embed-certs/serial/Pause (3.63s)

                                                
                                                
=== RUN   TestStartStop/group/embed-certs/serial/Pause
start_stop_delete_test.go:311: (dbg) Run:  out/minikube-linux-arm64 pause -p embed-certs-603192 --alsologtostderr -v=1
start_stop_delete_test.go:311: (dbg) Run:  out/minikube-linux-arm64 status --format={{.APIServer}} -p embed-certs-603192 -n embed-certs-603192
start_stop_delete_test.go:311: (dbg) Non-zero exit: out/minikube-linux-arm64 status --format={{.APIServer}} -p embed-certs-603192 -n embed-certs-603192: exit status 2 (413.91197ms)

                                                
                                                
-- stdout --
	Paused

                                                
                                                
-- /stdout --
start_stop_delete_test.go:311: status error: exit status 2 (may be ok)
start_stop_delete_test.go:311: (dbg) Run:  out/minikube-linux-arm64 status --format={{.Kubelet}} -p embed-certs-603192 -n embed-certs-603192
start_stop_delete_test.go:311: (dbg) Non-zero exit: out/minikube-linux-arm64 status --format={{.Kubelet}} -p embed-certs-603192 -n embed-certs-603192: exit status 2 (400.004458ms)

                                                
                                                
-- stdout --
	Stopped

                                                
                                                
-- /stdout --
start_stop_delete_test.go:311: status error: exit status 2 (may be ok)
start_stop_delete_test.go:311: (dbg) Run:  out/minikube-linux-arm64 unpause -p embed-certs-603192 --alsologtostderr -v=1
start_stop_delete_test.go:311: (dbg) Run:  out/minikube-linux-arm64 status --format={{.APIServer}} -p embed-certs-603192 -n embed-certs-603192
start_stop_delete_test.go:311: (dbg) Run:  out/minikube-linux-arm64 status --format={{.Kubelet}} -p embed-certs-603192 -n embed-certs-603192
--- PASS: TestStartStop/group/embed-certs/serial/Pause (3.63s)

                                                
                                    
TestStartStop/group/newest-cni/serial/FirstStart (48.12s)

                                                
                                                
=== RUN   TestStartStop/group/newest-cni/serial/FirstStart
start_stop_delete_test.go:186: (dbg) Run:  out/minikube-linux-arm64 start -p newest-cni-952454 --memory=2200 --alsologtostderr --wait=apiserver,system_pods,default_sa --feature-gates ServerSideApply=true --network-plugin=cni --extra-config=kubeadm.pod-network-cidr=10.42.0.0/16 --driver=docker  --container-runtime=containerd --kubernetes-version=v1.29.0-rc.1
E1201 19:49:37.131117  258301 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/ingress-addon-legacy-853196/client.crt: no such file or directory
E1201 19:49:51.051957  258301 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/old-k8s-version-231557/client.crt: no such file or directory
E1201 19:50:19.452129  258301 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/functional-616785/client.crt: no such file or directory
start_stop_delete_test.go:186: (dbg) Done: out/minikube-linux-arm64 start -p newest-cni-952454 --memory=2200 --alsologtostderr --wait=apiserver,system_pods,default_sa --feature-gates ServerSideApply=true --network-plugin=cni --extra-config=kubeadm.pod-network-cidr=10.42.0.0/16 --driver=docker  --container-runtime=containerd --kubernetes-version=v1.29.0-rc.1: (48.119945563s)
--- PASS: TestStartStop/group/newest-cni/serial/FirstStart (48.12s)
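
This profile exercises a CNI-only bring-up: `--network-plugin=cni` plus a kubeadm pod CIDR override and a feature gate, with `--wait` limited to the API server, system pods and the default service account because, as the warnings later in this group note, pods cannot schedule until additional CNI setup is done. The flag set, with a placeholder profile name:

    minikube start -p my-profile --memory=2200 \
      --wait=apiserver,system_pods,default_sa \
      --feature-gates ServerSideApply=true \
      --network-plugin=cni \
      --extra-config=kubeadm.pod-network-cidr=10.42.0.0/16 \
      --driver=docker --container-runtime=containerd \
      --kubernetes-version=v1.29.0-rc.1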

                                                
                                    
TestStartStop/group/newest-cni/serial/DeployApp (0s)

                                                
                                                
=== RUN   TestStartStop/group/newest-cni/serial/DeployApp
--- PASS: TestStartStop/group/newest-cni/serial/DeployApp (0.00s)

                                                
                                    
TestStartStop/group/newest-cni/serial/EnableAddonWhileActive (1.24s)

                                                
                                                
=== RUN   TestStartStop/group/newest-cni/serial/EnableAddonWhileActive
start_stop_delete_test.go:205: (dbg) Run:  out/minikube-linux-arm64 addons enable metrics-server -p newest-cni-952454 --images=MetricsServer=registry.k8s.io/echoserver:1.4 --registries=MetricsServer=fake.domain
start_stop_delete_test.go:205: (dbg) Done: out/minikube-linux-arm64 addons enable metrics-server -p newest-cni-952454 --images=MetricsServer=registry.k8s.io/echoserver:1.4 --registries=MetricsServer=fake.domain: (1.236041872s)
start_stop_delete_test.go:211: WARNING: cni mode requires additional setup before pods can schedule :(
--- PASS: TestStartStop/group/newest-cni/serial/EnableAddonWhileActive (1.24s)

                                                
                                    
TestStartStop/group/newest-cni/serial/Stop (1.31s)

                                                
                                                
=== RUN   TestStartStop/group/newest-cni/serial/Stop
start_stop_delete_test.go:228: (dbg) Run:  out/minikube-linux-arm64 stop -p newest-cni-952454 --alsologtostderr -v=3
start_stop_delete_test.go:228: (dbg) Done: out/minikube-linux-arm64 stop -p newest-cni-952454 --alsologtostderr -v=3: (1.31180101s)
--- PASS: TestStartStop/group/newest-cni/serial/Stop (1.31s)

                                                
                                    
TestStartStop/group/newest-cni/serial/EnableAddonAfterStop (0.23s)

                                                
                                                
=== RUN   TestStartStop/group/newest-cni/serial/EnableAddonAfterStop
start_stop_delete_test.go:239: (dbg) Run:  out/minikube-linux-arm64 status --format={{.Host}} -p newest-cni-952454 -n newest-cni-952454
start_stop_delete_test.go:239: (dbg) Non-zero exit: out/minikube-linux-arm64 status --format={{.Host}} -p newest-cni-952454 -n newest-cni-952454: exit status 7 (89.754729ms)

                                                
                                                
-- stdout --
	Stopped

                                                
                                                
-- /stdout --
start_stop_delete_test.go:239: status error: exit status 7 (may be ok)
start_stop_delete_test.go:246: (dbg) Run:  out/minikube-linux-arm64 addons enable dashboard -p newest-cni-952454 --images=MetricsScraper=registry.k8s.io/echoserver:1.4
--- PASS: TestStartStop/group/newest-cni/serial/EnableAddonAfterStop (0.23s)

                                                
                                    
TestStartStop/group/newest-cni/serial/SecondStart (30.34s)

                                                
                                                
=== RUN   TestStartStop/group/newest-cni/serial/SecondStart
start_stop_delete_test.go:256: (dbg) Run:  out/minikube-linux-arm64 start -p newest-cni-952454 --memory=2200 --alsologtostderr --wait=apiserver,system_pods,default_sa --feature-gates ServerSideApply=true --network-plugin=cni --extra-config=kubeadm.pod-network-cidr=10.42.0.0/16 --driver=docker  --container-runtime=containerd --kubernetes-version=v1.29.0-rc.1
E1201 19:50:32.012864  258301 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/old-k8s-version-231557/client.crt: no such file or directory
E1201 19:50:43.054925  258301 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/default-k8s-diff-port-658051/client.crt: no such file or directory
start_stop_delete_test.go:256: (dbg) Done: out/minikube-linux-arm64 start -p newest-cni-952454 --memory=2200 --alsologtostderr --wait=apiserver,system_pods,default_sa --feature-gates ServerSideApply=true --network-plugin=cni --extra-config=kubeadm.pod-network-cidr=10.42.0.0/16 --driver=docker  --container-runtime=containerd --kubernetes-version=v1.29.0-rc.1: (29.917967226s)
start_stop_delete_test.go:262: (dbg) Run:  out/minikube-linux-arm64 status --format={{.Host}} -p newest-cni-952454 -n newest-cni-952454
--- PASS: TestStartStop/group/newest-cni/serial/SecondStart (30.34s)

                                                
                                    
TestStartStop/group/newest-cni/serial/UserAppExistsAfterStop (0s)

                                                
                                                
=== RUN   TestStartStop/group/newest-cni/serial/UserAppExistsAfterStop
start_stop_delete_test.go:273: WARNING: cni mode requires additional setup before pods can schedule :(
--- PASS: TestStartStop/group/newest-cni/serial/UserAppExistsAfterStop (0.00s)

                                                
                                    
TestStartStop/group/newest-cni/serial/AddonExistsAfterStop (0s)

                                                
                                                
=== RUN   TestStartStop/group/newest-cni/serial/AddonExistsAfterStop
start_stop_delete_test.go:284: WARNING: cni mode requires additional setup before pods can schedule :(
--- PASS: TestStartStop/group/newest-cni/serial/AddonExistsAfterStop (0.00s)

                                                
                                    
TestStartStop/group/newest-cni/serial/VerifyKubernetesImages (0.31s)

                                                
                                                
=== RUN   TestStartStop/group/newest-cni/serial/VerifyKubernetesImages
start_stop_delete_test.go:304: (dbg) Run:  out/minikube-linux-arm64 -p newest-cni-952454 image list --format=json
start_stop_delete_test.go:304: Found non-minikube image: kindest/kindnetd:v20230809-80a64d96
--- PASS: TestStartStop/group/newest-cni/serial/VerifyKubernetesImages (0.31s)

                                                
                                    
TestStartStop/group/newest-cni/serial/Pause (3.51s)

                                                
                                                
=== RUN   TestStartStop/group/newest-cni/serial/Pause
start_stop_delete_test.go:311: (dbg) Run:  out/minikube-linux-arm64 pause -p newest-cni-952454 --alsologtostderr -v=1
start_stop_delete_test.go:311: (dbg) Run:  out/minikube-linux-arm64 status --format={{.APIServer}} -p newest-cni-952454 -n newest-cni-952454
start_stop_delete_test.go:311: (dbg) Non-zero exit: out/minikube-linux-arm64 status --format={{.APIServer}} -p newest-cni-952454 -n newest-cni-952454: exit status 2 (403.119733ms)

                                                
                                                
-- stdout --
	Paused

                                                
                                                
-- /stdout --
start_stop_delete_test.go:311: status error: exit status 2 (may be ok)
start_stop_delete_test.go:311: (dbg) Run:  out/minikube-linux-arm64 status --format={{.Kubelet}} -p newest-cni-952454 -n newest-cni-952454
start_stop_delete_test.go:311: (dbg) Non-zero exit: out/minikube-linux-arm64 status --format={{.Kubelet}} -p newest-cni-952454 -n newest-cni-952454: exit status 2 (390.141943ms)

                                                
                                                
-- stdout --
	Stopped

                                                
                                                
-- /stdout --
start_stop_delete_test.go:311: status error: exit status 2 (may be ok)
start_stop_delete_test.go:311: (dbg) Run:  out/minikube-linux-arm64 unpause -p newest-cni-952454 --alsologtostderr -v=1
start_stop_delete_test.go:311: (dbg) Run:  out/minikube-linux-arm64 status --format={{.APIServer}} -p newest-cni-952454 -n newest-cni-952454
start_stop_delete_test.go:311: (dbg) Run:  out/minikube-linux-arm64 status --format={{.Kubelet}} -p newest-cni-952454 -n newest-cni-952454
--- PASS: TestStartStop/group/newest-cni/serial/Pause (3.51s)

                                                
                                    
TestNetworkPlugins/group/auto/Start (61.89s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/auto/Start
net_test.go:112: (dbg) Run:  out/minikube-linux-arm64 start -p auto-730581 --memory=3072 --alsologtostderr --wait=true --wait-timeout=15m --driver=docker  --container-runtime=containerd
E1201 19:51:10.752050  258301 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/default-k8s-diff-port-658051/client.crt: no such file or directory
E1201 19:51:53.933026  258301 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/old-k8s-version-231557/client.crt: no such file or directory
net_test.go:112: (dbg) Done: out/minikube-linux-arm64 start -p auto-730581 --memory=3072 --alsologtostderr --wait=true --wait-timeout=15m --driver=docker  --container-runtime=containerd: (1m1.884638959s)
--- PASS: TestNetworkPlugins/group/auto/Start (61.89s)

                                                
                                    
TestNetworkPlugins/group/auto/KubeletFlags (0.53s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/auto/KubeletFlags
net_test.go:133: (dbg) Run:  out/minikube-linux-arm64 ssh -p auto-730581 "pgrep -a kubelet"
--- PASS: TestNetworkPlugins/group/auto/KubeletFlags (0.53s)
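
KubeletFlags simply greps the kubelet process on the node so the test can inspect the flags it was started with for the selected runtime and network plugin. The same inspection can be done by hand (profile name is a placeholder):

    # prints the full kubelet command line running inside the node
    minikube ssh -p my-profile "pgrep -a kubelet"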

                                                
                                    
TestNetworkPlugins/group/auto/NetCatPod (10.47s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/auto/NetCatPod
net_test.go:149: (dbg) Run:  kubectl --context auto-730581 replace --force -f testdata/netcat-deployment.yaml
net_test.go:163: (dbg) TestNetworkPlugins/group/auto/NetCatPod: waiting 15m0s for pods matching "app=netcat" in namespace "default" ...
helpers_test.go:344: "netcat-56589dfd74-kd82x" [cddc228d-c2bc-457c-be2d-563edb430768] Pending / Ready:ContainersNotReady (containers with unready status: [dnsutils]) / ContainersReady:ContainersNotReady (containers with unready status: [dnsutils])
helpers_test.go:344: "netcat-56589dfd74-kd82x" [cddc228d-c2bc-457c-be2d-563edb430768] Running
net_test.go:163: (dbg) TestNetworkPlugins/group/auto/NetCatPod: app=netcat healthy within 10.011040021s
--- PASS: TestNetworkPlugins/group/auto/NetCatPod (10.47s)

                                                
                                    
TestNetworkPlugins/group/auto/DNS (0.22s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/auto/DNS
net_test.go:175: (dbg) Run:  kubectl --context auto-730581 exec deployment/netcat -- nslookup kubernetes.default
--- PASS: TestNetworkPlugins/group/auto/DNS (0.22s)

                                                
                                    
TestNetworkPlugins/group/auto/Localhost (0.22s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/auto/Localhost
net_test.go:194: (dbg) Run:  kubectl --context auto-730581 exec deployment/netcat -- /bin/sh -c "nc -w 5 -i 5 -z localhost 8080"
--- PASS: TestNetworkPlugins/group/auto/Localhost (0.22s)

                                                
                                    
TestNetworkPlugins/group/auto/HairPin (0.2s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/auto/HairPin
net_test.go:264: (dbg) Run:  kubectl --context auto-730581 exec deployment/netcat -- /bin/sh -c "nc -w 5 -i 5 -z netcat 8080"
--- PASS: TestNetworkPlugins/group/auto/HairPin (0.20s)
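
DNS, Localhost and HairPin are three one-shot probes executed inside the netcat deployment created by the NetCatPod step: cluster DNS resolution, a connection to the pod's own localhost:8080, and a hairpin connection back to the pod through the netcat service name. The exact probes, with a placeholder context name:

    kubectl --context my-profile exec deployment/netcat -- nslookup kubernetes.default
    kubectl --context my-profile exec deployment/netcat -- /bin/sh -c "nc -w 5 -i 5 -z localhost 8080"
    kubectl --context my-profile exec deployment/netcat -- /bin/sh -c "nc -w 5 -i 5 -z netcat 8080"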

                                                
                                    
TestStartStop/group/no-preload/serial/UserAppExistsAfterStop (12.03s)

                                                
                                                
=== RUN   TestStartStop/group/no-preload/serial/UserAppExistsAfterStop
start_stop_delete_test.go:274: (dbg) TestStartStop/group/no-preload/serial/UserAppExistsAfterStop: waiting 9m0s for pods matching "k8s-app=kubernetes-dashboard" in namespace "kubernetes-dashboard" ...
helpers_test.go:344: "kubernetes-dashboard-8694d4445c-n7vqb" [abdb6dd6-b162-42d1-8871-ef8430e16c96] Pending / Ready:ContainersNotReady (containers with unready status: [kubernetes-dashboard]) / ContainersReady:ContainersNotReady (containers with unready status: [kubernetes-dashboard])
helpers_test.go:344: "kubernetes-dashboard-8694d4445c-n7vqb" [abdb6dd6-b162-42d1-8871-ef8430e16c96] Running
start_stop_delete_test.go:274: (dbg) TestStartStop/group/no-preload/serial/UserAppExistsAfterStop: k8s-app=kubernetes-dashboard healthy within 12.029458472s
--- PASS: TestStartStop/group/no-preload/serial/UserAppExistsAfterStop (12.03s)

                                                
                                    
TestNetworkPlugins/group/flannel/Start (67.36s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/flannel/Start
net_test.go:112: (dbg) Run:  out/minikube-linux-arm64 start -p flannel-730581 --memory=3072 --alsologtostderr --wait=true --wait-timeout=15m --cni=flannel --driver=docker  --container-runtime=containerd
net_test.go:112: (dbg) Done: out/minikube-linux-arm64 start -p flannel-730581 --memory=3072 --alsologtostderr --wait=true --wait-timeout=15m --cni=flannel --driver=docker  --container-runtime=containerd: (1m7.360964911s)
--- PASS: TestNetworkPlugins/group/flannel/Start (67.36s)

                                                
                                    
TestStartStop/group/no-preload/serial/AddonExistsAfterStop (5.15s)

                                                
                                                
=== RUN   TestStartStop/group/no-preload/serial/AddonExistsAfterStop
start_stop_delete_test.go:287: (dbg) TestStartStop/group/no-preload/serial/AddonExistsAfterStop: waiting 9m0s for pods matching "k8s-app=kubernetes-dashboard" in namespace "kubernetes-dashboard" ...
helpers_test.go:344: "kubernetes-dashboard-8694d4445c-n7vqb" [abdb6dd6-b162-42d1-8871-ef8430e16c96] Running
start_stop_delete_test.go:287: (dbg) TestStartStop/group/no-preload/serial/AddonExistsAfterStop: k8s-app=kubernetes-dashboard healthy within 5.012582537s
start_stop_delete_test.go:291: (dbg) Run:  kubectl --context no-preload-253613 describe deploy/dashboard-metrics-scraper -n kubernetes-dashboard
--- PASS: TestStartStop/group/no-preload/serial/AddonExistsAfterStop (5.15s)

                                                
                                    
TestStartStop/group/no-preload/serial/VerifyKubernetesImages (0.35s)

                                                
                                                
=== RUN   TestStartStop/group/no-preload/serial/VerifyKubernetesImages
start_stop_delete_test.go:304: (dbg) Run:  out/minikube-linux-arm64 -p no-preload-253613 image list --format=json
start_stop_delete_test.go:304: Found non-minikube image: kindest/kindnetd:v20230809-80a64d96
start_stop_delete_test.go:304: Found non-minikube image: gcr.io/k8s-minikube/busybox:1.28.4-glibc
--- PASS: TestStartStop/group/no-preload/serial/VerifyKubernetesImages (0.35s)

                                                
                                    
TestStartStop/group/no-preload/serial/Pause (5.02s)

                                                
                                                
=== RUN   TestStartStop/group/no-preload/serial/Pause
start_stop_delete_test.go:311: (dbg) Run:  out/minikube-linux-arm64 pause -p no-preload-253613 --alsologtostderr -v=1
start_stop_delete_test.go:311: (dbg) Done: out/minikube-linux-arm64 pause -p no-preload-253613 --alsologtostderr -v=1: (1.20372668s)
start_stop_delete_test.go:311: (dbg) Run:  out/minikube-linux-arm64 status --format={{.APIServer}} -p no-preload-253613 -n no-preload-253613
start_stop_delete_test.go:311: (dbg) Non-zero exit: out/minikube-linux-arm64 status --format={{.APIServer}} -p no-preload-253613 -n no-preload-253613: exit status 2 (511.51559ms)

                                                
                                                
-- stdout --
	Paused

                                                
                                                
-- /stdout --
start_stop_delete_test.go:311: status error: exit status 2 (may be ok)
start_stop_delete_test.go:311: (dbg) Run:  out/minikube-linux-arm64 status --format={{.Kubelet}} -p no-preload-253613 -n no-preload-253613
start_stop_delete_test.go:311: (dbg) Non-zero exit: out/minikube-linux-arm64 status --format={{.Kubelet}} -p no-preload-253613 -n no-preload-253613: exit status 2 (512.441044ms)

                                                
                                                
-- stdout --
	Stopped

                                                
                                                
-- /stdout --
start_stop_delete_test.go:311: status error: exit status 2 (may be ok)
start_stop_delete_test.go:311: (dbg) Run:  out/minikube-linux-arm64 unpause -p no-preload-253613 --alsologtostderr -v=1
start_stop_delete_test.go:311: (dbg) Done: out/minikube-linux-arm64 unpause -p no-preload-253613 --alsologtostderr -v=1: (1.156915384s)
start_stop_delete_test.go:311: (dbg) Run:  out/minikube-linux-arm64 status --format={{.APIServer}} -p no-preload-253613 -n no-preload-253613
start_stop_delete_test.go:311: (dbg) Run:  out/minikube-linux-arm64 status --format={{.Kubelet}} -p no-preload-253613 -n no-preload-253613
--- PASS: TestStartStop/group/no-preload/serial/Pause (5.02s)
E1201 19:58:25.309368  258301 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/auto-730581/client.crt: no such file or directory

                                                
                                    
TestNetworkPlugins/group/calico/Start (69.54s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/calico/Start
net_test.go:112: (dbg) Run:  out/minikube-linux-arm64 start -p calico-730581 --memory=3072 --alsologtostderr --wait=true --wait-timeout=15m --cni=calico --driver=docker  --container-runtime=containerd
net_test.go:112: (dbg) Done: out/minikube-linux-arm64 start -p calico-730581 --memory=3072 --alsologtostderr --wait=true --wait-timeout=15m --cni=calico --driver=docker  --container-runtime=containerd: (1m9.537031217s)
--- PASS: TestNetworkPlugins/group/calico/Start (69.54s)

                                                
                                    
TestNetworkPlugins/group/flannel/ControllerPod (5.05s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/flannel/ControllerPod
net_test.go:120: (dbg) TestNetworkPlugins/group/flannel/ControllerPod: waiting 10m0s for pods matching "app=flannel" in namespace "kube-flannel" ...
helpers_test.go:344: "kube-flannel-ds-2xpnz" [bc161f34-947b-4641-9ed9-2708fc4bd11a] Running
net_test.go:120: (dbg) TestNetworkPlugins/group/flannel/ControllerPod: app=flannel healthy within 5.047073172s
--- PASS: TestNetworkPlugins/group/flannel/ControllerPod (5.05s)
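
ControllerPod only waits for the CNI's own control pod to become healthy; for flannel that is the kube-flannel-ds DaemonSet pod in the kube-flannel namespace (the calico and kindnet groups below do the same with their own selectors). A manual equivalent, assuming a flannel-enabled profile:

    kubectl --context my-profile -n kube-flannel get pods -l app=flannel
    kubectl --context my-profile -n kube-flannel wait --for=condition=Ready pod -l app=flannel --timeout=10m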

                                                
                                    
TestNetworkPlugins/group/flannel/KubeletFlags (0.38s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/flannel/KubeletFlags
net_test.go:133: (dbg) Run:  out/minikube-linux-arm64 ssh -p flannel-730581 "pgrep -a kubelet"
--- PASS: TestNetworkPlugins/group/flannel/KubeletFlags (0.38s)

                                                
                                    
TestNetworkPlugins/group/flannel/NetCatPod (11.43s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/flannel/NetCatPod
net_test.go:149: (dbg) Run:  kubectl --context flannel-730581 replace --force -f testdata/netcat-deployment.yaml
net_test.go:163: (dbg) TestNetworkPlugins/group/flannel/NetCatPod: waiting 15m0s for pods matching "app=netcat" in namespace "default" ...
helpers_test.go:344: "netcat-56589dfd74-49whg" [d8b560c6-9606-4a48-8a39-122b6a6ea828] Pending / Ready:ContainersNotReady (containers with unready status: [dnsutils]) / ContainersReady:ContainersNotReady (containers with unready status: [dnsutils])
helpers_test.go:344: "netcat-56589dfd74-49whg" [d8b560c6-9606-4a48-8a39-122b6a6ea828] Running
net_test.go:163: (dbg) TestNetworkPlugins/group/flannel/NetCatPod: app=netcat healthy within 11.016554979s
--- PASS: TestNetworkPlugins/group/flannel/NetCatPod (11.43s)

                                                
                                    
TestNetworkPlugins/group/flannel/DNS (0.26s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/flannel/DNS
net_test.go:175: (dbg) Run:  kubectl --context flannel-730581 exec deployment/netcat -- nslookup kubernetes.default
--- PASS: TestNetworkPlugins/group/flannel/DNS (0.26s)

                                                
                                    
TestNetworkPlugins/group/flannel/Localhost (0.22s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/flannel/Localhost
net_test.go:194: (dbg) Run:  kubectl --context flannel-730581 exec deployment/netcat -- /bin/sh -c "nc -w 5 -i 5 -z localhost 8080"
--- PASS: TestNetworkPlugins/group/flannel/Localhost (0.22s)

                                                
                                    
TestNetworkPlugins/group/flannel/HairPin (0.2s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/flannel/HairPin
net_test.go:264: (dbg) Run:  kubectl --context flannel-730581 exec deployment/netcat -- /bin/sh -c "nc -w 5 -i 5 -z netcat 8080"
--- PASS: TestNetworkPlugins/group/flannel/HairPin (0.20s)

                                                
                                    
TestNetworkPlugins/group/calico/ControllerPod (5.05s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/calico/ControllerPod
net_test.go:120: (dbg) TestNetworkPlugins/group/calico/ControllerPod: waiting 10m0s for pods matching "k8s-app=calico-node" in namespace "kube-system" ...
helpers_test.go:344: "calico-node-lffzb" [87c38a2e-2ef0-4034-bb17-3f06198576c8] Running
E1201 19:54:10.086674  258301 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/old-k8s-version-231557/client.crt: no such file or directory
net_test.go:120: (dbg) TestNetworkPlugins/group/calico/ControllerPod: k8s-app=calico-node healthy within 5.04925813s
--- PASS: TestNetworkPlugins/group/calico/ControllerPod (5.05s)

                                                
                                    
TestNetworkPlugins/group/calico/KubeletFlags (0.45s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/calico/KubeletFlags
net_test.go:133: (dbg) Run:  out/minikube-linux-arm64 ssh -p calico-730581 "pgrep -a kubelet"
--- PASS: TestNetworkPlugins/group/calico/KubeletFlags (0.45s)

                                                
                                    
TestNetworkPlugins/group/calico/NetCatPod (10.65s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/calico/NetCatPod
net_test.go:149: (dbg) Run:  kubectl --context calico-730581 replace --force -f testdata/netcat-deployment.yaml
net_test.go:163: (dbg) TestNetworkPlugins/group/calico/NetCatPod: waiting 15m0s for pods matching "app=netcat" in namespace "default" ...
helpers_test.go:344: "netcat-56589dfd74-k4c7m" [29143c04-e45b-4001-a03c-0c6c7e203e55] Pending / Ready:ContainersNotReady (containers with unready status: [dnsutils]) / ContainersReady:ContainersNotReady (containers with unready status: [dnsutils])
helpers_test.go:344: "netcat-56589dfd74-k4c7m" [29143c04-e45b-4001-a03c-0c6c7e203e55] Running
E1201 19:54:24.386276  258301 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/addons-488129/client.crt: no such file or directory
net_test.go:163: (dbg) TestNetworkPlugins/group/calico/NetCatPod: app=netcat healthy within 10.02710064s
--- PASS: TestNetworkPlugins/group/calico/NetCatPod (10.65s)

                                                
                                    
TestNetworkPlugins/group/calico/DNS (0.32s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/calico/DNS
net_test.go:175: (dbg) Run:  kubectl --context calico-730581 exec deployment/netcat -- nslookup kubernetes.default
--- PASS: TestNetworkPlugins/group/calico/DNS (0.32s)

                                                
                                    
TestNetworkPlugins/group/calico/Localhost (0.3s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/calico/Localhost
net_test.go:194: (dbg) Run:  kubectl --context calico-730581 exec deployment/netcat -- /bin/sh -c "nc -w 5 -i 5 -z localhost 8080"
--- PASS: TestNetworkPlugins/group/calico/Localhost (0.30s)

                                                
                                    
TestNetworkPlugins/group/calico/HairPin (0.24s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/calico/HairPin
net_test.go:264: (dbg) Run:  kubectl --context calico-730581 exec deployment/netcat -- /bin/sh -c "nc -w 5 -i 5 -z netcat 8080"
--- PASS: TestNetworkPlugins/group/calico/HairPin (0.24s)

                                                
                                    
TestNetworkPlugins/group/custom-flannel/Start (71.22s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/custom-flannel/Start
net_test.go:112: (dbg) Run:  out/minikube-linux-arm64 start -p custom-flannel-730581 --memory=3072 --alsologtostderr --wait=true --wait-timeout=15m --cni=testdata/kube-flannel.yaml --driver=docker  --container-runtime=containerd
E1201 19:54:37.130953  258301 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/ingress-addon-legacy-853196/client.crt: no such file or directory
E1201 19:54:37.773573  258301 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/old-k8s-version-231557/client.crt: no such file or directory
net_test.go:112: (dbg) Done: out/minikube-linux-arm64 start -p custom-flannel-730581 --memory=3072 --alsologtostderr --wait=true --wait-timeout=15m --cni=testdata/kube-flannel.yaml --driver=docker  --container-runtime=containerd: (1m11.220709408s)
--- PASS: TestNetworkPlugins/group/custom-flannel/Start (71.22s)
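
The custom-flannel group differs from the built-in flannel group only in how the CNI is selected: `--cni` is pointed at a manifest file (testdata/kube-flannel.yaml here) rather than a named plugin, so minikube applies the user-supplied manifest during start. A sketch of the invocation with placeholder profile and manifest paths:

    minikube start -p my-profile --memory=3072 --wait=true --wait-timeout=15m \
      --cni=./my-kube-flannel.yaml \
      --driver=docker --container-runtime=containerd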

                                                
                                    
TestNetworkPlugins/group/kindnet/Start (102.74s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/kindnet/Start
net_test.go:112: (dbg) Run:  out/minikube-linux-arm64 start -p kindnet-730581 --memory=3072 --alsologtostderr --wait=true --wait-timeout=15m --cni=kindnet --driver=docker  --container-runtime=containerd
E1201 19:55:02.496154  258301 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/functional-616785/client.crt: no such file or directory
E1201 19:55:19.452319  258301 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/functional-616785/client.crt: no such file or directory
E1201 19:55:43.055361  258301 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/default-k8s-diff-port-658051/client.crt: no such file or directory
net_test.go:112: (dbg) Done: out/minikube-linux-arm64 start -p kindnet-730581 --memory=3072 --alsologtostderr --wait=true --wait-timeout=15m --cni=kindnet --driver=docker  --container-runtime=containerd: (1m42.740503941s)
--- PASS: TestNetworkPlugins/group/kindnet/Start (102.74s)

                                                
                                    
TestNetworkPlugins/group/custom-flannel/KubeletFlags (0.44s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/custom-flannel/KubeletFlags
net_test.go:133: (dbg) Run:  out/minikube-linux-arm64 ssh -p custom-flannel-730581 "pgrep -a kubelet"
--- PASS: TestNetworkPlugins/group/custom-flannel/KubeletFlags (0.44s)

                                                
                                    
TestNetworkPlugins/group/custom-flannel/NetCatPod (11.49s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/custom-flannel/NetCatPod
net_test.go:149: (dbg) Run:  kubectl --context custom-flannel-730581 replace --force -f testdata/netcat-deployment.yaml
net_test.go:163: (dbg) TestNetworkPlugins/group/custom-flannel/NetCatPod: waiting 15m0s for pods matching "app=netcat" in namespace "default" ...
helpers_test.go:344: "netcat-56589dfd74-dz8zq" [01b0dd45-bac3-4370-83cb-442264aff200] Pending / Ready:ContainersNotReady (containers with unready status: [dnsutils]) / ContainersReady:ContainersNotReady (containers with unready status: [dnsutils])
helpers_test.go:344: "netcat-56589dfd74-dz8zq" [01b0dd45-bac3-4370-83cb-442264aff200] Running
net_test.go:163: (dbg) TestNetworkPlugins/group/custom-flannel/NetCatPod: app=netcat healthy within 11.014383723s
--- PASS: TestNetworkPlugins/group/custom-flannel/NetCatPod (11.49s)

                                                
                                    
TestNetworkPlugins/group/custom-flannel/DNS (0.21s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/custom-flannel/DNS
net_test.go:175: (dbg) Run:  kubectl --context custom-flannel-730581 exec deployment/netcat -- nslookup kubernetes.default
--- PASS: TestNetworkPlugins/group/custom-flannel/DNS (0.21s)

                                                
                                    
TestNetworkPlugins/group/custom-flannel/Localhost (0.17s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/custom-flannel/Localhost
net_test.go:194: (dbg) Run:  kubectl --context custom-flannel-730581 exec deployment/netcat -- /bin/sh -c "nc -w 5 -i 5 -z localhost 8080"
--- PASS: TestNetworkPlugins/group/custom-flannel/Localhost (0.17s)

                                                
                                    
TestNetworkPlugins/group/custom-flannel/HairPin (0.19s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/custom-flannel/HairPin
net_test.go:264: (dbg) Run:  kubectl --context custom-flannel-730581 exec deployment/netcat -- /bin/sh -c "nc -w 5 -i 5 -z netcat 8080"
--- PASS: TestNetworkPlugins/group/custom-flannel/HairPin (0.19s)
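The DNS, Localhost and HairPin steps above all exec into the netcat deployment: DNS resolves kubernetes.default through the cluster DNS, Localhost checks the pod can reach its own port directly, and HairPin checks the pod can reach itself back through its own Service (nc to the netcat service name). A minimal sketch that replays the same three probes via kubectl, assuming the custom-flannel-730581 context from the log:

package main

import (
	"fmt"
	"os/exec"
)

// Replay the three in-pod probes logged above; a non-zero exit from
// kubectl exec marks the probe as failed.
func main() {
	probes := map[string][]string{
		"dns":       {"nslookup", "kubernetes.default"},
		"localhost": {"/bin/sh", "-c", "nc -w 5 -i 5 -z localhost 8080"},
		"hairpin":   {"/bin/sh", "-c", "nc -w 5 -i 5 -z netcat 8080"},
	}
	for name, probe := range probes {
		args := append([]string{"--context", "custom-flannel-730581",
			"exec", "deployment/netcat", "--"}, probe...)
		if err := exec.Command("kubectl", args...).Run(); err != nil {
			fmt.Printf("%s probe failed: %v\n", name, err)
		} else {
			fmt.Printf("%s probe ok\n", name)
		}
	}
}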

                                                
                                    
x
+
TestNetworkPlugins/group/bridge/Start (84.91s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/bridge/Start
net_test.go:112: (dbg) Run:  out/minikube-linux-arm64 start -p bridge-730581 --memory=3072 --alsologtostderr --wait=true --wait-timeout=15m --cni=bridge --driver=docker  --container-runtime=containerd
net_test.go:112: (dbg) Done: out/minikube-linux-arm64 start -p bridge-730581 --memory=3072 --alsologtostderr --wait=true --wait-timeout=15m --cni=bridge --driver=docker  --container-runtime=containerd: (1m24.909630502s)
--- PASS: TestNetworkPlugins/group/bridge/Start (84.91s)

                                                
                                    
x
+
TestNetworkPlugins/group/kindnet/ControllerPod (5.04s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/kindnet/ControllerPod
net_test.go:120: (dbg) TestNetworkPlugins/group/kindnet/ControllerPod: waiting 10m0s for pods matching "app=kindnet" in namespace "kube-system" ...
helpers_test.go:344: "kindnet-rgpfn" [29a4bd01-ec7c-4bb9-a75d-bd0ce569960f] Running
net_test.go:120: (dbg) TestNetworkPlugins/group/kindnet/ControllerPod: app=kindnet healthy within 5.036114592s
--- PASS: TestNetworkPlugins/group/kindnet/ControllerPod (5.04s)

                                                
                                    
x
+
TestNetworkPlugins/group/kindnet/KubeletFlags (0.44s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/kindnet/KubeletFlags
net_test.go:133: (dbg) Run:  out/minikube-linux-arm64 ssh -p kindnet-730581 "pgrep -a kubelet"
--- PASS: TestNetworkPlugins/group/kindnet/KubeletFlags (0.44s)

                                                
                                    
x
+
TestNetworkPlugins/group/kindnet/NetCatPod (10.65s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/kindnet/NetCatPod
net_test.go:149: (dbg) Run:  kubectl --context kindnet-730581 replace --force -f testdata/netcat-deployment.yaml
net_test.go:163: (dbg) TestNetworkPlugins/group/kindnet/NetCatPod: waiting 15m0s for pods matching "app=netcat" in namespace "default" ...
helpers_test.go:344: "netcat-56589dfd74-9dcsw" [efa61612-54ae-4fee-a675-1f9487b091d1] Pending / Ready:ContainersNotReady (containers with unready status: [dnsutils]) / ContainersReady:ContainersNotReady (containers with unready status: [dnsutils])
helpers_test.go:344: "netcat-56589dfd74-9dcsw" [efa61612-54ae-4fee-a675-1f9487b091d1] Running
E1201 19:56:52.960810  258301 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/no-preload-253613/client.crt: no such file or directory
E1201 19:56:52.966057  258301 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/no-preload-253613/client.crt: no such file or directory
E1201 19:56:52.976279  258301 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/no-preload-253613/client.crt: no such file or directory
E1201 19:56:52.996543  258301 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/no-preload-253613/client.crt: no such file or directory
E1201 19:56:53.036822  258301 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/no-preload-253613/client.crt: no such file or directory
E1201 19:56:53.117237  258301 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/no-preload-253613/client.crt: no such file or directory
E1201 19:56:53.277649  258301 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/no-preload-253613/client.crt: no such file or directory
E1201 19:56:53.598383  258301 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/no-preload-253613/client.crt: no such file or directory
E1201 19:56:54.239287  258301 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/no-preload-253613/client.crt: no such file or directory
net_test.go:163: (dbg) TestNetworkPlugins/group/kindnet/NetCatPod: app=netcat healthy within 10.015150544s
--- PASS: TestNetworkPlugins/group/kindnet/NetCatPod (10.65s)

                                                
                                    
x
+
TestNetworkPlugins/group/kindnet/DNS (0.22s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/kindnet/DNS
net_test.go:175: (dbg) Run:  kubectl --context kindnet-730581 exec deployment/netcat -- nslookup kubernetes.default
--- PASS: TestNetworkPlugins/group/kindnet/DNS (0.22s)

                                                
                                    
x
+
TestNetworkPlugins/group/kindnet/Localhost (0.19s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/kindnet/Localhost
net_test.go:194: (dbg) Run:  kubectl --context kindnet-730581 exec deployment/netcat -- /bin/sh -c "nc -w 5 -i 5 -z localhost 8080"
--- PASS: TestNetworkPlugins/group/kindnet/Localhost (0.19s)

                                                
                                    
x
+
TestNetworkPlugins/group/kindnet/HairPin (0.20s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/kindnet/HairPin
net_test.go:264: (dbg) Run:  kubectl --context kindnet-730581 exec deployment/netcat -- /bin/sh -c "nc -w 5 -i 5 -z netcat 8080"
--- PASS: TestNetworkPlugins/group/kindnet/HairPin (0.20s)

                                                
                                    
x
+
TestNetworkPlugins/group/enable-default-cni/Start (83.46s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/enable-default-cni/Start
net_test.go:112: (dbg) Run:  out/minikube-linux-arm64 start -p enable-default-cni-730581 --memory=3072 --alsologtostderr --wait=true --wait-timeout=15m --enable-default-cni=true --driver=docker  --container-runtime=containerd
E1201 19:57:23.868070  258301 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/auto-730581/client.crt: no such file or directory
E1201 19:57:33.925778  258301 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/no-preload-253613/client.crt: no such file or directory
E1201 19:57:44.349121  258301 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/auto-730581/client.crt: no such file or directory
net_test.go:112: (dbg) Done: out/minikube-linux-arm64 start -p enable-default-cni-730581 --memory=3072 --alsologtostderr --wait=true --wait-timeout=15m --enable-default-cni=true --driver=docker  --container-runtime=containerd: (1m23.458635865s)
--- PASS: TestNetworkPlugins/group/enable-default-cni/Start (83.46s)

                                                
                                    
x
+
TestNetworkPlugins/group/bridge/KubeletFlags (0.48s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/bridge/KubeletFlags
net_test.go:133: (dbg) Run:  out/minikube-linux-arm64 ssh -p bridge-730581 "pgrep -a kubelet"
--- PASS: TestNetworkPlugins/group/bridge/KubeletFlags (0.48s)

                                                
                                    
x
+
TestNetworkPlugins/group/bridge/NetCatPod (10.58s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/bridge/NetCatPod
net_test.go:149: (dbg) Run:  kubectl --context bridge-730581 replace --force -f testdata/netcat-deployment.yaml
net_test.go:163: (dbg) TestNetworkPlugins/group/bridge/NetCatPod: waiting 15m0s for pods matching "app=netcat" in namespace "default" ...
helpers_test.go:344: "netcat-56589dfd74-ffdvm" [6b087be2-fdb5-45f5-aba2-b51b82ed47d2] Pending / Ready:ContainersNotReady (containers with unready status: [dnsutils]) / ContainersReady:ContainersNotReady (containers with unready status: [dnsutils])
helpers_test.go:344: "netcat-56589dfd74-ffdvm" [6b087be2-fdb5-45f5-aba2-b51b82ed47d2] Running
net_test.go:163: (dbg) TestNetworkPlugins/group/bridge/NetCatPod: app=netcat healthy within 10.012461964s
--- PASS: TestNetworkPlugins/group/bridge/NetCatPod (10.58s)

                                                
                                    
x
+
TestNetworkPlugins/group/bridge/DNS (0.25s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/bridge/DNS
net_test.go:175: (dbg) Run:  kubectl --context bridge-730581 exec deployment/netcat -- nslookup kubernetes.default
--- PASS: TestNetworkPlugins/group/bridge/DNS (0.25s)

                                                
                                    
x
+
TestNetworkPlugins/group/bridge/Localhost (0.19s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/bridge/Localhost
net_test.go:194: (dbg) Run:  kubectl --context bridge-730581 exec deployment/netcat -- /bin/sh -c "nc -w 5 -i 5 -z localhost 8080"
--- PASS: TestNetworkPlugins/group/bridge/Localhost (0.19s)

                                                
                                    
x
+
TestNetworkPlugins/group/bridge/HairPin (0.25s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/bridge/HairPin
net_test.go:264: (dbg) Run:  kubectl --context bridge-730581 exec deployment/netcat -- /bin/sh -c "nc -w 5 -i 5 -z netcat 8080"
--- PASS: TestNetworkPlugins/group/bridge/HairPin (0.25s)

                                                
                                    
x
+
TestNetworkPlugins/group/enable-default-cni/KubeletFlags (0.34s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/enable-default-cni/KubeletFlags
net_test.go:133: (dbg) Run:  out/minikube-linux-arm64 ssh -p enable-default-cni-730581 "pgrep -a kubelet"
--- PASS: TestNetworkPlugins/group/enable-default-cni/KubeletFlags (0.34s)

                                                
                                    
x
+
TestNetworkPlugins/group/enable-default-cni/NetCatPod (10.36s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/enable-default-cni/NetCatPod
net_test.go:149: (dbg) Run:  kubectl --context enable-default-cni-730581 replace --force -f testdata/netcat-deployment.yaml
net_test.go:163: (dbg) TestNetworkPlugins/group/enable-default-cni/NetCatPod: waiting 15m0s for pods matching "app=netcat" in namespace "default" ...
helpers_test.go:344: "netcat-56589dfd74-gz5lt" [3996219c-7c8d-4eee-960e-6e4b0d470b2f] Pending / Ready:ContainersNotReady (containers with unready status: [dnsutils]) / ContainersReady:ContainersNotReady (containers with unready status: [dnsutils])
helpers_test.go:344: "netcat-56589dfd74-gz5lt" [3996219c-7c8d-4eee-960e-6e4b0d470b2f] Running
E1201 19:58:48.680664  258301 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/flannel-730581/client.crt: no such file or directory
E1201 19:58:48.686024  258301 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/flannel-730581/client.crt: no such file or directory
E1201 19:58:48.696239  258301 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/flannel-730581/client.crt: no such file or directory
E1201 19:58:48.716557  258301 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/flannel-730581/client.crt: no such file or directory
E1201 19:58:48.756823  258301 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/flannel-730581/client.crt: no such file or directory
E1201 19:58:48.837147  258301 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/flannel-730581/client.crt: no such file or directory
E1201 19:58:48.997545  258301 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/flannel-730581/client.crt: no such file or directory
E1201 19:58:49.318367  258301 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/flannel-730581/client.crt: no such file or directory
E1201 19:58:49.958531  258301 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/flannel-730581/client.crt: no such file or directory
E1201 19:58:51.239693  258301 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/flannel-730581/client.crt: no such file or directory
net_test.go:163: (dbg) TestNetworkPlugins/group/enable-default-cni/NetCatPod: app=netcat healthy within 10.011427374s
--- PASS: TestNetworkPlugins/group/enable-default-cni/NetCatPod (10.36s)

                                                
                                    
x
+
TestNetworkPlugins/group/enable-default-cni/DNS (0.20s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/enable-default-cni/DNS
net_test.go:175: (dbg) Run:  kubectl --context enable-default-cni-730581 exec deployment/netcat -- nslookup kubernetes.default
--- PASS: TestNetworkPlugins/group/enable-default-cni/DNS (0.20s)

                                                
                                    
x
+
TestNetworkPlugins/group/enable-default-cni/Localhost (0.17s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/enable-default-cni/Localhost
net_test.go:194: (dbg) Run:  kubectl --context enable-default-cni-730581 exec deployment/netcat -- /bin/sh -c "nc -w 5 -i 5 -z localhost 8080"
E1201 19:58:53.800019  258301 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/flannel-730581/client.crt: no such file or directory
--- PASS: TestNetworkPlugins/group/enable-default-cni/Localhost (0.17s)

                                                
                                    
x
+
TestNetworkPlugins/group/enable-default-cni/HairPin (0.19s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/enable-default-cni/HairPin
net_test.go:264: (dbg) Run:  kubectl --context enable-default-cni-730581 exec deployment/netcat -- /bin/sh -c "nc -w 5 -i 5 -z netcat 8080"
--- PASS: TestNetworkPlugins/group/enable-default-cni/HairPin (0.19s)

                                                
                                    

Test skip (31/314)

x
+
TestDownloadOnly/v1.16.0/cached-images (0s)

                                                
                                                
=== RUN   TestDownloadOnly/v1.16.0/cached-images
aaa_download_only_test.go:117: Preload exists, images won't be cached
--- SKIP: TestDownloadOnly/v1.16.0/cached-images (0.00s)

                                                
                                    
x
+
TestDownloadOnly/v1.16.0/binaries (0s)

                                                
                                                
=== RUN   TestDownloadOnly/v1.16.0/binaries
aaa_download_only_test.go:139: Preload exists, binaries are present within.
--- SKIP: TestDownloadOnly/v1.16.0/binaries (0.00s)

                                                
                                    
x
+
TestDownloadOnly/v1.16.0/kubectl (0s)

                                                
                                                
=== RUN   TestDownloadOnly/v1.16.0/kubectl
aaa_download_only_test.go:155: Test for darwin and windows
--- SKIP: TestDownloadOnly/v1.16.0/kubectl (0.00s)

                                                
                                    
x
+
TestDownloadOnly/v1.28.4/cached-images (0s)

                                                
                                                
=== RUN   TestDownloadOnly/v1.28.4/cached-images
aaa_download_only_test.go:117: Preload exists, images won't be cached
--- SKIP: TestDownloadOnly/v1.28.4/cached-images (0.00s)

                                                
                                    
x
+
TestDownloadOnly/v1.28.4/binaries (0s)

                                                
                                                
=== RUN   TestDownloadOnly/v1.28.4/binaries
aaa_download_only_test.go:139: Preload exists, binaries are present within.
--- SKIP: TestDownloadOnly/v1.28.4/binaries (0.00s)

                                                
                                    
x
+
TestDownloadOnly/v1.28.4/kubectl (0s)

                                                
                                                
=== RUN   TestDownloadOnly/v1.28.4/kubectl
aaa_download_only_test.go:155: Test for darwin and windows
--- SKIP: TestDownloadOnly/v1.28.4/kubectl (0.00s)

                                                
                                    
x
+
TestDownloadOnly/v1.29.0-rc.1/cached-images (0s)

                                                
                                                
=== RUN   TestDownloadOnly/v1.29.0-rc.1/cached-images
aaa_download_only_test.go:117: Preload exists, images won't be cached
--- SKIP: TestDownloadOnly/v1.29.0-rc.1/cached-images (0.00s)

                                                
                                    
x
+
TestDownloadOnly/v1.29.0-rc.1/binaries (0s)

                                                
                                                
=== RUN   TestDownloadOnly/v1.29.0-rc.1/binaries
aaa_download_only_test.go:139: Preload exists, binaries are present within.
--- SKIP: TestDownloadOnly/v1.29.0-rc.1/binaries (0.00s)

                                                
                                    
x
+
TestDownloadOnly/v1.29.0-rc.1/kubectl (0s)

                                                
                                                
=== RUN   TestDownloadOnly/v1.29.0-rc.1/kubectl
aaa_download_only_test.go:155: Test for darwin and windows
--- SKIP: TestDownloadOnly/v1.29.0-rc.1/kubectl (0.00s)

                                                
                                    
x
+
TestDownloadOnlyKic (0.66s)

                                                
                                                
=== RUN   TestDownloadOnlyKic
aaa_download_only_test.go:225: (dbg) Run:  out/minikube-linux-arm64 start --download-only -p download-docker-911952 --alsologtostderr --driver=docker  --container-runtime=containerd
aaa_download_only_test.go:237: Skip for arm64 platform. See https://github.com/kubernetes/minikube/issues/10144
helpers_test.go:175: Cleaning up "download-docker-911952" profile ...
helpers_test.go:178: (dbg) Run:  out/minikube-linux-arm64 delete -p download-docker-911952
--- SKIP: TestDownloadOnlyKic (0.66s)

                                                
                                    
x
+
TestOffline (0s)

                                                
                                                
=== RUN   TestOffline
=== PAUSE TestOffline

                                                
                                                

                                                
                                                
=== CONT  TestOffline
aab_offline_test.go:35: skipping TestOffline - only docker runtime supported on arm64. See https://github.com/kubernetes/minikube/issues/10144
--- SKIP: TestOffline (0.00s)
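Skips like TestOffline above are decided by platform guards at the top of the test. A hedged sketch of such a guard follows; the helper name and message are illustrative, not the aab_offline_test.go code.

package example

import (
	"runtime"
	"testing"
)

// skipIfUnsupported skips on arm64 unless the docker runtime is selected,
// mirroring the reason string in the log above.
func skipIfUnsupported(t *testing.T, containerRuntime string) {
	t.Helper()
	if runtime.GOARCH == "arm64" && containerRuntime != "docker" {
		t.Skipf("skipping: only docker runtime supported on arm64 (got %s)", containerRuntime)
	}
}

func TestOfflineExample(t *testing.T) {
	skipIfUnsupported(t, "containerd")
	// The offline scenario would run here on supported platforms.
}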

                                                
                                    
x
+
TestAddons/parallel/HelmTiller (0s)

                                                
                                                
=== RUN   TestAddons/parallel/HelmTiller
=== PAUSE TestAddons/parallel/HelmTiller

                                                
                                                

                                                
                                                
=== CONT  TestAddons/parallel/HelmTiller
addons_test.go:443: skip Helm test on arm64
--- SKIP: TestAddons/parallel/HelmTiller (0.00s)

                                                
                                    
x
+
TestAddons/parallel/Olm (0s)

                                                
                                                
=== RUN   TestAddons/parallel/Olm
=== PAUSE TestAddons/parallel/Olm

                                                
                                                

                                                
                                                
=== CONT  TestAddons/parallel/Olm
addons_test.go:497: Skipping OLM addon test until https://github.com/operator-framework/operator-lifecycle-manager/issues/2534 is resolved
--- SKIP: TestAddons/parallel/Olm (0.00s)

                                                
                                    
x
+
TestDockerFlags (0s)

                                                
                                                
=== RUN   TestDockerFlags
docker_test.go:41: skipping: only runs with docker container runtime, currently testing containerd
--- SKIP: TestDockerFlags (0.00s)

                                                
                                    
x
+
TestKVMDriverInstallOrUpdate (0s)

                                                
                                                
=== RUN   TestKVMDriverInstallOrUpdate
driver_install_or_update_test.go:45: Skip if arm64. See https://github.com/kubernetes/minikube/issues/10144
--- SKIP: TestKVMDriverInstallOrUpdate (0.00s)

                                                
                                    
x
+
TestHyperKitDriverInstallOrUpdate (0s)

                                                
                                                
=== RUN   TestHyperKitDriverInstallOrUpdate
driver_install_or_update_test.go:105: Skip if not darwin.
--- SKIP: TestHyperKitDriverInstallOrUpdate (0.00s)

                                                
                                    
x
+
TestHyperkitDriverSkipUpgrade (0s)

                                                
                                                
=== RUN   TestHyperkitDriverSkipUpgrade
driver_install_or_update_test.go:169: Skip if not darwin.
--- SKIP: TestHyperkitDriverSkipUpgrade (0.00s)

                                                
                                    
x
+
TestFunctional/parallel/MySQL (0s)

                                                
                                                
=== RUN   TestFunctional/parallel/MySQL
=== PAUSE TestFunctional/parallel/MySQL

                                                
                                                

                                                
                                                
=== CONT  TestFunctional/parallel/MySQL
functional_test.go:1783: arm64 is not supported by mysql. Skip the test. See https://github.com/kubernetes/minikube/issues/10144
--- SKIP: TestFunctional/parallel/MySQL (0.00s)

                                                
                                    
x
+
TestFunctional/parallel/DockerEnv (0s)

                                                
                                                
=== RUN   TestFunctional/parallel/DockerEnv
=== PAUSE TestFunctional/parallel/DockerEnv

                                                
                                                

                                                
                                                
=== CONT  TestFunctional/parallel/DockerEnv
functional_test.go:459: only validate docker env with docker container runtime, currently testing containerd
--- SKIP: TestFunctional/parallel/DockerEnv (0.00s)

                                                
                                    
x
+
TestFunctional/parallel/PodmanEnv (0s)

                                                
                                                
=== RUN   TestFunctional/parallel/PodmanEnv
=== PAUSE TestFunctional/parallel/PodmanEnv

                                                
                                                

                                                
                                                
=== CONT  TestFunctional/parallel/PodmanEnv
functional_test.go:546: only validate podman env with docker container runtime, currently testing containerd
--- SKIP: TestFunctional/parallel/PodmanEnv (0.00s)

                                                
                                    
x
+
TestFunctional/parallel/TunnelCmd/serial/DNSResolutionByDig (0s)

                                                
                                                
=== RUN   TestFunctional/parallel/TunnelCmd/serial/DNSResolutionByDig
functional_test_tunnel_test.go:99: DNS forwarding is only supported for Hyperkit on Darwin, skipping test DNS forwarding
--- SKIP: TestFunctional/parallel/TunnelCmd/serial/DNSResolutionByDig (0.00s)

                                                
                                    
x
+
TestFunctional/parallel/TunnelCmd/serial/DNSResolutionByDscacheutil (0s)

                                                
                                                
=== RUN   TestFunctional/parallel/TunnelCmd/serial/DNSResolutionByDscacheutil
functional_test_tunnel_test.go:99: DNS forwarding is only supported for Hyperkit on Darwin, skipping test DNS forwarding
--- SKIP: TestFunctional/parallel/TunnelCmd/serial/DNSResolutionByDscacheutil (0.00s)

                                                
                                    
x
+
TestFunctional/parallel/TunnelCmd/serial/AccessThroughDNS (0s)

                                                
                                                
=== RUN   TestFunctional/parallel/TunnelCmd/serial/AccessThroughDNS
functional_test_tunnel_test.go:99: DNS forwarding is only supported for Hyperkit on Darwin, skipping test DNS forwarding
--- SKIP: TestFunctional/parallel/TunnelCmd/serial/AccessThroughDNS (0.00s)

                                                
                                    
x
+
TestGvisorAddon (0s)

                                                
                                                
=== RUN   TestGvisorAddon
gvisor_addon_test.go:34: skipping test because --gvisor=false
--- SKIP: TestGvisorAddon (0.00s)

                                                
                                    
x
+
TestImageBuild (0s)

                                                
                                                
=== RUN   TestImageBuild
image_test.go:33: 
--- SKIP: TestImageBuild (0.00s)

                                                
                                    
x
+
TestChangeNoneUser (0s)

                                                
                                                
=== RUN   TestChangeNoneUser
none_test.go:38: Test requires none driver and SUDO_USER env to not be empty
--- SKIP: TestChangeNoneUser (0.00s)

                                                
                                    
x
+
TestScheduledStopWindows (0s)

                                                
                                                
=== RUN   TestScheduledStopWindows
scheduled_stop_test.go:42: test only runs on windows
--- SKIP: TestScheduledStopWindows (0.00s)

                                                
                                    
x
+
TestSkaffold (0s)

                                                
                                                
=== RUN   TestSkaffold
skaffold_test.go:45: skaffold requires docker-env, currently testing containerd container runtime
--- SKIP: TestSkaffold (0.00s)

                                                
                                    
x
+
TestStartStop/group/disable-driver-mounts (0.18s)

                                                
                                                
=== RUN   TestStartStop/group/disable-driver-mounts
=== PAUSE TestStartStop/group/disable-driver-mounts

                                                
                                                

                                                
                                                
=== CONT  TestStartStop/group/disable-driver-mounts
start_stop_delete_test.go:103: skipping TestStartStop/group/disable-driver-mounts - only runs on virtualbox
helpers_test.go:175: Cleaning up "disable-driver-mounts-340821" profile ...
helpers_test.go:178: (dbg) Run:  out/minikube-linux-arm64 delete -p disable-driver-mounts-340821
--- SKIP: TestStartStop/group/disable-driver-mounts (0.18s)
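Even skipped tests like disable-driver-mounts above still remove the profile they created. A minimal sketch of that cleanup pattern, registered with t.Cleanup (illustrative, not the helpers_test.go helper; the binary path and profile name are taken from the log):

package example

import (
	"os/exec"
	"testing"
)

// cleanupProfile deletes a minikube profile once the test (or skip) finishes,
// mirroring the "Cleaning up ... profile" step logged above.
func cleanupProfile(t *testing.T, profile string) {
	t.Helper()
	t.Cleanup(func() {
		if err := exec.Command("out/minikube-linux-arm64", "delete", "-p", profile).Run(); err != nil {
			t.Logf("failed to delete profile %s: %v", profile, err)
		}
	})
}

func TestDisableDriverMountsExample(t *testing.T) {
	cleanupProfile(t, "disable-driver-mounts-340821")
	t.Skip("only runs on virtualbox")
}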

                                                
                                    
x
+
TestNetworkPlugins/group/kubenet (5.94s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/kubenet
net_test.go:93: Skipping the test as containerd container runtimes requires CNI
panic.go:523: 
----------------------- debugLogs start: kubenet-730581 [pass: true] --------------------------------
>>> netcat: nslookup kubernetes.default:
Error in configuration: context was not found for specified context: kubenet-730581

                                                
                                                

                                                
                                                
>>> netcat: nslookup debug kubernetes.default a-records:
Error in configuration: context was not found for specified context: kubenet-730581

                                                
                                                

                                                
                                                
>>> netcat: dig search kubernetes.default:
Error in configuration: context was not found for specified context: kubenet-730581

                                                
                                                

                                                
                                                
>>> netcat: dig @10.96.0.10 kubernetes.default.svc.cluster.local udp/53:
Error in configuration: context was not found for specified context: kubenet-730581

                                                
                                                

                                                
                                                
>>> netcat: dig @10.96.0.10 kubernetes.default.svc.cluster.local tcp/53:
Error in configuration: context was not found for specified context: kubenet-730581

                                                
                                                

                                                
                                                
>>> netcat: nc 10.96.0.10 udp/53:
Error in configuration: context was not found for specified context: kubenet-730581

                                                
                                                

                                                
                                                
>>> netcat: nc 10.96.0.10 tcp/53:
Error in configuration: context was not found for specified context: kubenet-730581

                                                
                                                

                                                
                                                
>>> netcat: /etc/nsswitch.conf:
Error in configuration: context was not found for specified context: kubenet-730581

                                                
                                                

                                                
                                                
>>> netcat: /etc/hosts:
Error in configuration: context was not found for specified context: kubenet-730581

                                                
                                                

                                                
                                                
>>> netcat: /etc/resolv.conf:
Error in configuration: context was not found for specified context: kubenet-730581

                                                
                                                

                                                
                                                
>>> host: /etc/nsswitch.conf:
* Profile "kubenet-730581" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p kubenet-730581"

                                                
                                                

                                                
                                                
>>> host: /etc/hosts:
* Profile "kubenet-730581" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p kubenet-730581"

                                                
                                                

                                                
                                                
>>> host: /etc/resolv.conf:
* Profile "kubenet-730581" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p kubenet-730581"

                                                
                                                

                                                
                                                
>>> k8s: nodes, services, endpoints, daemon sets, deployments and pods, :
Error in configuration: context was not found for specified context: kubenet-730581

                                                
                                                

                                                
                                                
>>> host: crictl pods:
* Profile "kubenet-730581" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p kubenet-730581"

                                                
                                                

                                                
                                                
>>> host: crictl containers:
* Profile "kubenet-730581" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p kubenet-730581"

                                                
                                                

                                                
                                                
>>> k8s: describe netcat deployment:
error: context "kubenet-730581" does not exist

                                                
                                                

                                                
                                                
>>> k8s: describe netcat pod(s):
error: context "kubenet-730581" does not exist

                                                
                                                

                                                
                                                
>>> k8s: netcat logs:
error: context "kubenet-730581" does not exist

                                                
                                                

                                                
                                                
>>> k8s: describe coredns deployment:
error: context "kubenet-730581" does not exist

                                                
                                                

                                                
                                                
>>> k8s: describe coredns pods:
error: context "kubenet-730581" does not exist

                                                
                                                

                                                
                                                
>>> k8s: coredns logs:
error: context "kubenet-730581" does not exist

                                                
                                                

                                                
                                                
>>> k8s: describe api server pod(s):
error: context "kubenet-730581" does not exist

                                                
                                                

                                                
                                                
>>> k8s: api server logs:
error: context "kubenet-730581" does not exist

                                                
                                                

                                                
                                                
>>> host: /etc/cni:
* Profile "kubenet-730581" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p kubenet-730581"

                                                
                                                

                                                
                                                
>>> host: ip a s:
* Profile "kubenet-730581" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p kubenet-730581"

                                                
                                                

                                                
                                                
>>> host: ip r s:
* Profile "kubenet-730581" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p kubenet-730581"

                                                
                                                

                                                
                                                
>>> host: iptables-save:
* Profile "kubenet-730581" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p kubenet-730581"

                                                
                                                

                                                
                                                
>>> host: iptables table nat:
* Profile "kubenet-730581" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p kubenet-730581"

                                                
                                                

                                                
                                                
>>> k8s: describe kube-proxy daemon set:
error: context "kubenet-730581" does not exist

                                                
                                                

                                                
                                                
>>> k8s: describe kube-proxy pod(s):
error: context "kubenet-730581" does not exist

                                                
                                                

                                                
                                                
>>> k8s: kube-proxy logs:
error: context "kubenet-730581" does not exist

                                                
                                                

                                                
                                                
>>> host: kubelet daemon status:
* Profile "kubenet-730581" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p kubenet-730581"

                                                
                                                

                                                
                                                
>>> host: kubelet daemon config:
* Profile "kubenet-730581" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p kubenet-730581"

                                                
                                                

                                                
                                                
>>> k8s: kubelet logs:
* Profile "kubenet-730581" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p kubenet-730581"

                                                
                                                

                                                
                                                
>>> host: /etc/kubernetes/kubelet.conf:
* Profile "kubenet-730581" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p kubenet-730581"

                                                
                                                

                                                
                                                
>>> host: /var/lib/kubelet/config.yaml:
* Profile "kubenet-730581" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p kubenet-730581"

                                                
                                                

                                                
                                                
>>> k8s: kubectl config:
apiVersion: v1
clusters:
- cluster:
    certificate-authority: /home/jenkins/minikube-integration/17703-252966/.minikube/ca.crt
    extensions:
    - extension:
        last-update: Fri, 01 Dec 2023 19:30:11 UTC
        provider: minikube.sigs.k8s.io
        version: v1.32.0
      name: cluster_info
    server: https://192.168.76.2:8443
  name: NoKubernetes-053646
contexts:
- context:
    cluster: NoKubernetes-053646
    extensions:
    - extension:
        last-update: Fri, 01 Dec 2023 19:30:11 UTC
        provider: minikube.sigs.k8s.io
        version: v1.32.0
      name: context_info
    namespace: default
    user: NoKubernetes-053646
  name: NoKubernetes-053646
current-context: NoKubernetes-053646
kind: Config
preferences: {}
users:
- name: NoKubernetes-053646
  user:
    client-certificate: /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/NoKubernetes-053646/client.crt
    client-key: /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/NoKubernetes-053646/client.key

                                                
                                                

                                                
                                                
>>> k8s: cms:
Error in configuration: context was not found for specified context: kubenet-730581

                                                
                                                

                                                
                                                
>>> host: docker daemon status:
* Profile "kubenet-730581" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p kubenet-730581"

                                                
                                                

                                                
                                                
>>> host: docker daemon config:
* Profile "kubenet-730581" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p kubenet-730581"

                                                
                                                

                                                
                                                
>>> host: /etc/docker/daemon.json:
* Profile "kubenet-730581" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p kubenet-730581"

                                                
                                                

                                                
                                                
>>> host: docker system info:
* Profile "kubenet-730581" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p kubenet-730581"

                                                
                                                

                                                
                                                
>>> host: cri-docker daemon status:
* Profile "kubenet-730581" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p kubenet-730581"

                                                
                                                

                                                
                                                
>>> host: cri-docker daemon config:
* Profile "kubenet-730581" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p kubenet-730581"

                                                
                                                

                                                
                                                
>>> host: /etc/systemd/system/cri-docker.service.d/10-cni.conf:
* Profile "kubenet-730581" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p kubenet-730581"

                                                
                                                

                                                
                                                
>>> host: /usr/lib/systemd/system/cri-docker.service:
* Profile "kubenet-730581" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p kubenet-730581"

                                                
                                                

                                                
                                                
>>> host: cri-dockerd version:
* Profile "kubenet-730581" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p kubenet-730581"

                                                
                                                

                                                
                                                
>>> host: containerd daemon status:
* Profile "kubenet-730581" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p kubenet-730581"

                                                
                                                

                                                
                                                
>>> host: containerd daemon config:
* Profile "kubenet-730581" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p kubenet-730581"

                                                
                                                

                                                
                                                
>>> host: /lib/systemd/system/containerd.service:
* Profile "kubenet-730581" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p kubenet-730581"

                                                
                                                

                                                
                                                
>>> host: /etc/containerd/config.toml:
* Profile "kubenet-730581" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p kubenet-730581"

                                                
                                                

                                                
                                                
>>> host: containerd config dump:
* Profile "kubenet-730581" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p kubenet-730581"

                                                
                                                

                                                
                                                
>>> host: crio daemon status:
* Profile "kubenet-730581" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p kubenet-730581"

                                                
                                                

                                                
                                                
>>> host: crio daemon config:
* Profile "kubenet-730581" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p kubenet-730581"

                                                
                                                

                                                
                                                
>>> host: /etc/crio:
* Profile "kubenet-730581" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p kubenet-730581"

                                                
                                                

                                                
                                                
>>> host: crio config:
* Profile "kubenet-730581" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p kubenet-730581"

                                                
                                                
----------------------- debugLogs end: kubenet-730581 [took: 5.71906374s] --------------------------------
helpers_test.go:175: Cleaning up "kubenet-730581" profile ...
helpers_test.go:178: (dbg) Run:  out/minikube-linux-arm64 delete -p kubenet-730581
--- SKIP: TestNetworkPlugins/group/kubenet (5.94s)
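Every probe in the debugLogs block above fails with "context was not found" because the kubenet-730581 profile was never started; the kubectl config dump further up still points at the leftover NoKubernetes-053646 context. A small sketch that checks whether a kubectl context exists before attempting any debug collection (assumes kubectl on PATH; not the test's actual debugLogs code):

package main

import (
	"fmt"
	"os/exec"
	"strings"
)

// contextExists reports whether kubectl knows about the named context, so
// debug collection can be skipped for profiles that were never created.
func contextExists(name string) bool {
	out, err := exec.Command("kubectl", "config", "get-contexts", "-o", "name").Output()
	if err != nil {
		return false
	}
	for _, ctx := range strings.Fields(string(out)) {
		if ctx == name {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(contextExists("kubenet-730581"))
}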

                                                
                                    
x
+
TestNetworkPlugins/group/cilium (4.51s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/cilium
net_test.go:102: Skipping the test as it's interfering with other tests and is outdated
panic.go:523: 
----------------------- debugLogs start: cilium-730581 [pass: true] --------------------------------
>>> netcat: nslookup kubernetes.default:
Error in configuration: context was not found for specified context: cilium-730581

                                                
                                                

                                                
                                                
>>> netcat: nslookup debug kubernetes.default a-records:
Error in configuration: context was not found for specified context: cilium-730581

                                                
                                                

                                                
                                                
>>> netcat: dig search kubernetes.default:
Error in configuration: context was not found for specified context: cilium-730581

                                                
                                                

                                                
                                                
>>> netcat: dig @10.96.0.10 kubernetes.default.svc.cluster.local udp/53:
Error in configuration: context was not found for specified context: cilium-730581

                                                
                                                

                                                
                                                
>>> netcat: dig @10.96.0.10 kubernetes.default.svc.cluster.local tcp/53:
Error in configuration: context was not found for specified context: cilium-730581

                                                
                                                

                                                
                                                
>>> netcat: nc 10.96.0.10 udp/53:
Error in configuration: context was not found for specified context: cilium-730581

                                                
                                                

                                                
                                                
>>> netcat: nc 10.96.0.10 tcp/53:
Error in configuration: context was not found for specified context: cilium-730581

                                                
                                                

                                                
                                                
>>> netcat: /etc/nsswitch.conf:
Error in configuration: context was not found for specified context: cilium-730581

                                                
                                                

                                                
                                                
>>> netcat: /etc/hosts:
Error in configuration: context was not found for specified context: cilium-730581

                                                
                                                

                                                
                                                
>>> netcat: /etc/resolv.conf:
Error in configuration: context was not found for specified context: cilium-730581

                                                
                                                

                                                
                                                
>>> host: /etc/nsswitch.conf:
* Profile "cilium-730581" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-730581"

                                                
                                                

                                                
                                                
>>> host: /etc/hosts:
* Profile "cilium-730581" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-730581"

                                                
                                                

                                                
                                                
>>> host: /etc/resolv.conf:
* Profile "cilium-730581" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-730581"

                                                
                                                

                                                
                                                
>>> k8s: nodes, services, endpoints, daemon sets, deployments and pods, :
Error in configuration: context was not found for specified context: cilium-730581

                                                
                                                

                                                
                                                
>>> host: crictl pods:
* Profile "cilium-730581" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-730581"

                                                
                                                

                                                
                                                
>>> host: crictl containers:
* Profile "cilium-730581" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-730581"

                                                
                                                

                                                
                                                
>>> k8s: describe netcat deployment:
error: context "cilium-730581" does not exist

                                                
                                                

                                                
                                                
>>> k8s: describe netcat pod(s):
error: context "cilium-730581" does not exist

                                                
                                                

                                                
                                                
>>> k8s: netcat logs:
error: context "cilium-730581" does not exist

                                                
                                                

                                                
                                                
>>> k8s: describe coredns deployment:
error: context "cilium-730581" does not exist

                                                
                                                

                                                
                                                
>>> k8s: describe coredns pods:
error: context "cilium-730581" does not exist

                                                
                                                

                                                
                                                
>>> k8s: coredns logs:
error: context "cilium-730581" does not exist

                                                
                                                

                                                
                                                
>>> k8s: describe api server pod(s):
error: context "cilium-730581" does not exist

                                                
                                                

                                                
                                                
>>> k8s: api server logs:
error: context "cilium-730581" does not exist

                                                
                                                

                                                
                                                
>>> host: /etc/cni:
* Profile "cilium-730581" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-730581"

                                                
                                                

                                                
                                                
>>> host: ip a s:
* Profile "cilium-730581" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-730581"

                                                
                                                

                                                
                                                
>>> host: ip r s:
* Profile "cilium-730581" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-730581"

                                                
                                                

                                                
                                                
>>> host: iptables-save:
* Profile "cilium-730581" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-730581"

                                                
                                                

                                                
                                                
>>> host: iptables table nat:
* Profile "cilium-730581" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-730581"

                                                
                                                

                                                
                                                
>>> k8s: describe cilium daemon set:
Error in configuration: context was not found for specified context: cilium-730581

                                                
                                                

                                                
                                                
>>> k8s: describe cilium daemon set pod(s):
Error in configuration: context was not found for specified context: cilium-730581

                                                
                                                

                                                
                                                
>>> k8s: cilium daemon set container(s) logs (current):
error: context "cilium-730581" does not exist

                                                
                                                

                                                
                                                
>>> k8s: cilium daemon set container(s) logs (previous):
error: context "cilium-730581" does not exist

                                                
                                                

                                                
                                                
>>> k8s: describe cilium deployment:
Error in configuration: context was not found for specified context: cilium-730581

                                                
                                                

                                                
                                                
>>> k8s: describe cilium deployment pod(s):
Error in configuration: context was not found for specified context: cilium-730581

                                                
                                                

                                                
                                                
>>> k8s: cilium deployment container(s) logs (current):
error: context "cilium-730581" does not exist

                                                
                                                

                                                
                                                
>>> k8s: cilium deployment container(s) logs (previous):
error: context "cilium-730581" does not exist

                                                
                                                

                                                
                                                
>>> k8s: describe kube-proxy daemon set:
error: context "cilium-730581" does not exist

                                                
                                                

                                                
                                                
>>> k8s: describe kube-proxy pod(s):
error: context "cilium-730581" does not exist

                                                
                                                

                                                
                                                
>>> k8s: kube-proxy logs:
error: context "cilium-730581" does not exist

                                                
                                                

                                                
                                                
>>> host: kubelet daemon status:
* Profile "cilium-730581" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-730581"

                                                
                                                

                                                
                                                
>>> host: kubelet daemon config:
* Profile "cilium-730581" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-730581"

                                                
                                                

                                                
                                                
>>> k8s: kubelet logs:
* Profile "cilium-730581" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-730581"

                                                
                                                

                                                
                                                
>>> host: /etc/kubernetes/kubelet.conf:
* Profile "cilium-730581" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-730581"

                                                
                                                

                                                
                                                
>>> host: /var/lib/kubelet/config.yaml:
* Profile "cilium-730581" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-730581"

                                                
                                                

                                                
                                                
>>> k8s: kubectl config:
apiVersion: v1
clusters:
- cluster:
    certificate-authority: /home/jenkins/minikube-integration/17703-252966/.minikube/ca.crt
    extensions:
    - extension:
        last-update: Fri, 01 Dec 2023 19:30:11 UTC
        provider: minikube.sigs.k8s.io
        version: v1.32.0
      name: cluster_info
    server: https://192.168.76.2:8443
  name: NoKubernetes-053646
contexts:
- context:
    cluster: NoKubernetes-053646
    extensions:
    - extension:
        last-update: Fri, 01 Dec 2023 19:30:11 UTC
        provider: minikube.sigs.k8s.io
        version: v1.32.0
      name: context_info
    namespace: default
    user: NoKubernetes-053646
  name: NoKubernetes-053646
current-context: NoKubernetes-053646
kind: Config
preferences: {}
users:
- name: NoKubernetes-053646
  user:
    client-certificate: /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/NoKubernetes-053646/client.crt
    client-key: /home/jenkins/minikube-integration/17703-252966/.minikube/profiles/NoKubernetes-053646/client.key

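Note: every debug command in this section fails the same way because the kubeconfig shown above only defines the NoKubernetes-053646 context, so lookups against cilium-730581 cannot resolve. As a rough illustration only (not part of the minikube test suite; the helper name and flow are assumptions), a pre-check with k8s.io/client-go could confirm a context exists in the kubeconfig before attempting this kind of log collection:

package main

import (
	"fmt"
	"os"

	"k8s.io/client-go/tools/clientcmd"
)

// contextExists reports whether contextName is defined in the kubeconfig at
// kubeconfigPath. Hypothetical helper for illustration only.
func contextExists(kubeconfigPath, contextName string) (bool, error) {
	cfg, err := clientcmd.LoadFromFile(kubeconfigPath)
	if err != nil {
		return false, err
	}
	_, ok := cfg.Contexts[contextName]
	return ok, nil
}

func main() {
	// KUBECONFIG path and context name are placeholders for this sketch.
	ok, err := contextExists(os.Getenv("KUBECONFIG"), "cilium-730581")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Println("context present:", ok)
}

With the kubeconfig captured above, such a check would report the context as absent, which matches the repeated "context was not found" and "Profile not found" output collected here.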
                                                
                                                

                                                
                                                
>>> k8s: cms:
Error in configuration: context was not found for specified context: cilium-730581

                                                
                                                

                                                
                                                
>>> host: docker daemon status:
* Profile "cilium-730581" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-730581"

                                                
                                                

                                                
                                                
>>> host: docker daemon config:
* Profile "cilium-730581" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-730581"

                                                
                                                

                                                
                                                
>>> host: /etc/docker/daemon.json:
* Profile "cilium-730581" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-730581"

                                                
                                                

                                                
                                                
>>> host: docker system info:
* Profile "cilium-730581" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-730581"

                                                
                                                

                                                
                                                
>>> host: cri-docker daemon status:
* Profile "cilium-730581" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-730581"

                                                
                                                

                                                
                                                
>>> host: cri-docker daemon config:
* Profile "cilium-730581" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-730581"

                                                
                                                

                                                
                                                
>>> host: /etc/systemd/system/cri-docker.service.d/10-cni.conf:
* Profile "cilium-730581" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-730581"

                                                
                                                

                                                
                                                
>>> host: /usr/lib/systemd/system/cri-docker.service:
* Profile "cilium-730581" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-730581"

                                                
                                                

                                                
                                                
>>> host: cri-dockerd version:
* Profile "cilium-730581" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-730581"

                                                
                                                

                                                
                                                
>>> host: containerd daemon status:
* Profile "cilium-730581" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-730581"

                                                
                                                

                                                
                                                
>>> host: containerd daemon config:
* Profile "cilium-730581" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-730581"

                                                
                                                

                                                
                                                
>>> host: /lib/systemd/system/containerd.service:
* Profile "cilium-730581" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-730581"

                                                
                                                

                                                
                                                
>>> host: /etc/containerd/config.toml:
* Profile "cilium-730581" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-730581"

                                                
                                                

                                                
                                                
>>> host: containerd config dump:
* Profile "cilium-730581" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-730581"

                                                
                                                

                                                
                                                
>>> host: crio daemon status:
* Profile "cilium-730581" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-730581"

                                                
                                                

                                                
                                                
>>> host: crio daemon config:
* Profile "cilium-730581" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-730581"

                                                
                                                

                                                
                                                
>>> host: /etc/crio:
* Profile "cilium-730581" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-730581"

                                                
                                                

                                                
                                                
>>> host: crio config:
* Profile "cilium-730581" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-730581"

                                                
                                                
----------------------- debugLogs end: cilium-730581 [took: 4.324109895s] --------------------------------
helpers_test.go:175: Cleaning up "cilium-730581" profile ...
helpers_test.go:178: (dbg) Run:  out/minikube-linux-arm64 delete -p cilium-730581
--- SKIP: TestNetworkPlugins/group/cilium (4.51s)

                                                
                                    