Test Report: Docker_Linux_docker_arm64 19106

0b6579e93b2a9bd368d98c5e9e3374097121bbca:2024-06-20:34974

Failed tests (2/343)

| Order | Failed Test                                            | Duration (s) |
|-------|--------------------------------------------------------|--------------|
| 30    | TestAddons/parallel/Ingress                            | 36.58        |
| 369   | TestStartStop/group/old-k8s-version/serial/SecondStart | 376.34       |
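To iterate on these outside CI, Go's test filter can re-run just the failing tests. This is a hypothetical invocation, assuming a minikube source checkout with the integration suite under test/integration and a prebuilt out/minikube-linux-arm64 binary; any extra harness flags this job passes are not shown in the report:

    # paths and timeouts are assumptions; -run takes one regex per subtest level, so run each failure separately
    go test ./test/integration -v -timeout 30m -run 'TestAddons/parallel/Ingress'
    go test ./test/integration -v -timeout 60m -run 'TestStartStop/group/old-k8s-version/serial/SecondStart'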
TestAddons/parallel/Ingress (36.58s)

=== RUN   TestAddons/parallel/Ingress
=== PAUSE TestAddons/parallel/Ingress

=== CONT  TestAddons/parallel/Ingress
addons_test.go:209: (dbg) Run:  kubectl --context addons-705802 wait --for=condition=ready --namespace=ingress-nginx pod --selector=app.kubernetes.io/component=controller --timeout=90s
addons_test.go:234: (dbg) Run:  kubectl --context addons-705802 replace --force -f testdata/nginx-ingress-v1.yaml
addons_test.go:247: (dbg) Run:  kubectl --context addons-705802 replace --force -f testdata/nginx-pod-svc.yaml
addons_test.go:252: (dbg) TestAddons/parallel/Ingress: waiting 8m0s for pods matching "run=nginx" in namespace "default" ...
helpers_test.go:344: "nginx" [3f2c28a1-5663-4f15-bf0b-5a79906e02c9] Pending / Ready:ContainersNotReady (containers with unready status: [nginx]) / ContainersReady:ContainersNotReady (containers with unready status: [nginx])
helpers_test.go:344: "nginx" [3f2c28a1-5663-4f15-bf0b-5a79906e02c9] Running
addons_test.go:252: (dbg) TestAddons/parallel/Ingress: run=nginx healthy within 9.003970746s
addons_test.go:264: (dbg) Run:  out/minikube-linux-arm64 -p addons-705802 ssh "curl -s http://127.0.0.1/ -H 'Host: nginx.example.com'"
addons_test.go:288: (dbg) Run:  kubectl --context addons-705802 replace --force -f testdata/ingress-dns-example-v1.yaml
addons_test.go:293: (dbg) Run:  out/minikube-linux-arm64 -p addons-705802 ip
addons_test.go:299: (dbg) Run:  nslookup hello-john.test 192.168.49.2
addons_test.go:299: (dbg) Non-zero exit: nslookup hello-john.test 192.168.49.2: exit status 1 (15.069783505s)

-- stdout --
	;; connection timed out; no servers could be reached
	
	

-- /stdout --
addons_test.go:301: failed to nslookup hello-john.test host. args "nslookup hello-john.test 192.168.49.2" : exit status 1
addons_test.go:305: unexpected output from nslookup. stdout: ;; connection timed out; no servers could be reached

stderr: 
addons_test.go:308: (dbg) Run:  out/minikube-linux-arm64 -p addons-705802 addons disable ingress-dns --alsologtostderr -v=1
addons_test.go:308: (dbg) Done: out/minikube-linux-arm64 -p addons-705802 addons disable ingress-dns --alsologtostderr -v=1: (1.398367888s)
addons_test.go:313: (dbg) Run:  out/minikube-linux-arm64 -p addons-705802 addons disable ingress --alsologtostderr -v=1
addons_test.go:313: (dbg) Done: out/minikube-linux-arm64 -p addons-705802 addons disable ingress --alsologtostderr -v=1: (7.662464452s)
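The failure above is the ingress-dns check timing out: nslookup against the cluster IP returned no answer. A hypothetical manual re-check of that step, assuming the addons-705802 profile is still up and the ingress-dns addon has not yet been disabled (the CLUSTER_IP variable is only for illustration):

    CLUSTER_IP="$(out/minikube-linux-arm64 -p addons-705802 ip)"   # 192.168.49.2 in this run
    nslookup hello-john.test "$CLUSTER_IP"                         # the test expects a resolved address, not ';; connection timed out'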
helpers_test.go:222: -----------------------post-mortem--------------------------------
helpers_test.go:230: ======>  post-mortem[TestAddons/parallel/Ingress]: docker inspect <======
helpers_test.go:231: (dbg) Run:  docker inspect addons-705802
helpers_test.go:235: (dbg) docker inspect addons-705802:

-- stdout --
	[
	    {
	        "Id": "bbbe59ba2132940192b0ec9c3e8c4764ef4dc955d03d7dd19cd9e53df24fdba6",
	        "Created": "2024-06-20T17:02:11.577694959Z",
	        "Path": "/usr/local/bin/entrypoint",
	        "Args": [
	            "/sbin/init"
	        ],
	        "State": {
	            "Status": "running",
	            "Running": true,
	            "Paused": false,
	            "Restarting": false,
	            "OOMKilled": false,
	            "Dead": false,
	            "Pid": 8907,
	            "ExitCode": 0,
	            "Error": "",
	            "StartedAt": "2024-06-20T17:02:11.901014622Z",
	            "FinishedAt": "0001-01-01T00:00:00Z"
	        },
	        "Image": "sha256:d01e921d87b5c98766e198911bba95096a87baa7b20caabee6d66ddda3a30e16",
	        "ResolvConfPath": "/var/lib/docker/containers/bbbe59ba2132940192b0ec9c3e8c4764ef4dc955d03d7dd19cd9e53df24fdba6/resolv.conf",
	        "HostnamePath": "/var/lib/docker/containers/bbbe59ba2132940192b0ec9c3e8c4764ef4dc955d03d7dd19cd9e53df24fdba6/hostname",
	        "HostsPath": "/var/lib/docker/containers/bbbe59ba2132940192b0ec9c3e8c4764ef4dc955d03d7dd19cd9e53df24fdba6/hosts",
	        "LogPath": "/var/lib/docker/containers/bbbe59ba2132940192b0ec9c3e8c4764ef4dc955d03d7dd19cd9e53df24fdba6/bbbe59ba2132940192b0ec9c3e8c4764ef4dc955d03d7dd19cd9e53df24fdba6-json.log",
	        "Name": "/addons-705802",
	        "RestartCount": 0,
	        "Driver": "overlay2",
	        "Platform": "linux",
	        "MountLabel": "",
	        "ProcessLabel": "",
	        "AppArmorProfile": "unconfined",
	        "ExecIDs": null,
	        "HostConfig": {
	            "Binds": [
	                "/lib/modules:/lib/modules:ro",
	                "addons-705802:/var"
	            ],
	            "ContainerIDFile": "",
	            "LogConfig": {
	                "Type": "json-file",
	                "Config": {}
	            },
	            "NetworkMode": "addons-705802",
	            "PortBindings": {
	                "22/tcp": [
	                    {
	                        "HostIp": "127.0.0.1",
	                        "HostPort": ""
	                    }
	                ],
	                "2376/tcp": [
	                    {
	                        "HostIp": "127.0.0.1",
	                        "HostPort": ""
	                    }
	                ],
	                "32443/tcp": [
	                    {
	                        "HostIp": "127.0.0.1",
	                        "HostPort": ""
	                    }
	                ],
	                "5000/tcp": [
	                    {
	                        "HostIp": "127.0.0.1",
	                        "HostPort": ""
	                    }
	                ],
	                "8443/tcp": [
	                    {
	                        "HostIp": "127.0.0.1",
	                        "HostPort": ""
	                    }
	                ]
	            },
	            "RestartPolicy": {
	                "Name": "no",
	                "MaximumRetryCount": 0
	            },
	            "AutoRemove": false,
	            "VolumeDriver": "",
	            "VolumesFrom": null,
	            "ConsoleSize": [
	                0,
	                0
	            ],
	            "CapAdd": null,
	            "CapDrop": null,
	            "CgroupnsMode": "host",
	            "Dns": [],
	            "DnsOptions": [],
	            "DnsSearch": [],
	            "ExtraHosts": null,
	            "GroupAdd": null,
	            "IpcMode": "private",
	            "Cgroup": "",
	            "Links": null,
	            "OomScoreAdj": 0,
	            "PidMode": "",
	            "Privileged": true,
	            "PublishAllPorts": false,
	            "ReadonlyRootfs": false,
	            "SecurityOpt": [
	                "seccomp=unconfined",
	                "apparmor=unconfined",
	                "label=disable"
	            ],
	            "Tmpfs": {
	                "/run": "",
	                "/tmp": ""
	            },
	            "UTSMode": "",
	            "UsernsMode": "",
	            "ShmSize": 67108864,
	            "Runtime": "runc",
	            "Isolation": "",
	            "CpuShares": 0,
	            "Memory": 4194304000,
	            "NanoCpus": 2000000000,
	            "CgroupParent": "",
	            "BlkioWeight": 0,
	            "BlkioWeightDevice": [],
	            "BlkioDeviceReadBps": [],
	            "BlkioDeviceWriteBps": [],
	            "BlkioDeviceReadIOps": [],
	            "BlkioDeviceWriteIOps": [],
	            "CpuPeriod": 0,
	            "CpuQuota": 0,
	            "CpuRealtimePeriod": 0,
	            "CpuRealtimeRuntime": 0,
	            "CpusetCpus": "",
	            "CpusetMems": "",
	            "Devices": [],
	            "DeviceCgroupRules": null,
	            "DeviceRequests": null,
	            "MemoryReservation": 0,
	            "MemorySwap": 8388608000,
	            "MemorySwappiness": null,
	            "OomKillDisable": false,
	            "PidsLimit": null,
	            "Ulimits": [],
	            "CpuCount": 0,
	            "CpuPercent": 0,
	            "IOMaximumIOps": 0,
	            "IOMaximumBandwidth": 0,
	            "MaskedPaths": null,
	            "ReadonlyPaths": null
	        },
	        "GraphDriver": {
	            "Data": {
	                "LowerDir": "/var/lib/docker/overlay2/4ef76f4c54fb7565101bce7711d34ae8c18d5d033b0a88f72a04f561b00f52b3-init/diff:/var/lib/docker/overlay2/3f60bca961993969053ba4629c08bd2e3000c79c5940aaa31be086f923fe76a2/diff",
	                "MergedDir": "/var/lib/docker/overlay2/4ef76f4c54fb7565101bce7711d34ae8c18d5d033b0a88f72a04f561b00f52b3/merged",
	                "UpperDir": "/var/lib/docker/overlay2/4ef76f4c54fb7565101bce7711d34ae8c18d5d033b0a88f72a04f561b00f52b3/diff",
	                "WorkDir": "/var/lib/docker/overlay2/4ef76f4c54fb7565101bce7711d34ae8c18d5d033b0a88f72a04f561b00f52b3/work"
	            },
	            "Name": "overlay2"
	        },
	        "Mounts": [
	            {
	                "Type": "volume",
	                "Name": "addons-705802",
	                "Source": "/var/lib/docker/volumes/addons-705802/_data",
	                "Destination": "/var",
	                "Driver": "local",
	                "Mode": "z",
	                "RW": true,
	                "Propagation": ""
	            },
	            {
	                "Type": "bind",
	                "Source": "/lib/modules",
	                "Destination": "/lib/modules",
	                "Mode": "ro",
	                "RW": false,
	                "Propagation": "rprivate"
	            }
	        ],
	        "Config": {
	            "Hostname": "addons-705802",
	            "Domainname": "",
	            "User": "",
	            "AttachStdin": false,
	            "AttachStdout": false,
	            "AttachStderr": false,
	            "ExposedPorts": {
	                "22/tcp": {},
	                "2376/tcp": {},
	                "32443/tcp": {},
	                "5000/tcp": {},
	                "8443/tcp": {}
	            },
	            "Tty": true,
	            "OpenStdin": false,
	            "StdinOnce": false,
	            "Env": [
	                "container=docker",
	                "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
	            ],
	            "Cmd": null,
	            "Image": "gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1718753665-19106@sha256:735aacbd61d487240dc39ba6e4d70dd6ae1ad6181ca2ba092d372605e48ee636",
	            "Volumes": null,
	            "WorkingDir": "/",
	            "Entrypoint": [
	                "/usr/local/bin/entrypoint",
	                "/sbin/init"
	            ],
	            "OnBuild": null,
	            "Labels": {
	                "created_by.minikube.sigs.k8s.io": "true",
	                "mode.minikube.sigs.k8s.io": "addons-705802",
	                "name.minikube.sigs.k8s.io": "addons-705802",
	                "role.minikube.sigs.k8s.io": ""
	            },
	            "StopSignal": "SIGRTMIN+3"
	        },
	        "NetworkSettings": {
	            "Bridge": "",
	            "SandboxID": "2f282c27073e22ad42e117ae42dab7883e861fef3c216a5105fbade45a43b095",
	            "SandboxKey": "/var/run/docker/netns/2f282c27073e",
	            "Ports": {
	                "22/tcp": [
	                    {
	                        "HostIp": "127.0.0.1",
	                        "HostPort": "32772"
	                    }
	                ],
	                "2376/tcp": [
	                    {
	                        "HostIp": "127.0.0.1",
	                        "HostPort": "32771"
	                    }
	                ],
	                "32443/tcp": [
	                    {
	                        "HostIp": "127.0.0.1",
	                        "HostPort": "32768"
	                    }
	                ],
	                "5000/tcp": [
	                    {
	                        "HostIp": "127.0.0.1",
	                        "HostPort": "32770"
	                    }
	                ],
	                "8443/tcp": [
	                    {
	                        "HostIp": "127.0.0.1",
	                        "HostPort": "32769"
	                    }
	                ]
	            },
	            "HairpinMode": false,
	            "LinkLocalIPv6Address": "",
	            "LinkLocalIPv6PrefixLen": 0,
	            "SecondaryIPAddresses": null,
	            "SecondaryIPv6Addresses": null,
	            "EndpointID": "",
	            "Gateway": "",
	            "GlobalIPv6Address": "",
	            "GlobalIPv6PrefixLen": 0,
	            "IPAddress": "",
	            "IPPrefixLen": 0,
	            "IPv6Gateway": "",
	            "MacAddress": "",
	            "Networks": {
	                "addons-705802": {
	                    "IPAMConfig": {
	                        "IPv4Address": "192.168.49.2"
	                    },
	                    "Links": null,
	                    "Aliases": null,
	                    "MacAddress": "02:42:c0:a8:31:02",
	                    "NetworkID": "5c3d62ed7be6219bff1668b2bc740d093e96d834c127ea9467a6b6a5365916b1",
	                    "EndpointID": "6ef4fa8103c15729f9bc1f1a452586707c1ecd5d54434f50fa9e6c5d07c6e975",
	                    "Gateway": "192.168.49.1",
	                    "IPAddress": "192.168.49.2",
	                    "IPPrefixLen": 24,
	                    "IPv6Gateway": "",
	                    "GlobalIPv6Address": "",
	                    "GlobalIPv6PrefixLen": 0,
	                    "DriverOpts": null,
	                    "DNSNames": [
	                        "addons-705802",
	                        "bbbe59ba2132"
	                    ]
	                }
	            }
	        }
	    }
	]

-- /stdout --
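For reference, the two values minikube itself queries later in this log (the published host port for 22/tcp and the container's address on the addons-705802 network) can be read straight from docker inspect with Go-template format strings instead of the full JSON dump above; a minimal sketch using the same templates that appear below:

    docker container inspect -f '{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}' addons-705802   # 32772 in this run
    docker container inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' addons-705802         # 192.168.49.2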
helpers_test.go:239: (dbg) Run:  out/minikube-linux-arm64 status --format={{.Host}} -p addons-705802 -n addons-705802
helpers_test.go:244: <<< TestAddons/parallel/Ingress FAILED: start of post-mortem logs <<<
helpers_test.go:245: ======>  post-mortem[TestAddons/parallel/Ingress]: minikube logs <======
helpers_test.go:247: (dbg) Run:  out/minikube-linux-arm64 -p addons-705802 logs -n 25
helpers_test.go:247: (dbg) Done: out/minikube-linux-arm64 -p addons-705802 logs -n 25: (1.04278425s)
helpers_test.go:252: TestAddons/parallel/Ingress logs: 
-- stdout --
	
	==> Audit <==
	|---------|---------------------------------------------------------------------------------------------|------------------------|---------|---------|---------------------|---------------------|
	| Command |                                            Args                                             |        Profile         |  User   | Version |     Start Time      |      End Time       |
	|---------|---------------------------------------------------------------------------------------------|------------------------|---------|---------|---------------------|---------------------|
	| delete  | -p download-only-475314                                                                     | download-only-475314   | jenkins | v1.33.1 | 20 Jun 24 17:01 UTC | 20 Jun 24 17:01 UTC |
	| delete  | -p download-only-648762                                                                     | download-only-648762   | jenkins | v1.33.1 | 20 Jun 24 17:01 UTC | 20 Jun 24 17:01 UTC |
	| start   | --download-only -p                                                                          | download-docker-388182 | jenkins | v1.33.1 | 20 Jun 24 17:01 UTC |                     |
	|         | download-docker-388182                                                                      |                        |         |         |                     |                     |
	|         | --alsologtostderr                                                                           |                        |         |         |                     |                     |
	|         | --driver=docker                                                                             |                        |         |         |                     |                     |
	|         | --container-runtime=docker                                                                  |                        |         |         |                     |                     |
	| delete  | -p download-docker-388182                                                                   | download-docker-388182 | jenkins | v1.33.1 | 20 Jun 24 17:01 UTC | 20 Jun 24 17:01 UTC |
	| start   | --download-only -p                                                                          | binary-mirror-090394   | jenkins | v1.33.1 | 20 Jun 24 17:01 UTC |                     |
	|         | binary-mirror-090394                                                                        |                        |         |         |                     |                     |
	|         | --alsologtostderr                                                                           |                        |         |         |                     |                     |
	|         | --binary-mirror                                                                             |                        |         |         |                     |                     |
	|         | http://127.0.0.1:40791                                                                      |                        |         |         |                     |                     |
	|         | --driver=docker                                                                             |                        |         |         |                     |                     |
	|         | --container-runtime=docker                                                                  |                        |         |         |                     |                     |
	| delete  | -p binary-mirror-090394                                                                     | binary-mirror-090394   | jenkins | v1.33.1 | 20 Jun 24 17:01 UTC | 20 Jun 24 17:01 UTC |
	| addons  | disable dashboard -p                                                                        | addons-705802          | jenkins | v1.33.1 | 20 Jun 24 17:01 UTC |                     |
	|         | addons-705802                                                                               |                        |         |         |                     |                     |
	| addons  | enable dashboard -p                                                                         | addons-705802          | jenkins | v1.33.1 | 20 Jun 24 17:01 UTC |                     |
	|         | addons-705802                                                                               |                        |         |         |                     |                     |
	| start   | -p addons-705802 --wait=true                                                                | addons-705802          | jenkins | v1.33.1 | 20 Jun 24 17:01 UTC | 20 Jun 24 17:05 UTC |
	|         | --memory=4000 --alsologtostderr                                                             |                        |         |         |                     |                     |
	|         | --addons=registry                                                                           |                        |         |         |                     |                     |
	|         | --addons=metrics-server                                                                     |                        |         |         |                     |                     |
	|         | --addons=volumesnapshots                                                                    |                        |         |         |                     |                     |
	|         | --addons=csi-hostpath-driver                                                                |                        |         |         |                     |                     |
	|         | --addons=gcp-auth                                                                           |                        |         |         |                     |                     |
	|         | --addons=cloud-spanner                                                                      |                        |         |         |                     |                     |
	|         | --addons=inspektor-gadget                                                                   |                        |         |         |                     |                     |
	|         | --addons=storage-provisioner-rancher                                                        |                        |         |         |                     |                     |
	|         | --addons=nvidia-device-plugin                                                               |                        |         |         |                     |                     |
	|         | --addons=yakd --addons=volcano                                                              |                        |         |         |                     |                     |
	|         | --driver=docker                                                                             |                        |         |         |                     |                     |
	|         | --container-runtime=docker                                                                  |                        |         |         |                     |                     |
	|         | --addons=ingress                                                                            |                        |         |         |                     |                     |
	|         | --addons=ingress-dns                                                                        |                        |         |         |                     |                     |
	| addons  | enable headlamp                                                                             | addons-705802          | jenkins | v1.33.1 | 20 Jun 24 17:05 UTC | 20 Jun 24 17:05 UTC |
	|         | -p addons-705802                                                                            |                        |         |         |                     |                     |
	|         | --alsologtostderr -v=1                                                                      |                        |         |         |                     |                     |
	| ip      | addons-705802 ip                                                                            | addons-705802          | jenkins | v1.33.1 | 20 Jun 24 17:05 UTC | 20 Jun 24 17:05 UTC |
	| addons  | addons-705802 addons disable                                                                | addons-705802          | jenkins | v1.33.1 | 20 Jun 24 17:05 UTC | 20 Jun 24 17:05 UTC |
	|         | registry --alsologtostderr                                                                  |                        |         |         |                     |                     |
	|         | -v=1                                                                                        |                        |         |         |                     |                     |
	| addons  | disable nvidia-device-plugin                                                                | addons-705802          | jenkins | v1.33.1 | 20 Jun 24 17:06 UTC | 20 Jun 24 17:06 UTC |
	|         | -p addons-705802                                                                            |                        |         |         |                     |                     |
	| ssh     | addons-705802 ssh cat                                                                       | addons-705802          | jenkins | v1.33.1 | 20 Jun 24 17:06 UTC | 20 Jun 24 17:06 UTC |
	|         | /opt/local-path-provisioner/pvc-d4a82456-6a8b-4314-80b4-9dc88708fba6_default_test-pvc/file1 |                        |         |         |                     |                     |
	| addons  | addons-705802 addons disable                                                                | addons-705802          | jenkins | v1.33.1 | 20 Jun 24 17:06 UTC | 20 Jun 24 17:06 UTC |
	|         | storage-provisioner-rancher                                                                 |                        |         |         |                     |                     |
	|         | --alsologtostderr -v=1                                                                      |                        |         |         |                     |                     |
	| addons  | addons-705802 addons disable                                                                | addons-705802          | jenkins | v1.33.1 | 20 Jun 24 17:06 UTC | 20 Jun 24 17:06 UTC |
	|         | volcano --alsologtostderr -v=1                                                              |                        |         |         |                     |                     |
	| addons  | disable cloud-spanner -p                                                                    | addons-705802          | jenkins | v1.33.1 | 20 Jun 24 17:06 UTC | 20 Jun 24 17:06 UTC |
	|         | addons-705802                                                                               |                        |         |         |                     |                     |
	| addons  | disable inspektor-gadget -p                                                                 | addons-705802          | jenkins | v1.33.1 | 20 Jun 24 17:07 UTC | 20 Jun 24 17:07 UTC |
	|         | addons-705802                                                                               |                        |         |         |                     |                     |
	| addons  | addons-705802 addons                                                                        | addons-705802          | jenkins | v1.33.1 | 20 Jun 24 17:07 UTC | 20 Jun 24 17:07 UTC |
	|         | disable metrics-server                                                                      |                        |         |         |                     |                     |
	|         | --alsologtostderr -v=1                                                                      |                        |         |         |                     |                     |
	| ssh     | addons-705802 ssh curl -s                                                                   | addons-705802          | jenkins | v1.33.1 | 20 Jun 24 17:07 UTC | 20 Jun 24 17:07 UTC |
	|         | http://127.0.0.1/ -H 'Host:                                                                 |                        |         |         |                     |                     |
	|         | nginx.example.com'                                                                          |                        |         |         |                     |                     |
	| ip      | addons-705802 ip                                                                            | addons-705802          | jenkins | v1.33.1 | 20 Jun 24 17:07 UTC | 20 Jun 24 17:07 UTC |
	| addons  | addons-705802 addons                                                                        | addons-705802          | jenkins | v1.33.1 | 20 Jun 24 17:07 UTC | 20 Jun 24 17:07 UTC |
	|         | disable csi-hostpath-driver                                                                 |                        |         |         |                     |                     |
	|         | --alsologtostderr -v=1                                                                      |                        |         |         |                     |                     |
	| addons  | addons-705802 addons                                                                        | addons-705802          | jenkins | v1.33.1 | 20 Jun 24 17:07 UTC | 20 Jun 24 17:07 UTC |
	|         | disable volumesnapshots                                                                     |                        |         |         |                     |                     |
	|         | --alsologtostderr -v=1                                                                      |                        |         |         |                     |                     |
	| addons  | addons-705802 addons disable                                                                | addons-705802          | jenkins | v1.33.1 | 20 Jun 24 17:07 UTC | 20 Jun 24 17:07 UTC |
	|         | ingress-dns --alsologtostderr                                                               |                        |         |         |                     |                     |
	|         | -v=1                                                                                        |                        |         |         |                     |                     |
	| addons  | addons-705802 addons disable                                                                | addons-705802          | jenkins | v1.33.1 | 20 Jun 24 17:07 UTC | 20 Jun 24 17:07 UTC |
	|         | ingress --alsologtostderr -v=1                                                              |                        |         |         |                     |                     |
	|---------|---------------------------------------------------------------------------------------------|------------------------|---------|---------|---------------------|---------------------|
	
	
	==> Last Start <==
	Log file created at: 2024/06/20 17:01:46
	Running on machine: ip-172-31-30-239
	Binary: Built with gc go1.22.4 for linux/arm64
	Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg
	I0620 17:01:46.797177    8431 out.go:291] Setting OutFile to fd 1 ...
	I0620 17:01:46.797322    8431 out.go:338] TERM=,COLORTERM=, which probably does not support color
	I0620 17:01:46.797334    8431 out.go:304] Setting ErrFile to fd 2...
	I0620 17:01:46.797339    8431 out.go:338] TERM=,COLORTERM=, which probably does not support color
	I0620 17:01:46.797559    8431 root.go:338] Updating PATH: /home/jenkins/minikube-integration/19106-2452/.minikube/bin
	I0620 17:01:46.797993    8431 out.go:298] Setting JSON to false
	I0620 17:01:46.798699    8431 start.go:129] hostinfo: {"hostname":"ip-172-31-30-239","uptime":2658,"bootTime":1718900249,"procs":146,"os":"linux","platform":"ubuntu","platformFamily":"debian","platformVersion":"20.04","kernelVersion":"5.15.0-1063-aws","kernelArch":"aarch64","virtualizationSystem":"","virtualizationRole":"","hostId":"92f46a7d-c249-4c12-924a-77f64874c910"}
	I0620 17:01:46.798767    8431 start.go:139] virtualization:  
	I0620 17:01:46.801375    8431 out.go:177] * [addons-705802] minikube v1.33.1 on Ubuntu 20.04 (arm64)
	I0620 17:01:46.804561    8431 out.go:177]   - MINIKUBE_LOCATION=19106
	I0620 17:01:46.804723    8431 notify.go:220] Checking for updates...
	I0620 17:01:46.809126    8431 out.go:177]   - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
	I0620 17:01:46.811288    8431 out.go:177]   - KUBECONFIG=/home/jenkins/minikube-integration/19106-2452/kubeconfig
	I0620 17:01:46.814129    8431 out.go:177]   - MINIKUBE_HOME=/home/jenkins/minikube-integration/19106-2452/.minikube
	I0620 17:01:46.816062    8431 out.go:177]   - MINIKUBE_BIN=out/minikube-linux-arm64
	I0620 17:01:46.817890    8431 out.go:177]   - MINIKUBE_FORCE_SYSTEMD=
	I0620 17:01:46.820047    8431 driver.go:392] Setting default libvirt URI to qemu:///system
	I0620 17:01:46.846394    8431 docker.go:122] docker version: linux-26.1.4:Docker Engine - Community
	I0620 17:01:46.846506    8431 cli_runner.go:164] Run: docker system info --format "{{json .}}"
	I0620 17:01:46.908210    8431 info.go:266] docker info: {ID:6ZPO:QZND:VNGE:LUKL:4Y3K:XELL:AAX4:2GTK:E6LM:MPRN:3ZXR:TTMR Containers:0 ContainersRunning:0 ContainersPaused:0 ContainersStopped:0 Images:1 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:true BridgeNfIP6Tables:true Debug:false NFd:29 OomKillDisable:true NGoroutines:45 SystemTime:2024-06-20 17:01:46.898742745 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1063-aws OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Architecture:aarch64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:2 MemTotal:8214900736 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ip-172-31-30-239 Labels:[] ExperimentalBuild:false ServerVersion:26.1.4 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:d2d58213f83a351ca8f528a95fbd145f5654e957 Expected:d2d58213f83a351ca8f528a95fbd145f5654e957} RuncCommit:{ID:v1.1.12-0-g51d5e94 Expected:v1.1.12-0-g51d5e94} InitCommit:{ID:de40ad0 Expected:de40ad0} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.14.1] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.27.1]] Warnings:<nil>}}
	I0620 17:01:46.908319    8431 docker.go:295] overlay module found
	I0620 17:01:46.910765    8431 out.go:177] * Using the docker driver based on user configuration
	I0620 17:01:46.912519    8431 start.go:297] selected driver: docker
	I0620 17:01:46.912537    8431 start.go:901] validating driver "docker" against <nil>
	I0620 17:01:46.912551    8431 start.go:912] status for docker: {Installed:true Healthy:true Running:false NeedsImprovement:false Error:<nil> Reason: Fix: Doc: Version:}
	I0620 17:01:46.913191    8431 cli_runner.go:164] Run: docker system info --format "{{json .}}"
	I0620 17:01:46.969854    8431 info.go:266] docker info: {ID:6ZPO:QZND:VNGE:LUKL:4Y3K:XELL:AAX4:2GTK:E6LM:MPRN:3ZXR:TTMR Containers:0 ContainersRunning:0 ContainersPaused:0 ContainersStopped:0 Images:1 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:true BridgeNfIP6Tables:true Debug:false NFd:29 OomKillDisable:true NGoroutines:45 SystemTime:2024-06-20 17:01:46.961290862 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1063-aws OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Architecture:aarch64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:2 MemTotal:8214900736 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ip-172-31-30-239 Labels:[] ExperimentalBuild:false ServerVersion:26.1.4 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:d2d58213f83a351ca8f528a95fbd145f5654e957 Expected:d2d58213f83a351ca8f528a95fbd145f5654e957} RuncCommit:{ID:v1.1.12-0-g51d5e94 Expected:v1.1.12-0-g51d5e94} InitCommit:{ID:de40ad0 Expected:de40ad0} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.14.1] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.27.1]] Warnings:<nil>}}
	I0620 17:01:46.970021    8431 start_flags.go:310] no existing cluster config was found, will generate one from the flags 
	I0620 17:01:46.970242    8431 start_flags.go:947] Waiting for all components: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
	I0620 17:01:46.972163    8431 out.go:177] * Using Docker driver with root privileges
	I0620 17:01:46.974233    8431 cni.go:84] Creating CNI manager for ""
	I0620 17:01:46.974266    8431 cni.go:158] "docker" driver + "docker" container runtime found on kubernetes v1.24+, recommending bridge
	I0620 17:01:46.974275    8431 start_flags.go:319] Found "bridge CNI" CNI - setting NetworkPlugin=cni
	I0620 17:01:46.974359    8431 start.go:340] cluster config:
	{Name:addons-705802 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1718753665-19106@sha256:735aacbd61d487240dc39ba6e4d70dd6ae1ad6181ca2ba092d372605e48ee636 Memory:4000 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.30.2 ClusterName:addons-705802 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.30.2 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
	I0620 17:01:46.976591    8431 out.go:177] * Starting "addons-705802" primary control-plane node in "addons-705802" cluster
	I0620 17:01:46.979041    8431 cache.go:121] Beginning downloading kic base image for docker with docker
	I0620 17:01:46.981125    8431 out.go:177] * Pulling base image v0.0.44-1718753665-19106 ...
	I0620 17:01:46.983361    8431 preload.go:132] Checking if preload exists for k8s version v1.30.2 and runtime docker
	I0620 17:01:46.983435    8431 preload.go:147] Found local preload: /home/jenkins/minikube-integration/19106-2452/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.30.2-docker-overlay2-arm64.tar.lz4
	I0620 17:01:46.983452    8431 cache.go:56] Caching tarball of preloaded images
	I0620 17:01:46.983459    8431 image.go:79] Checking for gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1718753665-19106@sha256:735aacbd61d487240dc39ba6e4d70dd6ae1ad6181ca2ba092d372605e48ee636 in local docker daemon
	I0620 17:01:46.983530    8431 preload.go:173] Found /home/jenkins/minikube-integration/19106-2452/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.30.2-docker-overlay2-arm64.tar.lz4 in cache, skipping download
	I0620 17:01:46.983540    8431 cache.go:59] Finished verifying existence of preloaded tar for v1.30.2 on docker
	I0620 17:01:46.983881    8431 profile.go:143] Saving config to /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/addons-705802/config.json ...
	I0620 17:01:46.983909    8431 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/addons-705802/config.json: {Name:mk8092ed4ba68f2fadffafa227bb26dad0102414 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
	I0620 17:01:46.999848    8431 cache.go:149] Downloading gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1718753665-19106@sha256:735aacbd61d487240dc39ba6e4d70dd6ae1ad6181ca2ba092d372605e48ee636 to local cache
	I0620 17:01:47.000007    8431 image.go:63] Checking for gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1718753665-19106@sha256:735aacbd61d487240dc39ba6e4d70dd6ae1ad6181ca2ba092d372605e48ee636 in local cache directory
	I0620 17:01:47.000030    8431 image.go:66] Found gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1718753665-19106@sha256:735aacbd61d487240dc39ba6e4d70dd6ae1ad6181ca2ba092d372605e48ee636 in local cache directory, skipping pull
	I0620 17:01:47.000035    8431 image.go:105] gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1718753665-19106@sha256:735aacbd61d487240dc39ba6e4d70dd6ae1ad6181ca2ba092d372605e48ee636 exists in cache, skipping pull
	I0620 17:01:47.000042    8431 cache.go:152] successfully saved gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1718753665-19106@sha256:735aacbd61d487240dc39ba6e4d70dd6ae1ad6181ca2ba092d372605e48ee636 as a tarball
	I0620 17:01:47.000051    8431 cache.go:162] Loading gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1718753665-19106@sha256:735aacbd61d487240dc39ba6e4d70dd6ae1ad6181ca2ba092d372605e48ee636 from local cache
	I0620 17:02:03.556270    8431 cache.go:164] successfully loaded and using gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1718753665-19106@sha256:735aacbd61d487240dc39ba6e4d70dd6ae1ad6181ca2ba092d372605e48ee636 from cached tarball
	I0620 17:02:03.556307    8431 cache.go:194] Successfully downloaded all kic artifacts
	I0620 17:02:03.556361    8431 start.go:360] acquireMachinesLock for addons-705802: {Name:mk2e8a2a292f9884930ea1a50d709198f4f6c1b6 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
	I0620 17:02:03.556478    8431 start.go:364] duration metric: took 87.104µs to acquireMachinesLock for "addons-705802"
	I0620 17:02:03.556509    8431 start.go:93] Provisioning new machine with config: &{Name:addons-705802 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1718753665-19106@sha256:735aacbd61d487240dc39ba6e4d70dd6ae1ad6181ca2ba092d372605e48ee636 Memory:4000 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.30.2 ClusterName:addons-705802 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.30.2 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} &{Name: IP: Port:8443 KubernetesVersion:v1.30.2 ContainerRuntime:docker ControlPlane:true Worker:true}
	I0620 17:02:03.556590    8431 start.go:125] createHost starting for "" (driver="docker")
	I0620 17:02:03.559037    8431 out.go:204] * Creating docker container (CPUs=2, Memory=4000MB) ...
	I0620 17:02:03.559281    8431 start.go:159] libmachine.API.Create for "addons-705802" (driver="docker")
	I0620 17:02:03.559314    8431 client.go:168] LocalClient.Create starting
	I0620 17:02:03.559423    8431 main.go:141] libmachine: Creating CA: /home/jenkins/minikube-integration/19106-2452/.minikube/certs/ca.pem
	I0620 17:02:04.147386    8431 main.go:141] libmachine: Creating client certificate: /home/jenkins/minikube-integration/19106-2452/.minikube/certs/cert.pem
	I0620 17:02:05.015625    8431 cli_runner.go:164] Run: docker network inspect addons-705802 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
	W0620 17:02:05.031007    8431 cli_runner.go:211] docker network inspect addons-705802 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}" returned with exit code 1
	I0620 17:02:05.031095    8431 network_create.go:284] running [docker network inspect addons-705802] to gather additional debugging logs...
	I0620 17:02:05.031116    8431 cli_runner.go:164] Run: docker network inspect addons-705802
	W0620 17:02:05.049636    8431 cli_runner.go:211] docker network inspect addons-705802 returned with exit code 1
	I0620 17:02:05.049668    8431 network_create.go:287] error running [docker network inspect addons-705802]: docker network inspect addons-705802: exit status 1
	stdout:
	[]
	
	stderr:
	Error response from daemon: network addons-705802 not found
	I0620 17:02:05.049681    8431 network_create.go:289] output of [docker network inspect addons-705802]: -- stdout --
	[]
	
	-- /stdout --
	** stderr ** 
	Error response from daemon: network addons-705802 not found
	
	** /stderr **
	I0620 17:02:05.049795    8431 cli_runner.go:164] Run: docker network inspect bridge --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
	I0620 17:02:05.065207    8431 network.go:206] using free private subnet 192.168.49.0/24: &{IP:192.168.49.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.49.0/24 Gateway:192.168.49.1 ClientMin:192.168.49.2 ClientMax:192.168.49.254 Broadcast:192.168.49.255 IsPrivate:true Interface:{IfaceName: IfaceIPv4: IfaceMTU:0 IfaceMAC:} reservation:0x40006b4d20}
	I0620 17:02:05.065247    8431 network_create.go:124] attempt to create docker network addons-705802 192.168.49.0/24 with gateway 192.168.49.1 and MTU of 1500 ...
	I0620 17:02:05.065302    8431 cli_runner.go:164] Run: docker network create --driver=bridge --subnet=192.168.49.0/24 --gateway=192.168.49.1 -o --ip-masq -o --icc -o com.docker.network.driver.mtu=1500 --label=created_by.minikube.sigs.k8s.io=true --label=name.minikube.sigs.k8s.io=addons-705802 addons-705802
	I0620 17:02:05.133371    8431 network_create.go:108] docker network addons-705802 192.168.49.0/24 created
	I0620 17:02:05.133404    8431 kic.go:121] calculated static IP "192.168.49.2" for the "addons-705802" container
	I0620 17:02:05.133477    8431 cli_runner.go:164] Run: docker ps -a --format {{.Names}}
	I0620 17:02:05.146626    8431 cli_runner.go:164] Run: docker volume create addons-705802 --label name.minikube.sigs.k8s.io=addons-705802 --label created_by.minikube.sigs.k8s.io=true
	I0620 17:02:05.164334    8431 oci.go:103] Successfully created a docker volume addons-705802
	I0620 17:02:05.164444    8431 cli_runner.go:164] Run: docker run --rm --name addons-705802-preload-sidecar --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=addons-705802 --entrypoint /usr/bin/test -v addons-705802:/var gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1718753665-19106@sha256:735aacbd61d487240dc39ba6e4d70dd6ae1ad6181ca2ba092d372605e48ee636 -d /var/lib
	I0620 17:02:07.554766    8431 cli_runner.go:217] Completed: docker run --rm --name addons-705802-preload-sidecar --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=addons-705802 --entrypoint /usr/bin/test -v addons-705802:/var gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1718753665-19106@sha256:735aacbd61d487240dc39ba6e4d70dd6ae1ad6181ca2ba092d372605e48ee636 -d /var/lib: (2.390277597s)
	I0620 17:02:07.554795    8431 oci.go:107] Successfully prepared a docker volume addons-705802
	I0620 17:02:07.554824    8431 preload.go:132] Checking if preload exists for k8s version v1.30.2 and runtime docker
	I0620 17:02:07.554843    8431 kic.go:194] Starting extracting preloaded images to volume ...
	I0620 17:02:07.554934    8431 cli_runner.go:164] Run: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/19106-2452/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.30.2-docker-overlay2-arm64.tar.lz4:/preloaded.tar:ro -v addons-705802:/extractDir gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1718753665-19106@sha256:735aacbd61d487240dc39ba6e4d70dd6ae1ad6181ca2ba092d372605e48ee636 -I lz4 -xf /preloaded.tar -C /extractDir
	I0620 17:02:11.503519    8431 cli_runner.go:217] Completed: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/19106-2452/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.30.2-docker-overlay2-arm64.tar.lz4:/preloaded.tar:ro -v addons-705802:/extractDir gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1718753665-19106@sha256:735aacbd61d487240dc39ba6e4d70dd6ae1ad6181ca2ba092d372605e48ee636 -I lz4 -xf /preloaded.tar -C /extractDir: (3.948548056s)
	I0620 17:02:11.503550    8431 kic.go:203] duration metric: took 3.948703696s to extract preloaded images to volume ...
	W0620 17:02:11.503707    8431 cgroups_linux.go:77] Your kernel does not support swap limit capabilities or the cgroup is not mounted.
	I0620 17:02:11.503817    8431 cli_runner.go:164] Run: docker info --format "'{{json .SecurityOptions}}'"
	I0620 17:02:11.560863    8431 cli_runner.go:164] Run: docker run -d -t --privileged --security-opt seccomp=unconfined --tmpfs /tmp --tmpfs /run -v /lib/modules:/lib/modules:ro --hostname addons-705802 --name addons-705802 --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=addons-705802 --label role.minikube.sigs.k8s.io= --label mode.minikube.sigs.k8s.io=addons-705802 --network addons-705802 --ip 192.168.49.2 --volume addons-705802:/var --security-opt apparmor=unconfined --memory=4000mb --cpus=2 -e container=docker --expose 8443 --publish=127.0.0.1::8443 --publish=127.0.0.1::22 --publish=127.0.0.1::2376 --publish=127.0.0.1::5000 --publish=127.0.0.1::32443 gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1718753665-19106@sha256:735aacbd61d487240dc39ba6e4d70dd6ae1ad6181ca2ba092d372605e48ee636
	I0620 17:02:11.910234    8431 cli_runner.go:164] Run: docker container inspect addons-705802 --format={{.State.Running}}
	I0620 17:02:11.941553    8431 cli_runner.go:164] Run: docker container inspect addons-705802 --format={{.State.Status}}
	I0620 17:02:11.965756    8431 cli_runner.go:164] Run: docker exec addons-705802 stat /var/lib/dpkg/alternatives/iptables
	I0620 17:02:12.033251    8431 oci.go:144] the created container "addons-705802" has a running status.
	I0620 17:02:12.033285    8431 kic.go:225] Creating ssh key for kic: /home/jenkins/minikube-integration/19106-2452/.minikube/machines/addons-705802/id_rsa...
	I0620 17:02:12.209067    8431 kic_runner.go:191] docker (temp): /home/jenkins/minikube-integration/19106-2452/.minikube/machines/addons-705802/id_rsa.pub --> /home/docker/.ssh/authorized_keys (381 bytes)
	I0620 17:02:12.232255    8431 cli_runner.go:164] Run: docker container inspect addons-705802 --format={{.State.Status}}
	I0620 17:02:12.263587    8431 kic_runner.go:93] Run: chown docker:docker /home/docker/.ssh/authorized_keys
	I0620 17:02:12.263606    8431 kic_runner.go:114] Args: [docker exec --privileged addons-705802 chown docker:docker /home/docker/.ssh/authorized_keys]
	I0620 17:02:12.321645    8431 cli_runner.go:164] Run: docker container inspect addons-705802 --format={{.State.Status}}
	I0620 17:02:12.344164    8431 machine.go:94] provisionDockerMachine start ...
	I0620 17:02:12.344255    8431 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-705802
	I0620 17:02:12.365478    8431 main.go:141] libmachine: Using SSH client type: native
	I0620 17:02:12.365726    8431 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x3e2bc0] 0x3e5420 <nil>  [] 0s} 127.0.0.1 32772 <nil> <nil>}
	I0620 17:02:12.365734    8431 main.go:141] libmachine: About to run SSH command:
	hostname
	I0620 17:02:12.366380    8431 main.go:141] libmachine: Error dialing TCP: ssh: handshake failed: read tcp 127.0.0.1:36786->127.0.0.1:32772: read: connection reset by peer
	I0620 17:02:15.494391    8431 main.go:141] libmachine: SSH cmd err, output: <nil>: addons-705802
	
	I0620 17:02:15.494416    8431 ubuntu.go:169] provisioning hostname "addons-705802"
	I0620 17:02:15.494484    8431 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-705802
	I0620 17:02:15.511642    8431 main.go:141] libmachine: Using SSH client type: native
	I0620 17:02:15.511897    8431 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x3e2bc0] 0x3e5420 <nil>  [] 0s} 127.0.0.1 32772 <nil> <nil>}
	I0620 17:02:15.511914    8431 main.go:141] libmachine: About to run SSH command:
	sudo hostname addons-705802 && echo "addons-705802" | sudo tee /etc/hostname
	I0620 17:02:15.655794    8431 main.go:141] libmachine: SSH cmd err, output: <nil>: addons-705802
	
	I0620 17:02:15.655877    8431 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-705802
	I0620 17:02:15.673225    8431 main.go:141] libmachine: Using SSH client type: native
	I0620 17:02:15.673473    8431 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x3e2bc0] 0x3e5420 <nil>  [] 0s} 127.0.0.1 32772 <nil> <nil>}
	I0620 17:02:15.673494    8431 main.go:141] libmachine: About to run SSH command:
	
			if ! grep -xq '.*\saddons-705802' /etc/hosts; then
				if grep -xq '127.0.1.1\s.*' /etc/hosts; then
					sudo sed -i 's/^127.0.1.1\s.*/127.0.1.1 addons-705802/g' /etc/hosts;
				else 
					echo '127.0.1.1 addons-705802' | sudo tee -a /etc/hosts; 
				fi
			fi
	I0620 17:02:15.803113    8431 main.go:141] libmachine: SSH cmd err, output: <nil>: 
	I0620 17:02:15.803141    8431 ubuntu.go:175] set auth options {CertDir:/home/jenkins/minikube-integration/19106-2452/.minikube CaCertPath:/home/jenkins/minikube-integration/19106-2452/.minikube/certs/ca.pem CaPrivateKeyPath:/home/jenkins/minikube-integration/19106-2452/.minikube/certs/ca-key.pem CaCertRemotePath:/etc/docker/ca.pem ServerCertPath:/home/jenkins/minikube-integration/19106-2452/.minikube/machines/server.pem ServerKeyPath:/home/jenkins/minikube-integration/19106-2452/.minikube/machines/server-key.pem ClientKeyPath:/home/jenkins/minikube-integration/19106-2452/.minikube/certs/key.pem ServerCertRemotePath:/etc/docker/server.pem ServerKeyRemotePath:/etc/docker/server-key.pem ClientCertPath:/home/jenkins/minikube-integration/19106-2452/.minikube/certs/cert.pem ServerCertSANs:[] StorePath:/home/jenkins/minikube-integration/19106-2452/.minikube}
	I0620 17:02:15.803171    8431 ubuntu.go:177] setting up certificates
	I0620 17:02:15.803180    8431 provision.go:84] configureAuth start
	I0620 17:02:15.803254    8431 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" addons-705802
	I0620 17:02:15.820171    8431 provision.go:143] copyHostCerts
	I0620 17:02:15.820260    8431 exec_runner.go:151] cp: /home/jenkins/minikube-integration/19106-2452/.minikube/certs/ca.pem --> /home/jenkins/minikube-integration/19106-2452/.minikube/ca.pem (1078 bytes)
	I0620 17:02:15.820387    8431 exec_runner.go:151] cp: /home/jenkins/minikube-integration/19106-2452/.minikube/certs/cert.pem --> /home/jenkins/minikube-integration/19106-2452/.minikube/cert.pem (1123 bytes)
	I0620 17:02:15.820451    8431 exec_runner.go:151] cp: /home/jenkins/minikube-integration/19106-2452/.minikube/certs/key.pem --> /home/jenkins/minikube-integration/19106-2452/.minikube/key.pem (1675 bytes)
	I0620 17:02:15.820504    8431 provision.go:117] generating server cert: /home/jenkins/minikube-integration/19106-2452/.minikube/machines/server.pem ca-key=/home/jenkins/minikube-integration/19106-2452/.minikube/certs/ca.pem private-key=/home/jenkins/minikube-integration/19106-2452/.minikube/certs/ca-key.pem org=jenkins.addons-705802 san=[127.0.0.1 192.168.49.2 addons-705802 localhost minikube]
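(The server cert above is signed against the profile CA with a fixed SAN set: 127.0.0.1, 192.168.49.2, addons-705802, localhost, minikube. As a reference sketch only, not minikube's provision code, the same SAN layout can be produced with Go's standard crypto/x509; this version self-signs for brevity, whereas the log shows signing with the minikube CA key.)

package main

import (
	"crypto/rand"
	"crypto/rsa"
	"crypto/x509"
	"crypto/x509/pkix"
	"encoding/pem"
	"math/big"
	"net"
	"os"
	"time"
)

func main() {
	// SAN set mirrors the provision.go log line above; key size and validity are arbitrary.
	key, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		panic(err)
	}
	tmpl := &x509.Certificate{
		SerialNumber: big.NewInt(1),
		Subject:      pkix.Name{Organization: []string{"jenkins.addons-705802"}},
		NotBefore:    time.Now(),
		NotAfter:     time.Now().Add(24 * time.Hour),
		KeyUsage:     x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature,
		ExtKeyUsage:  []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
		DNSNames:     []string{"addons-705802", "localhost", "minikube"},
		IPAddresses:  []net.IP{net.ParseIP("127.0.0.1"), net.ParseIP("192.168.49.2")},
	}
	// Self-signed here to keep the sketch short; minikube signs with ca.pem/ca-key.pem instead.
	der, err := x509.CreateCertificate(rand.Reader, tmpl, tmpl, &key.PublicKey, key)
	if err != nil {
		panic(err)
	}
	pem.Encode(os.Stdout, &pem.Block{Type: "CERTIFICATE", Bytes: der})
}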
	I0620 17:02:16.245070    8431 provision.go:177] copyRemoteCerts
	I0620 17:02:16.245137    8431 ssh_runner.go:195] Run: sudo mkdir -p /etc/docker /etc/docker /etc/docker
	I0620 17:02:16.245182    8431 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-705802
	I0620 17:02:16.261353    8431 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32772 SSHKeyPath:/home/jenkins/minikube-integration/19106-2452/.minikube/machines/addons-705802/id_rsa Username:docker}
	I0620 17:02:16.355590    8431 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19106-2452/.minikube/certs/ca.pem --> /etc/docker/ca.pem (1078 bytes)
	I0620 17:02:16.379360    8431 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19106-2452/.minikube/machines/server.pem --> /etc/docker/server.pem (1208 bytes)
	I0620 17:02:16.402756    8431 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19106-2452/.minikube/machines/server-key.pem --> /etc/docker/server-key.pem (1675 bytes)
	I0620 17:02:16.426685    8431 provision.go:87] duration metric: took 623.490205ms to configureAuth
	I0620 17:02:16.426710    8431 ubuntu.go:193] setting minikube options for container-runtime
	I0620 17:02:16.426904    8431 config.go:182] Loaded profile config "addons-705802": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.30.2
	I0620 17:02:16.426971    8431 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-705802
	I0620 17:02:16.443709    8431 main.go:141] libmachine: Using SSH client type: native
	I0620 17:02:16.443949    8431 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x3e2bc0] 0x3e5420 <nil>  [] 0s} 127.0.0.1 32772 <nil> <nil>}
	I0620 17:02:16.443963    8431 main.go:141] libmachine: About to run SSH command:
	df --output=fstype / | tail -n 1
	I0620 17:02:16.576017    8431 main.go:141] libmachine: SSH cmd err, output: <nil>: overlay
	
	I0620 17:02:16.576058    8431 ubuntu.go:71] root file system type: overlay
	I0620 17:02:16.576172    8431 provision.go:314] Updating docker unit: /lib/systemd/system/docker.service ...
	I0620 17:02:16.576248    8431 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-705802
	I0620 17:02:16.592308    8431 main.go:141] libmachine: Using SSH client type: native
	I0620 17:02:16.592547    8431 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x3e2bc0] 0x3e5420 <nil>  [] 0s} 127.0.0.1 32772 <nil> <nil>}
	I0620 17:02:16.592627    8431 main.go:141] libmachine: About to run SSH command:
	sudo mkdir -p /lib/systemd/system && printf %!s(MISSING) "[Unit]
	Description=Docker Application Container Engine
	Documentation=https://docs.docker.com
	BindsTo=containerd.service
	After=network-online.target firewalld.service containerd.service
	Wants=network-online.target
	Requires=docker.socket
	StartLimitBurst=3
	StartLimitIntervalSec=60
	
	[Service]
	Type=notify
	Restart=on-failure
	
	
	
	# This file is a systemd drop-in unit that inherits from the base dockerd configuration.
	# The base configuration already specifies an 'ExecStart=...' command. The first directive
	# here is to clear out that command inherited from the base configuration. Without this,
	# the command from the base configuration and the command specified here are treated as
	# a sequence of commands, which is not the desired behavior, nor is it valid -- systemd
	# will catch this invalid input and refuse to start the service with an error like:
	#  Service has more than one ExecStart= setting, which is only allowed for Type=oneshot services.
	
	# NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other
	# container runtimes. If left unlimited, it may result in OOM issues with MySQL.
	ExecStart=
	ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=docker --insecure-registry 10.96.0.0/12 
	ExecReload=/bin/kill -s HUP \$MAINPID
	
	# Having non-zero Limit*s causes performance problems due to accounting overhead
	# in the kernel. We recommend using cgroups to do container-local accounting.
	LimitNOFILE=infinity
	LimitNPROC=infinity
	LimitCORE=infinity
	
	# Uncomment TasksMax if your systemd version supports it.
	# Only systemd 226 and above support this version.
	TasksMax=infinity
	TimeoutStartSec=0
	
	# set delegate yes so that systemd does not reset the cgroups of docker containers
	Delegate=yes
	
	# kill only the docker process, not all processes in the cgroup
	KillMode=process
	
	[Install]
	WantedBy=multi-user.target
	" | sudo tee /lib/systemd/system/docker.service.new
	I0620 17:02:16.734507    8431 main.go:141] libmachine: SSH cmd err, output: <nil>: [Unit]
	Description=Docker Application Container Engine
	Documentation=https://docs.docker.com
	BindsTo=containerd.service
	After=network-online.target firewalld.service containerd.service
	Wants=network-online.target
	Requires=docker.socket
	StartLimitBurst=3
	StartLimitIntervalSec=60
	
	[Service]
	Type=notify
	Restart=on-failure
	
	
	
	# This file is a systemd drop-in unit that inherits from the base dockerd configuration.
	# The base configuration already specifies an 'ExecStart=...' command. The first directive
	# here is to clear out that command inherited from the base configuration. Without this,
	# the command from the base configuration and the command specified here are treated as
	# a sequence of commands, which is not the desired behavior, nor is it valid -- systemd
	# will catch this invalid input and refuse to start the service with an error like:
	#  Service has more than one ExecStart= setting, which is only allowed for Type=oneshot services.
	
	# NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other
	# container runtimes. If left unlimited, it may result in OOM issues with MySQL.
	ExecStart=
	ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=docker --insecure-registry 10.96.0.0/12 
	ExecReload=/bin/kill -s HUP $MAINPID
	
	# Having non-zero Limit*s causes performance problems due to accounting overhead
	# in the kernel. We recommend using cgroups to do container-local accounting.
	LimitNOFILE=infinity
	LimitNPROC=infinity
	LimitCORE=infinity
	
	# Uncomment TasksMax if your systemd version supports it.
	# Only systemd 226 and above support this version.
	TasksMax=infinity
	TimeoutStartSec=0
	
	# set delegate yes so that systemd does not reset the cgroups of docker containers
	Delegate=yes
	
	# kill only the docker process, not all processes in the cgroup
	KillMode=process
	
	[Install]
	WantedBy=multi-user.target
	
	I0620 17:02:16.734589    8431 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-705802
	I0620 17:02:16.751467    8431 main.go:141] libmachine: Using SSH client type: native
	I0620 17:02:16.751712    8431 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x3e2bc0] 0x3e5420 <nil>  [] 0s} 127.0.0.1 32772 <nil> <nil>}
	I0620 17:02:16.751734    8431 main.go:141] libmachine: About to run SSH command:
	sudo diff -u /lib/systemd/system/docker.service /lib/systemd/system/docker.service.new || { sudo mv /lib/systemd/system/docker.service.new /lib/systemd/system/docker.service; sudo systemctl -f daemon-reload && sudo systemctl -f enable docker && sudo systemctl -f restart docker; }
	I0620 17:02:17.477024    8431 main.go:141] libmachine: SSH cmd err, output: <nil>: --- /lib/systemd/system/docker.service	2024-06-05 11:27:22.000000000 +0000
	+++ /lib/systemd/system/docker.service.new	2024-06-20 17:02:16.730535213 +0000
	@@ -1,46 +1,49 @@
	 [Unit]
	 Description=Docker Application Container Engine
	 Documentation=https://docs.docker.com
	-After=network-online.target docker.socket firewalld.service containerd.service time-set.target
	-Wants=network-online.target containerd.service
	+BindsTo=containerd.service
	+After=network-online.target firewalld.service containerd.service
	+Wants=network-online.target
	 Requires=docker.socket
	+StartLimitBurst=3
	+StartLimitIntervalSec=60
	 
	 [Service]
	 Type=notify
	-# the default is not to use systemd for cgroups because the delegate issues still
	-# exists and systemd currently does not support the cgroup feature set required
	-# for containers run by docker
	-ExecStart=/usr/bin/dockerd -H fd:// --containerd=/run/containerd/containerd.sock
	-ExecReload=/bin/kill -s HUP $MAINPID
	-TimeoutStartSec=0
	-RestartSec=2
	-Restart=always
	+Restart=on-failure
	 
	-# Note that StartLimit* options were moved from "Service" to "Unit" in systemd 229.
	-# Both the old, and new location are accepted by systemd 229 and up, so using the old location
	-# to make them work for either version of systemd.
	-StartLimitBurst=3
	 
	-# Note that StartLimitInterval was renamed to StartLimitIntervalSec in systemd 230.
	-# Both the old, and new name are accepted by systemd 230 and up, so using the old name to make
	-# this option work for either version of systemd.
	-StartLimitInterval=60s
	+
	+# This file is a systemd drop-in unit that inherits from the base dockerd configuration.
	+# The base configuration already specifies an 'ExecStart=...' command. The first directive
	+# here is to clear out that command inherited from the base configuration. Without this,
	+# the command from the base configuration and the command specified here are treated as
	+# a sequence of commands, which is not the desired behavior, nor is it valid -- systemd
	+# will catch this invalid input and refuse to start the service with an error like:
	+#  Service has more than one ExecStart= setting, which is only allowed for Type=oneshot services.
	+
	+# NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other
	+# container runtimes. If left unlimited, it may result in OOM issues with MySQL.
	+ExecStart=
	+ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=docker --insecure-registry 10.96.0.0/12 
	+ExecReload=/bin/kill -s HUP $MAINPID
	 
	 # Having non-zero Limit*s causes performance problems due to accounting overhead
	 # in the kernel. We recommend using cgroups to do container-local accounting.
	+LimitNOFILE=infinity
	 LimitNPROC=infinity
	 LimitCORE=infinity
	 
	-# Comment TasksMax if your systemd version does not support it.
	-# Only systemd 226 and above support this option.
	+# Uncomment TasksMax if your systemd version supports it.
	+# Only systemd 226 and above support this version.
	 TasksMax=infinity
	+TimeoutStartSec=0
	 
	 # set delegate yes so that systemd does not reset the cgroups of docker containers
	 Delegate=yes
	 
	 # kill only the docker process, not all processes in the cgroup
	 KillMode=process
	-OOMScoreAdjust=-500
	 
	 [Install]
	 WantedBy=multi-user.target
	Synchronizing state of docker.service with SysV service script with /lib/systemd/systemd-sysv-install.
	Executing: /lib/systemd/systemd-sysv-install enable docker
	
	I0620 17:02:17.477054    8431 machine.go:97] duration metric: took 5.132869972s to provisionDockerMachine
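(The docker.service content shown above is rendered host-side and only swapped in over SSH when the diff is non-empty. As an illustration only, not minikube's provision.go, a trimmed Go sketch of rendering the same ExecStart/TLS parameters through text/template; the unit text here is a stand-in, the cert paths and insecure registry match the log.)

package main

import (
	"os"
	"text/template"
)

// unitTmpl is a trimmed stand-in for the docker.service drop-in shown above.
const unitTmpl = `[Service]
ExecStart=
ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H unix:///var/run/docker.sock --tlsverify --tlscacert {{.CACert}} --tlscert {{.ServerCert}} --tlskey {{.ServerKey}} --insecure-registry {{.InsecureRegistry}}
`

func main() {
	// Values mirror the remote cert locations and service CIDR used in this run.
	data := struct {
		CACert, ServerCert, ServerKey, InsecureRegistry string
	}{
		CACert:           "/etc/docker/ca.pem",
		ServerCert:       "/etc/docker/server.pem",
		ServerKey:        "/etc/docker/server-key.pem",
		InsecureRegistry: "10.96.0.0/12",
	}
	t := template.Must(template.New("docker.service").Parse(unitTmpl))
	if err := t.Execute(os.Stdout, data); err != nil {
		panic(err)
	}
}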
	I0620 17:02:17.477066    8431 client.go:171] duration metric: took 13.917740513s to LocalClient.Create
	I0620 17:02:17.477084    8431 start.go:167] duration metric: took 13.917802543s to libmachine.API.Create "addons-705802"
	I0620 17:02:17.477091    8431 start.go:293] postStartSetup for "addons-705802" (driver="docker")
	I0620 17:02:17.477101    8431 start.go:322] creating required directories: [/etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs]
	I0620 17:02:17.477169    8431 ssh_runner.go:195] Run: sudo mkdir -p /etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs
	I0620 17:02:17.477215    8431 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-705802
	I0620 17:02:17.493644    8431 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32772 SSHKeyPath:/home/jenkins/minikube-integration/19106-2452/.minikube/machines/addons-705802/id_rsa Username:docker}
	I0620 17:02:17.588151    8431 ssh_runner.go:195] Run: cat /etc/os-release
	I0620 17:02:17.591356    8431 main.go:141] libmachine: Couldn't set key VERSION_CODENAME, no corresponding struct field found
	I0620 17:02:17.591400    8431 main.go:141] libmachine: Couldn't set key PRIVACY_POLICY_URL, no corresponding struct field found
	I0620 17:02:17.591412    8431 main.go:141] libmachine: Couldn't set key UBUNTU_CODENAME, no corresponding struct field found
	I0620 17:02:17.591419    8431 info.go:137] Remote host: Ubuntu 22.04.4 LTS
	I0620 17:02:17.591432    8431 filesync.go:126] Scanning /home/jenkins/minikube-integration/19106-2452/.minikube/addons for local assets ...
	I0620 17:02:17.591509    8431 filesync.go:126] Scanning /home/jenkins/minikube-integration/19106-2452/.minikube/files for local assets ...
	I0620 17:02:17.591537    8431 start.go:296] duration metric: took 114.440402ms for postStartSetup
	I0620 17:02:17.591881    8431 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" addons-705802
	I0620 17:02:17.607312    8431 profile.go:143] Saving config to /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/addons-705802/config.json ...
	I0620 17:02:17.607591    8431 ssh_runner.go:195] Run: sh -c "df -h /var | awk 'NR==2{print $5}'"
	I0620 17:02:17.607640    8431 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-705802
	I0620 17:02:17.623116    8431 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32772 SSHKeyPath:/home/jenkins/minikube-integration/19106-2452/.minikube/machines/addons-705802/id_rsa Username:docker}
	I0620 17:02:17.712104    8431 ssh_runner.go:195] Run: sh -c "df -BG /var | awk 'NR==2{print $4}'"
	I0620 17:02:17.716838    8431 start.go:128] duration metric: took 14.160232665s to createHost
	I0620 17:02:17.716862    8431 start.go:83] releasing machines lock for "addons-705802", held for 14.16037014s
	I0620 17:02:17.716945    8431 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" addons-705802
	I0620 17:02:17.736986    8431 ssh_runner.go:195] Run: cat /version.json
	I0620 17:02:17.737034    8431 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-705802
	I0620 17:02:17.737074    8431 ssh_runner.go:195] Run: curl -sS -m 2 https://registry.k8s.io/
	I0620 17:02:17.737132    8431 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-705802
	I0620 17:02:17.761292    8431 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32772 SSHKeyPath:/home/jenkins/minikube-integration/19106-2452/.minikube/machines/addons-705802/id_rsa Username:docker}
	I0620 17:02:17.772604    8431 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32772 SSHKeyPath:/home/jenkins/minikube-integration/19106-2452/.minikube/machines/addons-705802/id_rsa Username:docker}
	I0620 17:02:17.976121    8431 ssh_runner.go:195] Run: systemctl --version
	I0620 17:02:17.980386    8431 ssh_runner.go:195] Run: sh -c "stat /etc/cni/net.d/*loopback.conf*"
	I0620 17:02:17.984659    8431 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f -name *loopback.conf* -not -name *.mk_disabled -exec sh -c "grep -q loopback {} && ( grep -q name {} || sudo sed -i '/"type": "loopback"/i \ \ \ \ "name": "loopback",' {} ) && sudo sed -i 's|"cniVersion": ".*"|"cniVersion": "1.0.0"|g' {}" ;
	I0620 17:02:18.020079    8431 cni.go:230] loopback cni configuration patched: "/etc/cni/net.d/*loopback.conf*" found
	I0620 17:02:18.020227    8431 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f ( ( -name *bridge* -or -name *podman* ) -and -not -name *.mk_disabled ) -printf "%!p(MISSING), " -exec sh -c "sudo mv {} {}.mk_disabled" ;
	I0620 17:02:18.054418    8431 cni.go:262] disabled [/etc/cni/net.d/87-podman-bridge.conflist, /etc/cni/net.d/100-crio-bridge.conf] bridge cni config(s)
	I0620 17:02:18.054455    8431 start.go:494] detecting cgroup driver to use...
	I0620 17:02:18.054505    8431 detect.go:196] detected "cgroupfs" cgroup driver on host os
	I0620 17:02:18.054650    8431 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %!s(MISSING) "runtime-endpoint: unix:///run/containerd/containerd.sock
	" | sudo tee /etc/crictl.yaml"
	I0620 17:02:18.072859    8431 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)sandbox_image = .*$|\1sandbox_image = "registry.k8s.io/pause:3.9"|' /etc/containerd/config.toml"
	I0620 17:02:18.084207    8431 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)restrict_oom_score_adj = .*$|\1restrict_oom_score_adj = false|' /etc/containerd/config.toml"
	I0620 17:02:18.095726    8431 containerd.go:146] configuring containerd to use "cgroupfs" as cgroup driver...
	I0620 17:02:18.095799    8431 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)SystemdCgroup = .*$|\1SystemdCgroup = false|g' /etc/containerd/config.toml"
	I0620 17:02:18.107049    8431 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runtime.v1.linux"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
	I0620 17:02:18.118020    8431 ssh_runner.go:195] Run: sh -c "sudo sed -i '/systemd_cgroup/d' /etc/containerd/config.toml"
	I0620 17:02:18.128411    8431 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runc.v1"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
	I0620 17:02:18.138656    8431 ssh_runner.go:195] Run: sh -c "sudo rm -rf /etc/cni/net.mk"
	I0620 17:02:18.148409    8431 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)conf_dir = .*$|\1conf_dir = "/etc/cni/net.d"|g' /etc/containerd/config.toml"
	I0620 17:02:18.158726    8431 ssh_runner.go:195] Run: sh -c "sudo sed -i '/^ *enable_unprivileged_ports = .*/d' /etc/containerd/config.toml"
	I0620 17:02:18.168949    8431 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)\[plugins."io.containerd.grpc.v1.cri"\]|&\n\1  enable_unprivileged_ports = true|' /etc/containerd/config.toml"
	I0620 17:02:18.179121    8431 ssh_runner.go:195] Run: sudo sysctl net.bridge.bridge-nf-call-iptables
	I0620 17:02:18.187581    8431 ssh_runner.go:195] Run: sudo sh -c "echo 1 > /proc/sys/net/ipv4/ip_forward"
	I0620 17:02:18.196377    8431 ssh_runner.go:195] Run: sudo systemctl daemon-reload
	I0620 17:02:18.287355    8431 ssh_runner.go:195] Run: sudo systemctl restart containerd
	I0620 17:02:18.412891    8431 start.go:494] detecting cgroup driver to use...
	I0620 17:02:18.412974    8431 detect.go:196] detected "cgroupfs" cgroup driver on host os
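(Both detection passes above resolve to "cgroupfs", and the same value is later read back from the engine with `docker info --format {{.CgroupDriver}}` further down in this log. A minimal sketch of that query, assuming a docker CLI on PATH; this is not minikube's detect.go implementation.)

package main

import (
	"fmt"
	"os/exec"
	"strings"
)

func main() {
	// Same query minikube issues later in this log to pick the kubelet cgroup driver.
	out, err := exec.Command("docker", "info", "--format", "{{.CgroupDriver}}").Output()
	if err != nil {
		panic(err)
	}
	driver := strings.TrimSpace(string(out))
	fmt.Println("detected cgroup driver:", driver) // "cgroupfs" in this run, "systemd" on other hosts
}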
	I0620 17:02:18.413053    8431 ssh_runner.go:195] Run: sudo systemctl cat docker.service
	I0620 17:02:18.432779    8431 cruntime.go:279] skipping containerd shutdown because we are bound to it
	I0620 17:02:18.432896    8431 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service crio
	I0620 17:02:18.445008    8431 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %!s(MISSING) "runtime-endpoint: unix:///var/run/cri-dockerd.sock
	" | sudo tee /etc/crictl.yaml"
	I0620 17:02:18.465723    8431 ssh_runner.go:195] Run: which cri-dockerd
	I0620 17:02:18.483404    8431 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/cri-docker.service.d
	I0620 17:02:18.492432    8431 ssh_runner.go:362] scp memory --> /etc/systemd/system/cri-docker.service.d/10-cni.conf (189 bytes)
	I0620 17:02:18.511816    8431 ssh_runner.go:195] Run: sudo systemctl unmask docker.service
	I0620 17:02:18.617638    8431 ssh_runner.go:195] Run: sudo systemctl enable docker.socket
	I0620 17:02:18.710051    8431 docker.go:574] configuring docker to use "cgroupfs" as cgroup driver...
	I0620 17:02:18.710247    8431 ssh_runner.go:362] scp memory --> /etc/docker/daemon.json (130 bytes)
	I0620 17:02:18.730620    8431 ssh_runner.go:195] Run: sudo systemctl daemon-reload
	I0620 17:02:18.820477    8431 ssh_runner.go:195] Run: sudo systemctl restart docker
	I0620 17:02:19.074043    8431 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.socket
	I0620 17:02:19.086327    8431 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.service
	I0620 17:02:19.098568    8431 ssh_runner.go:195] Run: sudo systemctl unmask cri-docker.socket
	I0620 17:02:19.193215    8431 ssh_runner.go:195] Run: sudo systemctl enable cri-docker.socket
	I0620 17:02:19.276534    8431 ssh_runner.go:195] Run: sudo systemctl daemon-reload
	I0620 17:02:19.357705    8431 ssh_runner.go:195] Run: sudo systemctl restart cri-docker.socket
	I0620 17:02:19.371312    8431 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.service
	I0620 17:02:19.382733    8431 ssh_runner.go:195] Run: sudo systemctl daemon-reload
	I0620 17:02:19.467426    8431 ssh_runner.go:195] Run: sudo systemctl restart cri-docker.service
	I0620 17:02:19.540994    8431 start.go:541] Will wait 60s for socket path /var/run/cri-dockerd.sock
	I0620 17:02:19.541129    8431 ssh_runner.go:195] Run: stat /var/run/cri-dockerd.sock
	I0620 17:02:19.545867    8431 start.go:562] Will wait 60s for crictl version
	I0620 17:02:19.545954    8431 ssh_runner.go:195] Run: which crictl
	I0620 17:02:19.549490    8431 ssh_runner.go:195] Run: sudo /usr/bin/crictl version
	I0620 17:02:19.584852    8431 start.go:578] Version:  0.1.0
	RuntimeName:  docker
	RuntimeVersion:  26.1.4
	RuntimeApiVersion:  v1
	I0620 17:02:19.584962    8431 ssh_runner.go:195] Run: docker version --format {{.Server.Version}}
	I0620 17:02:19.606559    8431 ssh_runner.go:195] Run: docker version --format {{.Server.Version}}
	I0620 17:02:19.629761    8431 out.go:204] * Preparing Kubernetes v1.30.2 on Docker 26.1.4 ...
	I0620 17:02:19.629887    8431 cli_runner.go:164] Run: docker network inspect addons-705802 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
	I0620 17:02:19.649991    8431 ssh_runner.go:195] Run: grep 192.168.49.1	host.minikube.internal$ /etc/hosts
	I0620 17:02:19.653913    8431 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\thost.minikube.internal$' "/etc/hosts"; echo "192.168.49.1	host.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
	I0620 17:02:19.665093    8431 kubeadm.go:877] updating cluster {Name:addons-705802 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1718753665-19106@sha256:735aacbd61d487240dc39ba6e4d70dd6ae1ad6181ca2ba092d372605e48ee636 Memory:4000 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.30.2 ClusterName:addons-705802 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNa
mes:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.49.2 Port:8443 KubernetesVersion:v1.30.2 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuF
irmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} ...
	I0620 17:02:19.665208    8431 preload.go:132] Checking if preload exists for k8s version v1.30.2 and runtime docker
	I0620 17:02:19.665260    8431 ssh_runner.go:195] Run: docker images --format {{.Repository}}:{{.Tag}}
	I0620 17:02:19.682073    8431 docker.go:685] Got preloaded images: -- stdout --
	registry.k8s.io/kube-apiserver:v1.30.2
	registry.k8s.io/kube-scheduler:v1.30.2
	registry.k8s.io/kube-controller-manager:v1.30.2
	registry.k8s.io/kube-proxy:v1.30.2
	registry.k8s.io/etcd:3.5.12-0
	registry.k8s.io/coredns/coredns:v1.11.1
	registry.k8s.io/pause:3.9
	gcr.io/k8s-minikube/storage-provisioner:v5
	
	-- /stdout --
	I0620 17:02:19.682095    8431 docker.go:615] Images already preloaded, skipping extraction
	I0620 17:02:19.682158    8431 ssh_runner.go:195] Run: docker images --format {{.Repository}}:{{.Tag}}
	I0620 17:02:19.699140    8431 docker.go:685] Got preloaded images: -- stdout --
	registry.k8s.io/kube-apiserver:v1.30.2
	registry.k8s.io/kube-scheduler:v1.30.2
	registry.k8s.io/kube-controller-manager:v1.30.2
	registry.k8s.io/kube-proxy:v1.30.2
	registry.k8s.io/etcd:3.5.12-0
	registry.k8s.io/coredns/coredns:v1.11.1
	registry.k8s.io/pause:3.9
	gcr.io/k8s-minikube/storage-provisioner:v5
	
	-- /stdout --
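(The two identical listings above are what lets minikube report "Images are preloaded, skipping loading". As a rough sketch only, not the cache_images.go implementation, a required-image check against `docker images --format {{.Repository}}:{{.Tag}}` output could look like this; the sample data in main is hypothetical.)

package main

import (
	"fmt"
	"strings"
)

// imagesPreloaded reports whether every required image appears in the
// newline-separated `docker images --format {{.Repository}}:{{.Tag}}` output.
func imagesPreloaded(dockerImagesOutput string, required []string) bool {
	have := map[string]bool{}
	for _, line := range strings.Split(dockerImagesOutput, "\n") {
		if img := strings.TrimSpace(line); img != "" {
			have[img] = true
		}
	}
	for _, img := range required {
		if !have[img] {
			return false
		}
	}
	return true
}

func main() {
	listing := "registry.k8s.io/kube-apiserver:v1.30.2\nregistry.k8s.io/pause:3.9\n"
	required := []string{"registry.k8s.io/kube-apiserver:v1.30.2", "registry.k8s.io/pause:3.9"}
	fmt.Println("preloaded:", imagesPreloaded(listing, required))
}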
	I0620 17:02:19.699161    8431 cache_images.go:84] Images are preloaded, skipping loading
	I0620 17:02:19.699177    8431 kubeadm.go:928] updating node { 192.168.49.2 8443 v1.30.2 docker true true} ...
	I0620 17:02:19.699273    8431 kubeadm.go:940] kubelet [Unit]
	Wants=docker.socket
	
	[Service]
	ExecStart=
	ExecStart=/var/lib/minikube/binaries/v1.30.2/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --config=/var/lib/kubelet/config.yaml --hostname-override=addons-705802 --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.49.2
	
	[Install]
	 config:
	{KubernetesVersion:v1.30.2 ClusterName:addons-705802 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:}
	I0620 17:02:19.699338    8431 ssh_runner.go:195] Run: docker info --format {{.CgroupDriver}}
	I0620 17:02:19.743110    8431 cni.go:84] Creating CNI manager for ""
	I0620 17:02:19.743135    8431 cni.go:158] "docker" driver + "docker" container runtime found on kubernetes v1.24+, recommending bridge
	I0620 17:02:19.743145    8431 kubeadm.go:84] Using pod CIDR: 10.244.0.0/16
	I0620 17:02:19.743190    8431 kubeadm.go:181] kubeadm options: {CertDir:/var/lib/minikube/certs ServiceCIDR:10.96.0.0/12 PodSubnet:10.244.0.0/16 AdvertiseAddress:192.168.49.2 APIServerPort:8443 KubernetesVersion:v1.30.2 EtcdDataDir:/var/lib/minikube/etcd EtcdExtraArgs:map[] ClusterName:addons-705802 NodeName:addons-705802 DNSDomain:cluster.local CRISocket:/var/run/cri-dockerd.sock ImageRepository: ComponentOptions:[{Component:apiServer ExtraArgs:map[enable-admission-plugins:NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota] Pairs:map[certSANs:["127.0.0.1", "localhost", "192.168.49.2"]]} {Component:controllerManager ExtraArgs:map[allocate-node-cidrs:true leader-elect:false] Pairs:map[]} {Component:scheduler ExtraArgs:map[leader-elect:false] Pairs:map[]}] FeatureArgs:map[] NodeIP:192.168.49.2 CgroupDriver:cgroupfs ClientCAFile:/var/lib/minikube/certs/ca.crt StaticPodPath:/etc/kuber
netes/manifests ControlPlaneAddress:control-plane.minikube.internal KubeProxyOptions:map[] ResolvConfSearchRegression:false KubeletConfigOpts:map[containerRuntimeEndpoint:unix:///var/run/cri-dockerd.sock hairpinMode:hairpin-veth runtimeRequestTimeout:15m] PrependCriSocketUnix:true}
	I0620 17:02:19.743376    8431 kubeadm.go:187] kubeadm config:
	apiVersion: kubeadm.k8s.io/v1beta3
	kind: InitConfiguration
	localAPIEndpoint:
	  advertiseAddress: 192.168.49.2
	  bindPort: 8443
	bootstrapTokens:
	  - groups:
	      - system:bootstrappers:kubeadm:default-node-token
	    ttl: 24h0m0s
	    usages:
	      - signing
	      - authentication
	nodeRegistration:
	  criSocket: unix:///var/run/cri-dockerd.sock
	  name: "addons-705802"
	  kubeletExtraArgs:
	    node-ip: 192.168.49.2
	  taints: []
	---
	apiVersion: kubeadm.k8s.io/v1beta3
	kind: ClusterConfiguration
	apiServer:
	  certSANs: ["127.0.0.1", "localhost", "192.168.49.2"]
	  extraArgs:
	    enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
	controllerManager:
	  extraArgs:
	    allocate-node-cidrs: "true"
	    leader-elect: "false"
	scheduler:
	  extraArgs:
	    leader-elect: "false"
	certificatesDir: /var/lib/minikube/certs
	clusterName: mk
	controlPlaneEndpoint: control-plane.minikube.internal:8443
	etcd:
	  local:
	    dataDir: /var/lib/minikube/etcd
	    extraArgs:
	      proxy-refresh-interval: "70000"
	kubernetesVersion: v1.30.2
	networking:
	  dnsDomain: cluster.local
	  podSubnet: "10.244.0.0/16"
	  serviceSubnet: 10.96.0.0/12
	---
	apiVersion: kubelet.config.k8s.io/v1beta1
	kind: KubeletConfiguration
	authentication:
	  x509:
	    clientCAFile: /var/lib/minikube/certs/ca.crt
	cgroupDriver: cgroupfs
	containerRuntimeEndpoint: unix:///var/run/cri-dockerd.sock
	hairpinMode: hairpin-veth
	runtimeRequestTimeout: 15m
	clusterDomain: "cluster.local"
	# disable disk resource management by default
	imageGCHighThresholdPercent: 100
	evictionHard:
	  nodefs.available: "0%!"(MISSING)
	  nodefs.inodesFree: "0%!"(MISSING)
	  imagefs.available: "0%!"(MISSING)
	failSwapOn: false
	staticPodPath: /etc/kubernetes/manifests
	---
	apiVersion: kubeproxy.config.k8s.io/v1alpha1
	kind: KubeProxyConfiguration
	clusterCIDR: "10.244.0.0/16"
	metricsBindAddress: 0.0.0.0:10249
	conntrack:
	  maxPerCore: 0
	# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_established"
	  tcpEstablishedTimeout: 0s
	# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_close"
	  tcpCloseWaitTimeout: 0s
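(The generated config above is a multi-document YAML: InitConfiguration, ClusterConfiguration, KubeletConfiguration, and KubeProxyConfiguration. As a sketch only, assuming gopkg.in/yaml.v3 is available, one way to pull cgroupDriver out of the KubeletConfiguration document and confirm it matches the "cgroupfs" driver detected earlier; the local file path is hypothetical, the node-side copy is written to /var/tmp/minikube/kubeadm.yaml.new below.)

package main

import (
	"errors"
	"fmt"
	"io"
	"os"

	"gopkg.in/yaml.v3"
)

// kubeletCgroupDriver scans a multi-document kubeadm YAML stream and returns
// the cgroupDriver from the KubeletConfiguration document.
func kubeletCgroupDriver(r io.Reader) (string, error) {
	dec := yaml.NewDecoder(r)
	for {
		var doc map[string]interface{}
		if err := dec.Decode(&doc); err != nil {
			if errors.Is(err, io.EOF) {
				return "", errors.New("no KubeletConfiguration document found")
			}
			return "", err
		}
		if doc["kind"] == "KubeletConfiguration" {
			driver, _ := doc["cgroupDriver"].(string)
			return driver, nil
		}
	}
}

func main() {
	f, err := os.Open("kubeadm.yaml") // hypothetical local copy of the config shown above
	if err != nil {
		panic(err)
	}
	defer f.Close()
	driver, err := kubeletCgroupDriver(f)
	if err != nil {
		panic(err)
	}
	fmt.Println("kubelet cgroupDriver:", driver) // expected: cgroupfs in this run
}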
	
	I0620 17:02:19.743465    8431 ssh_runner.go:195] Run: sudo ls /var/lib/minikube/binaries/v1.30.2
	I0620 17:02:19.752047    8431 binaries.go:44] Found k8s binaries, skipping transfer
	I0620 17:02:19.752116    8431 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/kubelet.service.d /lib/systemd/system /var/tmp/minikube
	I0620 17:02:19.760575    8431 ssh_runner.go:362] scp memory --> /etc/systemd/system/kubelet.service.d/10-kubeadm.conf (312 bytes)
	I0620 17:02:19.777678    8431 ssh_runner.go:362] scp memory --> /lib/systemd/system/kubelet.service (352 bytes)
	I0620 17:02:19.794859    8431 ssh_runner.go:362] scp memory --> /var/tmp/minikube/kubeadm.yaml.new (2155 bytes)
	I0620 17:02:19.812646    8431 ssh_runner.go:195] Run: grep 192.168.49.2	control-plane.minikube.internal$ /etc/hosts
	I0620 17:02:19.815938    8431 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\tcontrol-plane.minikube.internal$' "/etc/hosts"; echo "192.168.49.2	control-plane.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
	I0620 17:02:19.826438    8431 ssh_runner.go:195] Run: sudo systemctl daemon-reload
	I0620 17:02:19.905036    8431 ssh_runner.go:195] Run: sudo systemctl start kubelet
	I0620 17:02:19.920213    8431 certs.go:68] Setting up /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/addons-705802 for IP: 192.168.49.2
	I0620 17:02:19.920237    8431 certs.go:194] generating shared ca certs ...
	I0620 17:02:19.920253    8431 certs.go:226] acquiring lock for ca certs: {Name:mk1f8a102b3933d1e67f4b3f5a97c6bde91190df Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
	I0620 17:02:19.920378    8431 certs.go:240] generating "minikubeCA" ca cert: /home/jenkins/minikube-integration/19106-2452/.minikube/ca.key
	I0620 17:02:20.386922    8431 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/19106-2452/.minikube/ca.crt ...
	I0620 17:02:20.386953    8431 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19106-2452/.minikube/ca.crt: {Name:mk26d8b0fa16bc6e3112074b8a88c1a77ed3b2a2 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
	I0620 17:02:20.387184    8431 crypto.go:164] Writing key to /home/jenkins/minikube-integration/19106-2452/.minikube/ca.key ...
	I0620 17:02:20.387200    8431 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19106-2452/.minikube/ca.key: {Name:mk9c12b486c0e185357270d8f32b76d0ba7fa805 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
	I0620 17:02:20.387290    8431 certs.go:240] generating "proxyClientCA" ca cert: /home/jenkins/minikube-integration/19106-2452/.minikube/proxy-client-ca.key
	I0620 17:02:20.573169    8431 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/19106-2452/.minikube/proxy-client-ca.crt ...
	I0620 17:02:20.573197    8431 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19106-2452/.minikube/proxy-client-ca.crt: {Name:mk29e909522f43892c7f1b023fcad63c0fac586e Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
	I0620 17:02:20.573365    8431 crypto.go:164] Writing key to /home/jenkins/minikube-integration/19106-2452/.minikube/proxy-client-ca.key ...
	I0620 17:02:20.573377    8431 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19106-2452/.minikube/proxy-client-ca.key: {Name:mkd08bb5d25c89224c6784e6c317ba4ae589600b Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
	I0620 17:02:20.573454    8431 certs.go:256] generating profile certs ...
	I0620 17:02:20.573511    8431 certs.go:363] generating signed profile cert for "minikube-user": /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/addons-705802/client.key
	I0620 17:02:20.573535    8431 crypto.go:68] Generating cert /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/addons-705802/client.crt with IP's: []
	I0620 17:02:21.267262    8431 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/addons-705802/client.crt ...
	I0620 17:02:21.267295    8431 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/addons-705802/client.crt: {Name:mk29441dbd5dac38e820ee4a77f8c74a82ef460c Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
	I0620 17:02:21.267475    8431 crypto.go:164] Writing key to /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/addons-705802/client.key ...
	I0620 17:02:21.267491    8431 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/addons-705802/client.key: {Name:mk88b2d9b8182eb71a4e4d60d8ac4075f862200b Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
	I0620 17:02:21.267566    8431 certs.go:363] generating signed profile cert for "minikube": /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/addons-705802/apiserver.key.016c8006
	I0620 17:02:21.267588    8431 crypto.go:68] Generating cert /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/addons-705802/apiserver.crt.016c8006 with IP's: [10.96.0.1 127.0.0.1 10.0.0.1 192.168.49.2]
	I0620 17:02:21.707279    8431 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/addons-705802/apiserver.crt.016c8006 ...
	I0620 17:02:21.707309    8431 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/addons-705802/apiserver.crt.016c8006: {Name:mk40f67152c31eb9a6ce5025097e60a00bbb791a Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
	I0620 17:02:21.707481    8431 crypto.go:164] Writing key to /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/addons-705802/apiserver.key.016c8006 ...
	I0620 17:02:21.707495    8431 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/addons-705802/apiserver.key.016c8006: {Name:mk6404874d318d95913b8abec475e4b764cf3c17 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
	I0620 17:02:21.707577    8431 certs.go:381] copying /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/addons-705802/apiserver.crt.016c8006 -> /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/addons-705802/apiserver.crt
	I0620 17:02:21.707720    8431 certs.go:385] copying /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/addons-705802/apiserver.key.016c8006 -> /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/addons-705802/apiserver.key
	I0620 17:02:21.707864    8431 certs.go:363] generating signed profile cert for "aggregator": /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/addons-705802/proxy-client.key
	I0620 17:02:21.707886    8431 crypto.go:68] Generating cert /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/addons-705802/proxy-client.crt with IP's: []
	I0620 17:02:22.864766    8431 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/addons-705802/proxy-client.crt ...
	I0620 17:02:22.864799    8431 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/addons-705802/proxy-client.crt: {Name:mk53f62337b3f14c490f90940cb8e50052b95801 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
	I0620 17:02:22.865022    8431 crypto.go:164] Writing key to /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/addons-705802/proxy-client.key ...
	I0620 17:02:22.865038    8431 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/addons-705802/proxy-client.key: {Name:mkb3b58055de8c18ec179def66da5fbad6ca62c6 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
	I0620 17:02:22.865217    8431 certs.go:484] found cert: /home/jenkins/minikube-integration/19106-2452/.minikube/certs/ca-key.pem (1675 bytes)
	I0620 17:02:22.865259    8431 certs.go:484] found cert: /home/jenkins/minikube-integration/19106-2452/.minikube/certs/ca.pem (1078 bytes)
	I0620 17:02:22.865288    8431 certs.go:484] found cert: /home/jenkins/minikube-integration/19106-2452/.minikube/certs/cert.pem (1123 bytes)
	I0620 17:02:22.865316    8431 certs.go:484] found cert: /home/jenkins/minikube-integration/19106-2452/.minikube/certs/key.pem (1675 bytes)
	I0620 17:02:22.865932    8431 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19106-2452/.minikube/ca.crt --> /var/lib/minikube/certs/ca.crt (1111 bytes)
	I0620 17:02:22.892157    8431 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19106-2452/.minikube/ca.key --> /var/lib/minikube/certs/ca.key (1679 bytes)
	I0620 17:02:22.917405    8431 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19106-2452/.minikube/proxy-client-ca.crt --> /var/lib/minikube/certs/proxy-client-ca.crt (1119 bytes)
	I0620 17:02:22.941671    8431 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19106-2452/.minikube/proxy-client-ca.key --> /var/lib/minikube/certs/proxy-client-ca.key (1679 bytes)
	I0620 17:02:22.965845    8431 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/addons-705802/apiserver.crt --> /var/lib/minikube/certs/apiserver.crt (1419 bytes)
	I0620 17:02:22.989200    8431 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/addons-705802/apiserver.key --> /var/lib/minikube/certs/apiserver.key (1675 bytes)
	I0620 17:02:23.015869    8431 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/addons-705802/proxy-client.crt --> /var/lib/minikube/certs/proxy-client.crt (1147 bytes)
	I0620 17:02:23.046126    8431 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/addons-705802/proxy-client.key --> /var/lib/minikube/certs/proxy-client.key (1679 bytes)
	I0620 17:02:23.070665    8431 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19106-2452/.minikube/ca.crt --> /usr/share/ca-certificates/minikubeCA.pem (1111 bytes)
	I0620 17:02:23.104123    8431 ssh_runner.go:362] scp memory --> /var/lib/minikube/kubeconfig (738 bytes)
	I0620 17:02:23.122454    8431 ssh_runner.go:195] Run: openssl version
	I0620 17:02:23.127717    8431 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/minikubeCA.pem && ln -fs /usr/share/ca-certificates/minikubeCA.pem /etc/ssl/certs/minikubeCA.pem"
	I0620 17:02:23.137211    8431 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/minikubeCA.pem
	I0620 17:02:23.140709    8431 certs.go:528] hashing: -rw-r--r-- 1 root root 1111 Jun 20 17:02 /usr/share/ca-certificates/minikubeCA.pem
	I0620 17:02:23.140773    8431 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem
	I0620 17:02:23.147779    8431 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/b5213941.0 || ln -fs /etc/ssl/certs/minikubeCA.pem /etc/ssl/certs/b5213941.0"
	I0620 17:02:23.157257    8431 ssh_runner.go:195] Run: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt
	I0620 17:02:23.160473    8431 certs.go:399] 'apiserver-kubelet-client' cert doesn't exist, likely first start: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt: Process exited with status 1
	stdout:
	
	stderr:
	stat: cannot statx '/var/lib/minikube/certs/apiserver-kubelet-client.crt': No such file or directory
	I0620 17:02:23.160519    8431 kubeadm.go:391] StartCluster: {Name:addons-705802 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1718753665-19106@sha256:735aacbd61d487240dc39ba6e4d70dd6ae1ad6181ca2ba092d372605e48ee636 Memory:4000 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.30.2 ClusterName:addons-705802 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames
:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.49.2 Port:8443 KubernetesVersion:v1.30.2 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirm
warePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
	I0620 17:02:23.160643    8431 ssh_runner.go:195] Run: docker ps --filter status=paused --filter=name=k8s_.*_(kube-system)_ --format={{.ID}}
	I0620 17:02:23.177310    8431 ssh_runner.go:195] Run: sudo ls /var/lib/kubelet/kubeadm-flags.env /var/lib/kubelet/config.yaml /var/lib/minikube/etcd
	I0620 17:02:23.186118    8431 ssh_runner.go:195] Run: sudo cp /var/tmp/minikube/kubeadm.yaml.new /var/tmp/minikube/kubeadm.yaml
	I0620 17:02:23.194939    8431 kubeadm.go:213] ignoring SystemVerification for kubeadm because of docker driver
	I0620 17:02:23.195017    8431 ssh_runner.go:195] Run: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf
	I0620 17:02:23.203828    8431 kubeadm.go:154] config check failed, skipping stale config cleanup: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf: Process exited with status 2
	stdout:
	
	stderr:
	ls: cannot access '/etc/kubernetes/admin.conf': No such file or directory
	ls: cannot access '/etc/kubernetes/kubelet.conf': No such file or directory
	ls: cannot access '/etc/kubernetes/controller-manager.conf': No such file or directory
	ls: cannot access '/etc/kubernetes/scheduler.conf': No such file or directory
	I0620 17:02:23.203854    8431 kubeadm.go:156] found existing configuration files:
	
	I0620 17:02:23.203909    8431 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf
	I0620 17:02:23.212571    8431 kubeadm.go:162] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/admin.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf: Process exited with status 2
	stdout:
	
	stderr:
	grep: /etc/kubernetes/admin.conf: No such file or directory
	I0620 17:02:23.212659    8431 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/admin.conf
	I0620 17:02:23.220931    8431 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf
	I0620 17:02:23.229628    8431 kubeadm.go:162] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/kubelet.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf: Process exited with status 2
	stdout:
	
	stderr:
	grep: /etc/kubernetes/kubelet.conf: No such file or directory
	I0620 17:02:23.229723    8431 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/kubelet.conf
	I0620 17:02:23.238587    8431 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf
	I0620 17:02:23.247234    8431 kubeadm.go:162] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/controller-manager.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf: Process exited with status 2
	stdout:
	
	stderr:
	grep: /etc/kubernetes/controller-manager.conf: No such file or directory
	I0620 17:02:23.247301    8431 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/controller-manager.conf
	I0620 17:02:23.255576    8431 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf
	I0620 17:02:23.264240    8431 kubeadm.go:162] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/scheduler.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf: Process exited with status 2
	stdout:
	
	stderr:
	grep: /etc/kubernetes/scheduler.conf: No such file or directory
	I0620 17:02:23.264326    8431 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/scheduler.conf
	I0620 17:02:23.272478    8431 ssh_runner.go:286] Start: /bin/bash -c "sudo env PATH="/var/lib/minikube/binaries/v1.30.2:$PATH" kubeadm init --config /var/tmp/minikube/kubeadm.yaml  --ignore-preflight-errors=DirAvailable--etc-kubernetes-manifests,DirAvailable--var-lib-minikube,DirAvailable--var-lib-minikube-etcd,FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml,FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml,FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml,FileAvailable--etc-kubernetes-manifests-etcd.yaml,Port-10250,Swap,NumCPU,Mem,SystemVerification,FileContent--proc-sys-net-bridge-bridge-nf-call-iptables"
	I0620 17:02:23.319822    8431 kubeadm.go:309] [init] Using Kubernetes version: v1.30.2
	I0620 17:02:23.319885    8431 kubeadm.go:309] [preflight] Running pre-flight checks
	I0620 17:02:23.365874    8431 kubeadm.go:309] [preflight] The system verification failed. Printing the output from the verification:
	I0620 17:02:23.365950    8431 kubeadm.go:309] KERNEL_VERSION: 5.15.0-1063-aws
	I0620 17:02:23.365991    8431 kubeadm.go:309] OS: Linux
	I0620 17:02:23.366038    8431 kubeadm.go:309] CGROUPS_CPU: enabled
	I0620 17:02:23.366092    8431 kubeadm.go:309] CGROUPS_CPUACCT: enabled
	I0620 17:02:23.366141    8431 kubeadm.go:309] CGROUPS_CPUSET: enabled
	I0620 17:02:23.366191    8431 kubeadm.go:309] CGROUPS_DEVICES: enabled
	I0620 17:02:23.366241    8431 kubeadm.go:309] CGROUPS_FREEZER: enabled
	I0620 17:02:23.366294    8431 kubeadm.go:309] CGROUPS_MEMORY: enabled
	I0620 17:02:23.366340    8431 kubeadm.go:309] CGROUPS_PIDS: enabled
	I0620 17:02:23.366391    8431 kubeadm.go:309] CGROUPS_HUGETLB: enabled
	I0620 17:02:23.366438    8431 kubeadm.go:309] CGROUPS_BLKIO: enabled
	I0620 17:02:23.428524    8431 kubeadm.go:309] [preflight] Pulling images required for setting up a Kubernetes cluster
	I0620 17:02:23.428633    8431 kubeadm.go:309] [preflight] This might take a minute or two, depending on the speed of your internet connection
	I0620 17:02:23.428907    8431 kubeadm.go:309] [preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
	I0620 17:02:23.670750    8431 kubeadm.go:309] [certs] Using certificateDir folder "/var/lib/minikube/certs"
	I0620 17:02:23.674572    8431 out.go:204]   - Generating certificates and keys ...
	I0620 17:02:23.674758    8431 kubeadm.go:309] [certs] Using existing ca certificate authority
	I0620 17:02:23.674868    8431 kubeadm.go:309] [certs] Using existing apiserver certificate and key on disk
	I0620 17:02:24.007257    8431 kubeadm.go:309] [certs] Generating "apiserver-kubelet-client" certificate and key
	I0620 17:02:24.206838    8431 kubeadm.go:309] [certs] Generating "front-proxy-ca" certificate and key
	I0620 17:02:25.217639    8431 kubeadm.go:309] [certs] Generating "front-proxy-client" certificate and key
	I0620 17:02:25.645105    8431 kubeadm.go:309] [certs] Generating "etcd/ca" certificate and key
	I0620 17:02:26.157744    8431 kubeadm.go:309] [certs] Generating "etcd/server" certificate and key
	I0620 17:02:26.158072    8431 kubeadm.go:309] [certs] etcd/server serving cert is signed for DNS names [addons-705802 localhost] and IPs [192.168.49.2 127.0.0.1 ::1]
	I0620 17:02:26.334326    8431 kubeadm.go:309] [certs] Generating "etcd/peer" certificate and key
	I0620 17:02:26.334619    8431 kubeadm.go:309] [certs] etcd/peer serving cert is signed for DNS names [addons-705802 localhost] and IPs [192.168.49.2 127.0.0.1 ::1]
	I0620 17:02:26.515271    8431 kubeadm.go:309] [certs] Generating "etcd/healthcheck-client" certificate and key
	I0620 17:02:26.833447    8431 kubeadm.go:309] [certs] Generating "apiserver-etcd-client" certificate and key
	I0620 17:02:27.234174    8431 kubeadm.go:309] [certs] Generating "sa" key and public key
	I0620 17:02:27.234442    8431 kubeadm.go:309] [kubeconfig] Using kubeconfig folder "/etc/kubernetes"
	I0620 17:02:27.623992    8431 kubeadm.go:309] [kubeconfig] Writing "admin.conf" kubeconfig file
	I0620 17:02:28.027615    8431 kubeadm.go:309] [kubeconfig] Writing "super-admin.conf" kubeconfig file
	I0620 17:02:28.160609    8431 kubeadm.go:309] [kubeconfig] Writing "kubelet.conf" kubeconfig file
	I0620 17:02:29.101262    8431 kubeadm.go:309] [kubeconfig] Writing "controller-manager.conf" kubeconfig file
	I0620 17:02:29.646574    8431 kubeadm.go:309] [kubeconfig] Writing "scheduler.conf" kubeconfig file
	I0620 17:02:29.647317    8431 kubeadm.go:309] [etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
	I0620 17:02:29.650119    8431 kubeadm.go:309] [control-plane] Using manifest folder "/etc/kubernetes/manifests"
	I0620 17:02:29.652656    8431 out.go:204]   - Booting up control plane ...
	I0620 17:02:29.652759    8431 kubeadm.go:309] [control-plane] Creating static Pod manifest for "kube-apiserver"
	I0620 17:02:29.652836    8431 kubeadm.go:309] [control-plane] Creating static Pod manifest for "kube-controller-manager"
	I0620 17:02:29.654583    8431 kubeadm.go:309] [control-plane] Creating static Pod manifest for "kube-scheduler"
	I0620 17:02:29.665250    8431 kubeadm.go:309] [kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
	I0620 17:02:29.666140    8431 kubeadm.go:309] [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
	I0620 17:02:29.666356    8431 kubeadm.go:309] [kubelet-start] Starting the kubelet
	I0620 17:02:29.768850    8431 kubeadm.go:309] [wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests"
	I0620 17:02:29.768958    8431 kubeadm.go:309] [kubelet-check] Waiting for a healthy kubelet. This can take up to 4m0s
	I0620 17:02:30.770312    8431 kubeadm.go:309] [kubelet-check] The kubelet is healthy after 1.001550988s
	I0620 17:02:30.770402    8431 kubeadm.go:309] [api-check] Waiting for a healthy API server. This can take up to 4m0s
	I0620 17:02:37.272000    8431 kubeadm.go:309] [api-check] The API server is healthy after 6.501634358s
	I0620 17:02:37.297349    8431 kubeadm.go:309] [upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
	I0620 17:02:37.317703    8431 kubeadm.go:309] [kubelet] Creating a ConfigMap "kubelet-config" in namespace kube-system with the configuration for the kubelets in the cluster
	I0620 17:02:37.368768    8431 kubeadm.go:309] [upload-certs] Skipping phase. Please see --upload-certs
	I0620 17:02:37.368955    8431 kubeadm.go:309] [mark-control-plane] Marking the node addons-705802 as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]
	I0620 17:02:37.383801    8431 kubeadm.go:309] [bootstrap-token] Using token: l2k49m.k1fhkmru3js8fac0
	I0620 17:02:37.385783    8431 out.go:204]   - Configuring RBAC rules ...
	I0620 17:02:37.385902    8431 kubeadm.go:309] [bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
	I0620 17:02:37.393906    8431 kubeadm.go:309] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes
	I0620 17:02:37.402161    8431 kubeadm.go:309] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
	I0620 17:02:37.408770    8431 kubeadm.go:309] [bootstrap-token] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
	I0620 17:02:37.412397    8431 kubeadm.go:309] [bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
	I0620 17:02:37.416232    8431 kubeadm.go:309] [bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
	I0620 17:02:37.678574    8431 kubeadm.go:309] [kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
	I0620 17:02:38.133939    8431 kubeadm.go:309] [addons] Applied essential addon: CoreDNS
	I0620 17:02:38.678618    8431 kubeadm.go:309] [addons] Applied essential addon: kube-proxy
	I0620 17:02:38.679722    8431 kubeadm.go:309] 
	I0620 17:02:38.679797    8431 kubeadm.go:309] Your Kubernetes control-plane has initialized successfully!
	I0620 17:02:38.679806    8431 kubeadm.go:309] 
	I0620 17:02:38.679881    8431 kubeadm.go:309] To start using your cluster, you need to run the following as a regular user:
	I0620 17:02:38.679885    8431 kubeadm.go:309] 
	I0620 17:02:38.679909    8431 kubeadm.go:309]   mkdir -p $HOME/.kube
	I0620 17:02:38.679966    8431 kubeadm.go:309]   sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
	I0620 17:02:38.680014    8431 kubeadm.go:309]   sudo chown $(id -u):$(id -g) $HOME/.kube/config
	I0620 17:02:38.680019    8431 kubeadm.go:309] 
	I0620 17:02:38.680070    8431 kubeadm.go:309] Alternatively, if you are the root user, you can run:
	I0620 17:02:38.680074    8431 kubeadm.go:309] 
	I0620 17:02:38.680120    8431 kubeadm.go:309]   export KUBECONFIG=/etc/kubernetes/admin.conf
	I0620 17:02:38.680124    8431 kubeadm.go:309] 
	I0620 17:02:38.680185    8431 kubeadm.go:309] You should now deploy a pod network to the cluster.
	I0620 17:02:38.680257    8431 kubeadm.go:309] Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
	I0620 17:02:38.680323    8431 kubeadm.go:309]   https://kubernetes.io/docs/concepts/cluster-administration/addons/
	I0620 17:02:38.680327    8431 kubeadm.go:309] 
	I0620 17:02:38.680407    8431 kubeadm.go:309] You can now join any number of control-plane nodes by copying certificate authorities
	I0620 17:02:38.680481    8431 kubeadm.go:309] and service account keys on each node and then running the following as root:
	I0620 17:02:38.680486    8431 kubeadm.go:309] 
	I0620 17:02:38.680572    8431 kubeadm.go:309]   kubeadm join control-plane.minikube.internal:8443 --token l2k49m.k1fhkmru3js8fac0 \
	I0620 17:02:38.680671    8431 kubeadm.go:309] 	--discovery-token-ca-cert-hash sha256:56a399a874c06050b1e615cba34cc00267ff85569d8529edb2baa3c00e1104f2 \
	I0620 17:02:38.680691    8431 kubeadm.go:309] 	--control-plane 
	I0620 17:02:38.680696    8431 kubeadm.go:309] 
	I0620 17:02:38.680777    8431 kubeadm.go:309] Then you can join any number of worker nodes by running the following on each as root:
	I0620 17:02:38.680781    8431 kubeadm.go:309] 
	I0620 17:02:38.680859    8431 kubeadm.go:309] kubeadm join control-plane.minikube.internal:8443 --token l2k49m.k1fhkmru3js8fac0 \
	I0620 17:02:38.681174    8431 kubeadm.go:309] 	--discovery-token-ca-cert-hash sha256:56a399a874c06050b1e615cba34cc00267ff85569d8529edb2baa3c00e1104f2 
	I0620 17:02:38.684430    8431 kubeadm.go:309] 	[WARNING SystemVerification]: failed to parse kernel config: unable to load kernel module: "configs", output: "modprobe: FATAL: Module configs not found in directory /lib/modules/5.15.0-1063-aws\n", err: exit status 1
	I0620 17:02:38.684540    8431 kubeadm.go:309] 	[WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'
	I0620 17:02:38.684555    8431 cni.go:84] Creating CNI manager for ""
	I0620 17:02:38.684572    8431 cni.go:158] "docker" driver + "docker" container runtime found on kubernetes v1.24+, recommending bridge
	I0620 17:02:38.688681    8431 out.go:177] * Configuring bridge CNI (Container Networking Interface) ...
	I0620 17:02:38.690883    8431 ssh_runner.go:195] Run: sudo mkdir -p /etc/cni/net.d
	I0620 17:02:38.700512    8431 ssh_runner.go:362] scp memory --> /etc/cni/net.d/1-k8s.conflist (496 bytes)
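The 496-byte conflist copied to /etc/cni/net.d/1-k8s.conflist is not reproduced in this log. For orientation only, a generic bridge-plugin conflist of the same shape is sketched below; the field values, including the pod subnet, are assumptions for illustration and are not taken from this run.

	{
	  "cniVersion": "0.3.1",
	  "name": "bridge",
	  "plugins": [
	    {
	      "type": "bridge",
	      "bridge": "bridge",
	      "isDefaultGateway": true,
	      "ipMasq": true,
	      "hairpinMode": true,
	      "ipam": { "type": "host-local", "subnet": "10.244.0.0/16" }
	    },
	    { "type": "portmap", "capabilities": { "portMappings": true } }
	  ]
	}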
	I0620 17:02:38.719438    8431 ssh_runner.go:195] Run: /bin/bash -c "cat /proc/$(pgrep kube-apiserver)/oom_adj"
	I0620 17:02:38.719560    8431 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.30.2/kubectl create clusterrolebinding minikube-rbac --clusterrole=cluster-admin --serviceaccount=kube-system:default --kubeconfig=/var/lib/minikube/kubeconfig
	I0620 17:02:38.719641    8431 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.30.2/kubectl --kubeconfig=/var/lib/minikube/kubeconfig label --overwrite nodes addons-705802 minikube.k8s.io/updated_at=2024_06_20T17_02_38_0700 minikube.k8s.io/version=v1.33.1 minikube.k8s.io/commit=a5bfa5828b76fe92a3c5f89a54d8c76f6b5f3f8b minikube.k8s.io/name=addons-705802 minikube.k8s.io/primary=true
	I0620 17:02:38.847994    8431 ops.go:34] apiserver oom_adj: -16
	I0620 17:02:38.848140    8431 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.30.2/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I0620 17:02:39.348183    8431 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.30.2/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I0620 17:02:39.848494    8431 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.30.2/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I0620 17:02:40.349169    8431 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.30.2/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I0620 17:02:40.849086    8431 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.30.2/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I0620 17:02:41.348869    8431 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.30.2/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I0620 17:02:41.848930    8431 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.30.2/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I0620 17:02:42.349228    8431 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.30.2/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I0620 17:02:42.848216    8431 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.30.2/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I0620 17:02:43.349252    8431 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.30.2/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I0620 17:02:43.848338    8431 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.30.2/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I0620 17:02:44.349176    8431 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.30.2/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I0620 17:02:44.848941    8431 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.30.2/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I0620 17:02:45.349035    8431 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.30.2/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I0620 17:02:45.848996    8431 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.30.2/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I0620 17:02:46.349139    8431 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.30.2/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I0620 17:02:46.849244    8431 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.30.2/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I0620 17:02:47.348370    8431 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.30.2/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I0620 17:02:47.849212    8431 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.30.2/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I0620 17:02:48.348805    8431 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.30.2/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I0620 17:02:48.848547    8431 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.30.2/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I0620 17:02:49.348564    8431 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.30.2/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I0620 17:02:49.848282    8431 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.30.2/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I0620 17:02:50.349119    8431 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.30.2/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I0620 17:02:50.848761    8431 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.30.2/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I0620 17:02:51.349148    8431 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.30.2/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I0620 17:02:51.848391    8431 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.30.2/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I0620 17:02:52.351447    8431 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.30.2/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I0620 17:02:52.512479    8431 kubeadm.go:1107] duration metric: took 13.792963448s to wait for elevateKubeSystemPrivileges
	W0620 17:02:52.512512    8431 kubeadm.go:286] apiserver tunnel failed: apiserver port not set
	I0620 17:02:52.512520    8431 kubeadm.go:393] duration metric: took 29.352006233s to StartCluster
	I0620 17:02:52.512537    8431 settings.go:142] acquiring lock: {Name:mk6241da33092f9e98cd6bf3e519e03a5a9ec197 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
	I0620 17:02:52.512648    8431 settings.go:150] Updating kubeconfig:  /home/jenkins/minikube-integration/19106-2452/kubeconfig
	I0620 17:02:52.513069    8431 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19106-2452/kubeconfig: {Name:mk967cf82c0948dae02e8ac8d029ebc6e66977ec Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
	I0620 17:02:52.513251    8431 start.go:234] Will wait 6m0s for node &{Name: IP:192.168.49.2 Port:8443 KubernetesVersion:v1.30.2 ContainerRuntime:docker ControlPlane:true Worker:true}
	I0620 17:02:52.513400    8431 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.30.2/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml"
	I0620 17:02:52.513668    8431 config.go:182] Loaded profile config "addons-705802": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.30.2
	I0620 17:02:52.513697    8431 addons.go:507] enable addons start: toEnable=map[ambassador:false auto-pause:false cloud-spanner:true csi-hostpath-driver:true dashboard:false default-storageclass:true efk:false freshpod:false gcp-auth:true gvisor:false headlamp:false helm-tiller:false inaccel:false ingress:true ingress-dns:true inspektor-gadget:true istio:false istio-provisioner:false kong:false kubeflow:false kubevirt:false logviewer:false metallb:false metrics-server:true nvidia-device-plugin:true nvidia-driver-installer:false nvidia-gpu-device-plugin:false olm:false pod-security-policy:false portainer:false registry:true registry-aliases:false registry-creds:false storage-provisioner:true storage-provisioner-gluster:false storage-provisioner-rancher:true volcano:true volumesnapshots:true yakd:true]
	I0620 17:02:52.513767    8431 addons.go:69] Setting yakd=true in profile "addons-705802"
	I0620 17:02:52.513788    8431 addons.go:234] Setting addon yakd=true in "addons-705802"
	I0620 17:02:52.513810    8431 host.go:66] Checking if "addons-705802" exists ...
	I0620 17:02:52.514287    8431 cli_runner.go:164] Run: docker container inspect addons-705802 --format={{.State.Status}}
	I0620 17:02:52.514804    8431 addons.go:69] Setting cloud-spanner=true in profile "addons-705802"
	I0620 17:02:52.514829    8431 addons.go:234] Setting addon cloud-spanner=true in "addons-705802"
	I0620 17:02:52.514851    8431 host.go:66] Checking if "addons-705802" exists ...
	I0620 17:02:52.515286    8431 cli_runner.go:164] Run: docker container inspect addons-705802 --format={{.State.Status}}
	I0620 17:02:52.515461    8431 addons.go:69] Setting nvidia-device-plugin=true in profile "addons-705802"
	I0620 17:02:52.515497    8431 addons.go:234] Setting addon nvidia-device-plugin=true in "addons-705802"
	I0620 17:02:52.515525    8431 host.go:66] Checking if "addons-705802" exists ...
	I0620 17:02:52.515944    8431 cli_runner.go:164] Run: docker container inspect addons-705802 --format={{.State.Status}}
	I0620 17:02:52.519073    8431 addons.go:69] Setting csi-hostpath-driver=true in profile "addons-705802"
	I0620 17:02:52.519141    8431 addons.go:234] Setting addon csi-hostpath-driver=true in "addons-705802"
	I0620 17:02:52.519170    8431 host.go:66] Checking if "addons-705802" exists ...
	I0620 17:02:52.519597    8431 cli_runner.go:164] Run: docker container inspect addons-705802 --format={{.State.Status}}
	I0620 17:02:52.521249    8431 addons.go:69] Setting registry=true in profile "addons-705802"
	I0620 17:02:52.522645    8431 addons.go:234] Setting addon registry=true in "addons-705802"
	I0620 17:02:52.522725    8431 host.go:66] Checking if "addons-705802" exists ...
	I0620 17:02:52.527299    8431 cli_runner.go:164] Run: docker container inspect addons-705802 --format={{.State.Status}}
	I0620 17:02:52.521399    8431 addons.go:69] Setting storage-provisioner=true in profile "addons-705802"
	I0620 17:02:52.531880    8431 addons.go:234] Setting addon storage-provisioner=true in "addons-705802"
	I0620 17:02:52.531924    8431 host.go:66] Checking if "addons-705802" exists ...
	I0620 17:02:52.532362    8431 cli_runner.go:164] Run: docker container inspect addons-705802 --format={{.State.Status}}
	I0620 17:02:52.521408    8431 addons.go:69] Setting storage-provisioner-rancher=true in profile "addons-705802"
	I0620 17:02:52.554436    8431 addons_storage_classes.go:33] enableOrDisableStorageClasses storage-provisioner-rancher=true on "addons-705802"
	I0620 17:02:52.554766    8431 cli_runner.go:164] Run: docker container inspect addons-705802 --format={{.State.Status}}
	I0620 17:02:52.521416    8431 addons.go:69] Setting volcano=true in profile "addons-705802"
	I0620 17:02:52.563927    8431 addons.go:234] Setting addon volcano=true in "addons-705802"
	I0620 17:02:52.521420    8431 addons.go:69] Setting volumesnapshots=true in profile "addons-705802"
	I0620 17:02:52.563974    8431 addons.go:234] Setting addon volumesnapshots=true in "addons-705802"
	I0620 17:02:52.564005    8431 host.go:66] Checking if "addons-705802" exists ...
	I0620 17:02:52.521471    8431 out.go:177] * Verifying Kubernetes components...
	I0620 17:02:52.572801    8431 host.go:66] Checking if "addons-705802" exists ...
	I0620 17:02:52.573258    8431 cli_runner.go:164] Run: docker container inspect addons-705802 --format={{.State.Status}}
	I0620 17:02:52.522555    8431 addons.go:69] Setting ingress-dns=true in profile "addons-705802"
	I0620 17:02:52.522565    8431 addons.go:69] Setting default-storageclass=true in profile "addons-705802"
	I0620 17:02:52.574079    8431 addons_storage_classes.go:33] enableOrDisableStorageClasses default-storageclass=true on "addons-705802"
	I0620 17:02:52.522572    8431 addons.go:69] Setting gcp-auth=true in profile "addons-705802"
	I0620 17:02:52.574411    8431 mustload.go:65] Loading cluster: addons-705802
	I0620 17:02:52.522579    8431 addons.go:69] Setting ingress=true in profile "addons-705802"
	I0620 17:02:52.574712    8431 addons.go:234] Setting addon ingress=true in "addons-705802"
	I0620 17:02:52.583521    8431 cli_runner.go:164] Run: docker container inspect addons-705802 --format={{.State.Status}}
	I0620 17:02:52.522583    8431 addons.go:69] Setting metrics-server=true in profile "addons-705802"
	I0620 17:02:52.599094    8431 addons.go:234] Setting addon metrics-server=true in "addons-705802"
	I0620 17:02:52.599126    8431 host.go:66] Checking if "addons-705802" exists ...
	I0620 17:02:52.599592    8431 cli_runner.go:164] Run: docker container inspect addons-705802 --format={{.State.Status}}
	I0620 17:02:52.522586    8431 addons.go:69] Setting inspektor-gadget=true in profile "addons-705802"
	I0620 17:02:52.631698    8431 addons.go:234] Setting addon inspektor-gadget=true in "addons-705802"
	I0620 17:02:52.631743    8431 host.go:66] Checking if "addons-705802" exists ...
	I0620 17:02:52.632203    8431 cli_runner.go:164] Run: docker container inspect addons-705802 --format={{.State.Status}}
	I0620 17:02:52.632362    8431 config.go:182] Loaded profile config "addons-705802": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.30.2
	I0620 17:02:52.632607    8431 cli_runner.go:164] Run: docker container inspect addons-705802 --format={{.State.Status}}
	I0620 17:02:52.649251    8431 out.go:177]   - Using image docker.io/marcnuri/yakd:0.0.4
	I0620 17:02:52.652885    8431 out.go:177]   - Using image nvcr.io/nvidia/k8s-device-plugin:v0.15.0
	I0620 17:02:52.652949    8431 addons.go:431] installing /etc/kubernetes/addons/yakd-ns.yaml
	I0620 17:02:52.652965    8431 ssh_runner.go:362] scp yakd/yakd-ns.yaml --> /etc/kubernetes/addons/yakd-ns.yaml (171 bytes)
	I0620 17:02:52.653030    8431 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-705802
	I0620 17:02:52.660568    8431 out.go:177]   - Using image gcr.io/cloud-spanner-emulator/emulator:1.5.17
	I0620 17:02:52.660840    8431 out.go:177]   - Using image docker.io/registry:2.8.3
	I0620 17:02:52.661107    8431 addons.go:431] installing /etc/kubernetes/addons/nvidia-device-plugin.yaml
	I0620 17:02:52.661123    8431 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/nvidia-device-plugin.yaml (1966 bytes)
	I0620 17:02:52.663945    8431 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-705802
	I0620 17:02:52.594188    8431 ssh_runner.go:195] Run: sudo systemctl daemon-reload
	I0620 17:02:52.573618    8431 addons.go:234] Setting addon ingress-dns=true in "addons-705802"
	I0620 17:02:52.665691    8431 host.go:66] Checking if "addons-705802" exists ...
	I0620 17:02:52.594700    8431 cli_runner.go:164] Run: docker container inspect addons-705802 --format={{.State.Status}}
	I0620 17:02:52.599065    8431 host.go:66] Checking if "addons-705802" exists ...
	I0620 17:02:52.672579    8431 addons.go:431] installing /etc/kubernetes/addons/deployment.yaml
	I0620 17:02:52.672599    8431 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/deployment.yaml (1004 bytes)
	I0620 17:02:52.672665    8431 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-705802
	I0620 17:02:52.673310    8431 cli_runner.go:164] Run: docker container inspect addons-705802 --format={{.State.Status}}
	I0620 17:02:52.708406    8431 out.go:177]   - Using image gcr.io/k8s-minikube/kube-registry-proxy:0.0.6
	I0620 17:02:52.711168    8431 addons.go:431] installing /etc/kubernetes/addons/registry-rc.yaml
	I0620 17:02:52.711190    8431 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/registry-rc.yaml (798 bytes)
	I0620 17:02:52.711252    8431 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-705802
	I0620 17:02:52.725215    8431 cli_runner.go:164] Run: docker container inspect addons-705802 --format={{.State.Status}}
	I0620 17:02:52.761065    8431 addons.go:234] Setting addon storage-provisioner-rancher=true in "addons-705802"
	I0620 17:02:52.761106    8431 host.go:66] Checking if "addons-705802" exists ...
	I0620 17:02:52.761511    8431 cli_runner.go:164] Run: docker container inspect addons-705802 --format={{.State.Status}}
	I0620 17:02:52.783137    8431 out.go:177]   - Using image registry.k8s.io/sig-storage/hostpathplugin:v1.9.0
	I0620 17:02:52.787339    8431 out.go:177]   - Using image registry.k8s.io/sig-storage/livenessprobe:v2.8.0
	I0620 17:02:52.790325    8431 out.go:177]   - Using image registry.k8s.io/sig-storage/csi-resizer:v1.6.0
	I0620 17:02:52.792955    8431 out.go:177]   - Using image registry.k8s.io/sig-storage/csi-snapshotter:v6.1.0
	I0620 17:02:52.796344    8431 out.go:177]   - Using image registry.k8s.io/sig-storage/snapshot-controller:v6.1.0
	I0620 17:02:52.798288    8431 out.go:177]   - Using image docker.io/volcanosh/vc-webhook-manager:v1.7.0
	I0620 17:02:52.798886    8431 out.go:177]   - Using image gcr.io/k8s-minikube/storage-provisioner:v5
	I0620 17:02:52.799800    8431 addons.go:431] installing /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml
	I0620 17:02:52.799816    8431 ssh_runner.go:362] scp volumesnapshots/csi-hostpath-snapshotclass.yaml --> /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml (934 bytes)
	I0620 17:02:52.799883    8431 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-705802
	I0620 17:02:52.823688    8431 host.go:66] Checking if "addons-705802" exists ...
	I0620 17:02:52.828207    8431 out.go:177]   - Using image docker.io/volcanosh/vc-controller-manager:v1.7.0
	I0620 17:02:52.828343    8431 out.go:177]   - Using image registry.k8s.io/sig-storage/csi-provisioner:v3.3.0
	I0620 17:02:52.831098    8431 out.go:177]   - Using image docker.io/volcanosh/vc-scheduler:v1.7.0
	I0620 17:02:52.831584    8431 addons.go:431] installing /etc/kubernetes/addons/storage-provisioner.yaml
	I0620 17:02:52.831601    8431 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/storage-provisioner.yaml (2676 bytes)
	I0620 17:02:52.831684    8431 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-705802
	I0620 17:02:52.841911    8431 out.go:177]   - Using image registry.k8s.io/sig-storage/csi-attacher:v4.0.0
	I0620 17:02:52.846075    8431 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32772 SSHKeyPath:/home/jenkins/minikube-integration/19106-2452/.minikube/machines/addons-705802/id_rsa Username:docker}
	I0620 17:02:52.847625    8431 addons.go:234] Setting addon default-storageclass=true in "addons-705802"
	I0620 17:02:52.847659    8431 host.go:66] Checking if "addons-705802" exists ...
	I0620 17:02:52.848068    8431 cli_runner.go:164] Run: docker container inspect addons-705802 --format={{.State.Status}}
	I0620 17:02:52.850128    8431 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32772 SSHKeyPath:/home/jenkins/minikube-integration/19106-2452/.minikube/machines/addons-705802/id_rsa Username:docker}
	I0620 17:02:52.899312    8431 out.go:177]   - Using image gcr.io/k8s-minikube/minikube-ingress-dns:0.0.2
	I0620 17:02:52.924770    8431 addons.go:431] installing /etc/kubernetes/addons/ingress-dns-pod.yaml
	I0620 17:02:52.924827    8431 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/ingress-dns-pod.yaml (2442 bytes)
	I0620 17:02:52.924908    8431 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-705802
	I0620 17:02:52.938112    8431 addons.go:431] installing /etc/kubernetes/addons/volcano-deployment.yaml
	I0620 17:02:52.938191    8431 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/volcano-deployment.yaml (626760 bytes)
	I0620 17:02:52.938305    8431 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-705802
	I0620 17:02:52.900089    8431 out.go:177]   - Using image ghcr.io/inspektor-gadget/inspektor-gadget:v0.29.0
	I0620 17:02:52.900095    8431 out.go:177]   - Using image registry.k8s.io/metrics-server/metrics-server:v0.7.1
	I0620 17:02:52.954963    8431 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32772 SSHKeyPath:/home/jenkins/minikube-integration/19106-2452/.minikube/machines/addons-705802/id_rsa Username:docker}
	I0620 17:02:52.955812    8431 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32772 SSHKeyPath:/home/jenkins/minikube-integration/19106-2452/.minikube/machines/addons-705802/id_rsa Username:docker}
	I0620 17:02:52.960287    8431 out.go:177]   - Using image docker.io/rancher/local-path-provisioner:v0.0.22
	I0620 17:02:52.960646    8431 addons.go:431] installing /etc/kubernetes/addons/metrics-apiservice.yaml
	I0620 17:02:52.960668    8431 ssh_runner.go:362] scp metrics-server/metrics-apiservice.yaml --> /etc/kubernetes/addons/metrics-apiservice.yaml (424 bytes)
	I0620 17:02:52.960733    8431 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-705802
	I0620 17:02:52.971748    8431 out.go:177]   - Using image registry.k8s.io/sig-storage/csi-external-health-monitor-controller:v0.7.0
	I0620 17:02:52.972527    8431 addons.go:431] installing /etc/kubernetes/addons/ig-namespace.yaml
	I0620 17:02:52.972546    8431 ssh_runner.go:362] scp inspektor-gadget/ig-namespace.yaml --> /etc/kubernetes/addons/ig-namespace.yaml (55 bytes)
	I0620 17:02:52.972607    8431 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-705802
	I0620 17:02:52.981956    8431 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32772 SSHKeyPath:/home/jenkins/minikube-integration/19106-2452/.minikube/machines/addons-705802/id_rsa Username:docker}
	I0620 17:02:52.982536    8431 out.go:177]   - Using image docker.io/busybox:stable
	I0620 17:02:52.982702    8431 out.go:177]   - Using image registry.k8s.io/ingress-nginx/controller:v1.10.1
	I0620 17:02:52.982740    8431 out.go:177]   - Using image registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.6.0
	I0620 17:02:52.984464    8431 addons.go:431] installing /etc/kubernetes/addons/storage-provisioner-rancher.yaml
	I0620 17:02:52.984483    8431 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/storage-provisioner-rancher.yaml (3113 bytes)
	I0620 17:02:52.984546    8431 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-705802
	I0620 17:02:53.002024    8431 addons.go:431] installing /etc/kubernetes/addons/rbac-external-attacher.yaml
	I0620 17:02:53.002063    8431 ssh_runner.go:362] scp csi-hostpath-driver/rbac/rbac-external-attacher.yaml --> /etc/kubernetes/addons/rbac-external-attacher.yaml (3073 bytes)
	I0620 17:02:53.002133    8431 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-705802
	I0620 17:02:53.004599    8431 out.go:177]   - Using image registry.k8s.io/ingress-nginx/kube-webhook-certgen:v1.4.1
	I0620 17:02:53.005061    8431 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32772 SSHKeyPath:/home/jenkins/minikube-integration/19106-2452/.minikube/machines/addons-705802/id_rsa Username:docker}
	I0620 17:02:53.011200    8431 out.go:177]   - Using image registry.k8s.io/ingress-nginx/kube-webhook-certgen:v1.4.1
	I0620 17:02:53.014273    8431 addons.go:431] installing /etc/kubernetes/addons/ingress-deploy.yaml
	I0620 17:02:53.014297    8431 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/ingress-deploy.yaml (16078 bytes)
	I0620 17:02:53.014375    8431 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-705802
	I0620 17:02:53.033075    8431 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32772 SSHKeyPath:/home/jenkins/minikube-integration/19106-2452/.minikube/machines/addons-705802/id_rsa Username:docker}
	I0620 17:02:53.052394    8431 addons.go:431] installing /etc/kubernetes/addons/storageclass.yaml
	I0620 17:02:53.052418    8431 ssh_runner.go:362] scp storageclass/storageclass.yaml --> /etc/kubernetes/addons/storageclass.yaml (271 bytes)
	I0620 17:02:53.052483    8431 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-705802
	I0620 17:02:53.090965    8431 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32772 SSHKeyPath:/home/jenkins/minikube-integration/19106-2452/.minikube/machines/addons-705802/id_rsa Username:docker}
	I0620 17:02:53.109855    8431 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32772 SSHKeyPath:/home/jenkins/minikube-integration/19106-2452/.minikube/machines/addons-705802/id_rsa Username:docker}
	I0620 17:02:53.144959    8431 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32772 SSHKeyPath:/home/jenkins/minikube-integration/19106-2452/.minikube/machines/addons-705802/id_rsa Username:docker}
	I0620 17:02:53.147340    8431 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32772 SSHKeyPath:/home/jenkins/minikube-integration/19106-2452/.minikube/machines/addons-705802/id_rsa Username:docker}
	I0620 17:02:53.155483    8431 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32772 SSHKeyPath:/home/jenkins/minikube-integration/19106-2452/.minikube/machines/addons-705802/id_rsa Username:docker}
	I0620 17:02:53.167233    8431 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32772 SSHKeyPath:/home/jenkins/minikube-integration/19106-2452/.minikube/machines/addons-705802/id_rsa Username:docker}
	I0620 17:02:53.169927    8431 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32772 SSHKeyPath:/home/jenkins/minikube-integration/19106-2452/.minikube/machines/addons-705802/id_rsa Username:docker}
	I0620 17:02:53.249091    8431 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.30.2/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml | sed -e '/^        forward . \/etc\/resolv.conf.*/i \        hosts {\n           192.168.49.1 host.minikube.internal\n           fallthrough\n        }' -e '/^        errors *$/i \        log' | sudo /var/lib/minikube/binaries/v1.30.2/kubectl --kubeconfig=/var/lib/minikube/kubeconfig replace -f -"
	I0620 17:02:53.281765    8431 ssh_runner.go:195] Run: sudo systemctl start kubelet
	I0620 17:02:53.734618    8431 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.30.2/kubectl apply -f /etc/kubernetes/addons/deployment.yaml
	I0620 17:02:53.816695    8431 addons.go:431] installing /etc/kubernetes/addons/registry-svc.yaml
	I0620 17:02:53.816775    8431 ssh_runner.go:362] scp registry/registry-svc.yaml --> /etc/kubernetes/addons/registry-svc.yaml (398 bytes)
	I0620 17:02:53.844086    8431 addons.go:431] installing /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml
	I0620 17:02:53.844161    8431 ssh_runner.go:362] scp volumesnapshots/snapshot.storage.k8s.io_volumesnapshotclasses.yaml --> /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml (6471 bytes)
	I0620 17:02:53.907615    8431 addons.go:431] installing /etc/kubernetes/addons/rbac-hostpath.yaml
	I0620 17:02:53.907694    8431 ssh_runner.go:362] scp csi-hostpath-driver/rbac/rbac-hostpath.yaml --> /etc/kubernetes/addons/rbac-hostpath.yaml (4266 bytes)
	I0620 17:02:53.919722    8431 addons.go:431] installing /etc/kubernetes/addons/yakd-sa.yaml
	I0620 17:02:53.919808    8431 ssh_runner.go:362] scp yakd/yakd-sa.yaml --> /etc/kubernetes/addons/yakd-sa.yaml (247 bytes)
	I0620 17:02:54.002697    8431 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.30.2/kubectl apply -f /etc/kubernetes/addons/volcano-deployment.yaml
	I0620 17:02:54.012450    8431 addons.go:431] installing /etc/kubernetes/addons/metrics-server-deployment.yaml
	I0620 17:02:54.012531    8431 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/metrics-server-deployment.yaml (1907 bytes)
	I0620 17:02:54.035028    8431 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.30.2/kubectl apply -f /etc/kubernetes/addons/nvidia-device-plugin.yaml
	I0620 17:02:54.044590    8431 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.30.2/kubectl apply -f /etc/kubernetes/addons/ingress-dns-pod.yaml
	I0620 17:02:54.091315    8431 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.30.2/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml
	I0620 17:02:54.144259    8431 addons.go:431] installing /etc/kubernetes/addons/registry-proxy.yaml
	I0620 17:02:54.144321    8431 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/registry-proxy.yaml (947 bytes)
	I0620 17:02:54.147855    8431 addons.go:431] installing /etc/kubernetes/addons/ig-serviceaccount.yaml
	I0620 17:02:54.147940    8431 ssh_runner.go:362] scp inspektor-gadget/ig-serviceaccount.yaml --> /etc/kubernetes/addons/ig-serviceaccount.yaml (80 bytes)
	I0620 17:02:54.162435    8431 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.30.2/kubectl apply -f /etc/kubernetes/addons/storage-provisioner-rancher.yaml
	I0620 17:02:54.193039    8431 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.30.2/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml
	I0620 17:02:54.206553    8431 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.30.2/kubectl apply -f /etc/kubernetes/addons/ingress-deploy.yaml
	I0620 17:02:54.223265    8431 addons.go:431] installing /etc/kubernetes/addons/rbac-external-health-monitor-controller.yaml
	I0620 17:02:54.223338    8431 ssh_runner.go:362] scp csi-hostpath-driver/rbac/rbac-external-health-monitor-controller.yaml --> /etc/kubernetes/addons/rbac-external-health-monitor-controller.yaml (3038 bytes)
	I0620 17:02:54.229488    8431 addons.go:431] installing /etc/kubernetes/addons/yakd-crb.yaml
	I0620 17:02:54.229637    8431 ssh_runner.go:362] scp yakd/yakd-crb.yaml --> /etc/kubernetes/addons/yakd-crb.yaml (422 bytes)
	I0620 17:02:54.251692    8431 addons.go:431] installing /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml
	I0620 17:02:54.251778    8431 ssh_runner.go:362] scp volumesnapshots/snapshot.storage.k8s.io_volumesnapshotcontents.yaml --> /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml (23126 bytes)
	I0620 17:02:54.321436    8431 addons.go:431] installing /etc/kubernetes/addons/metrics-server-rbac.yaml
	I0620 17:02:54.321513    8431 ssh_runner.go:362] scp metrics-server/metrics-server-rbac.yaml --> /etc/kubernetes/addons/metrics-server-rbac.yaml (2175 bytes)
	I0620 17:02:54.406212    8431 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.30.2/kubectl apply -f /etc/kubernetes/addons/registry-rc.yaml -f /etc/kubernetes/addons/registry-svc.yaml -f /etc/kubernetes/addons/registry-proxy.yaml
	I0620 17:02:54.409256    8431 addons.go:431] installing /etc/kubernetes/addons/ig-role.yaml
	I0620 17:02:54.409326    8431 ssh_runner.go:362] scp inspektor-gadget/ig-role.yaml --> /etc/kubernetes/addons/ig-role.yaml (210 bytes)
	I0620 17:02:54.413726    8431 addons.go:431] installing /etc/kubernetes/addons/yakd-svc.yaml
	I0620 17:02:54.413800    8431 ssh_runner.go:362] scp yakd/yakd-svc.yaml --> /etc/kubernetes/addons/yakd-svc.yaml (412 bytes)
	I0620 17:02:54.500839    8431 addons.go:431] installing /etc/kubernetes/addons/rbac-external-provisioner.yaml
	I0620 17:02:54.500914    8431 ssh_runner.go:362] scp csi-hostpath-driver/rbac/rbac-external-provisioner.yaml --> /etc/kubernetes/addons/rbac-external-provisioner.yaml (4442 bytes)
	I0620 17:02:54.513566    8431 addons.go:431] installing /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml
	I0620 17:02:54.513637    8431 ssh_runner.go:362] scp volumesnapshots/snapshot.storage.k8s.io_volumesnapshots.yaml --> /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml (19582 bytes)
	I0620 17:02:54.540897    8431 addons.go:431] installing /etc/kubernetes/addons/metrics-server-service.yaml
	I0620 17:02:54.540970    8431 ssh_runner.go:362] scp metrics-server/metrics-server-service.yaml --> /etc/kubernetes/addons/metrics-server-service.yaml (446 bytes)
	I0620 17:02:54.651827    8431 addons.go:431] installing /etc/kubernetes/addons/ig-rolebinding.yaml
	I0620 17:02:54.651909    8431 ssh_runner.go:362] scp inspektor-gadget/ig-rolebinding.yaml --> /etc/kubernetes/addons/ig-rolebinding.yaml (244 bytes)
	I0620 17:02:54.821417    8431 addons.go:431] installing /etc/kubernetes/addons/rbac-external-resizer.yaml
	I0620 17:02:54.821500    8431 ssh_runner.go:362] scp csi-hostpath-driver/rbac/rbac-external-resizer.yaml --> /etc/kubernetes/addons/rbac-external-resizer.yaml (2943 bytes)
	I0620 17:02:54.825953    8431 addons.go:431] installing /etc/kubernetes/addons/yakd-dp.yaml
	I0620 17:02:54.826058    8431 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/yakd-dp.yaml (2017 bytes)
	I0620 17:02:54.879840    8431 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.30.2/kubectl apply -f /etc/kubernetes/addons/metrics-apiservice.yaml -f /etc/kubernetes/addons/metrics-server-deployment.yaml -f /etc/kubernetes/addons/metrics-server-rbac.yaml -f /etc/kubernetes/addons/metrics-server-service.yaml
	I0620 17:02:54.943021    8431 addons.go:431] installing /etc/kubernetes/addons/rbac-external-snapshotter.yaml
	I0620 17:02:54.943097    8431 ssh_runner.go:362] scp csi-hostpath-driver/rbac/rbac-external-snapshotter.yaml --> /etc/kubernetes/addons/rbac-external-snapshotter.yaml (3149 bytes)
	I0620 17:02:54.949000    8431 addons.go:431] installing /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml
	I0620 17:02:54.949076    8431 ssh_runner.go:362] scp volumesnapshots/rbac-volume-snapshot-controller.yaml --> /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml (3545 bytes)
	I0620 17:02:54.953084    8431 addons.go:431] installing /etc/kubernetes/addons/ig-clusterrole.yaml
	I0620 17:02:54.953155    8431 ssh_runner.go:362] scp inspektor-gadget/ig-clusterrole.yaml --> /etc/kubernetes/addons/ig-clusterrole.yaml (1485 bytes)
	I0620 17:02:54.999642    8431 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.30.2/kubectl apply -f /etc/kubernetes/addons/yakd-ns.yaml -f /etc/kubernetes/addons/yakd-sa.yaml -f /etc/kubernetes/addons/yakd-crb.yaml -f /etc/kubernetes/addons/yakd-svc.yaml -f /etc/kubernetes/addons/yakd-dp.yaml
	I0620 17:02:55.109344    8431 addons.go:431] installing /etc/kubernetes/addons/ig-clusterrolebinding.yaml
	I0620 17:02:55.109421    8431 ssh_runner.go:362] scp inspektor-gadget/ig-clusterrolebinding.yaml --> /etc/kubernetes/addons/ig-clusterrolebinding.yaml (274 bytes)
	I0620 17:02:55.128288    8431 addons.go:431] installing /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml
	I0620 17:02:55.128363    8431 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml (1475 bytes)
	I0620 17:02:55.129164    8431 addons.go:431] installing /etc/kubernetes/addons/csi-hostpath-attacher.yaml
	I0620 17:02:55.129232    8431 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/csi-hostpath-attacher.yaml (2143 bytes)
	I0620 17:02:55.306815    8431 addons.go:431] installing /etc/kubernetes/addons/ig-crd.yaml
	I0620 17:02:55.306890    8431 ssh_runner.go:362] scp inspektor-gadget/ig-crd.yaml --> /etc/kubernetes/addons/ig-crd.yaml (5216 bytes)
	I0620 17:02:55.316535    8431 addons.go:431] installing /etc/kubernetes/addons/csi-hostpath-driverinfo.yaml
	I0620 17:02:55.316608    8431 ssh_runner.go:362] scp csi-hostpath-driver/deploy/csi-hostpath-driverinfo.yaml --> /etc/kubernetes/addons/csi-hostpath-driverinfo.yaml (1274 bytes)
	I0620 17:02:55.341169    8431 addons.go:431] installing /etc/kubernetes/addons/csi-hostpath-plugin.yaml
	I0620 17:02:55.341238    8431 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/csi-hostpath-plugin.yaml (8201 bytes)
	I0620 17:02:55.360544    8431 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.30.2/kubectl apply -f /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml -f /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml -f /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml
	I0620 17:02:55.608671    8431 ssh_runner.go:235] Completed: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.30.2/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml | sed -e '/^        forward . \/etc\/resolv.conf.*/i \        hosts {\n           192.168.49.1 host.minikube.internal\n           fallthrough\n        }' -e '/^        errors *$/i \        log' | sudo /var/lib/minikube/binaries/v1.30.2/kubectl --kubeconfig=/var/lib/minikube/kubeconfig replace -f -": (2.359539261s)
	I0620 17:02:55.608701    8431 start.go:946] {"host.minikube.internal": 192.168.49.1} host record injected into CoreDNS's ConfigMap
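The sed pipeline issued at 17:02:53.249091 and completed above makes exactly two edits to the coredns ConfigMap: it inserts a log directive before errors and a hosts block before the forward . /etc/resolv.conf line, so that host.minikube.internal resolves to the gateway address 192.168.49.1. Applied to a stock kubeadm Corefile the result looks roughly like the fragment below; only the log line and the hosts block come from the command in this log, the surrounding directives are the usual defaults shown for context.

	.:53 {
	    log
	    errors
	    health
	    ready
	    kubernetes cluster.local in-addr.arpa ip6.arpa { ... }
	    hosts {
	       192.168.49.1 host.minikube.internal
	       fallthrough
	    }
	    forward . /etc/resolv.conf
	    cache 30
	}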
	I0620 17:02:55.609648    8431 ssh_runner.go:235] Completed: sudo systemctl start kubelet: (2.327855007s)
	I0620 17:02:55.610374    8431 node_ready.go:35] waiting up to 6m0s for node "addons-705802" to be "Ready" ...
	I0620 17:02:55.615525    8431 node_ready.go:49] node "addons-705802" has status "Ready":"True"
	I0620 17:02:55.615550    8431 node_ready.go:38] duration metric: took 5.150949ms for node "addons-705802" to be "Ready" ...
	I0620 17:02:55.615561    8431 pod_ready.go:35] extra waiting up to 6m0s for all system-critical pods including labels [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] to be "Ready" ...
	I0620 17:02:55.634241    8431 pod_ready.go:78] waiting up to 6m0s for pod "coredns-7db6d8ff4d-kdjpc" in "kube-system" namespace to be "Ready" ...
	I0620 17:02:55.646897    8431 pod_ready.go:92] pod "coredns-7db6d8ff4d-kdjpc" in "kube-system" namespace has status "Ready":"True"
	I0620 17:02:55.646922    8431 pod_ready.go:81] duration metric: took 12.645279ms for pod "coredns-7db6d8ff4d-kdjpc" in "kube-system" namespace to be "Ready" ...
	I0620 17:02:55.646935    8431 pod_ready.go:78] waiting up to 6m0s for pod "coredns-7db6d8ff4d-xx9rs" in "kube-system" namespace to be "Ready" ...
	I0620 17:02:55.679539    8431 pod_ready.go:92] pod "coredns-7db6d8ff4d-xx9rs" in "kube-system" namespace has status "Ready":"True"
	I0620 17:02:55.679565    8431 pod_ready.go:81] duration metric: took 32.622469ms for pod "coredns-7db6d8ff4d-xx9rs" in "kube-system" namespace to be "Ready" ...
	I0620 17:02:55.679578    8431 pod_ready.go:78] waiting up to 6m0s for pod "etcd-addons-705802" in "kube-system" namespace to be "Ready" ...
	I0620 17:02:55.696494    8431 addons.go:431] installing /etc/kubernetes/addons/csi-hostpath-resizer.yaml
	I0620 17:02:55.696518    8431 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/csi-hostpath-resizer.yaml (2191 bytes)
	I0620 17:02:55.710296    8431 pod_ready.go:92] pod "etcd-addons-705802" in "kube-system" namespace has status "Ready":"True"
	I0620 17:02:55.710322    8431 pod_ready.go:81] duration metric: took 30.736326ms for pod "etcd-addons-705802" in "kube-system" namespace to be "Ready" ...
	I0620 17:02:55.710335    8431 pod_ready.go:78] waiting up to 6m0s for pod "kube-apiserver-addons-705802" in "kube-system" namespace to be "Ready" ...
	I0620 17:02:55.716821    8431 addons.go:431] installing /etc/kubernetes/addons/ig-daemonset.yaml
	I0620 17:02:55.716846    8431 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/ig-daemonset.yaml (7735 bytes)
	I0620 17:02:55.725698    8431 pod_ready.go:92] pod "kube-apiserver-addons-705802" in "kube-system" namespace has status "Ready":"True"
	I0620 17:02:55.725725    8431 pod_ready.go:81] duration metric: took 15.373365ms for pod "kube-apiserver-addons-705802" in "kube-system" namespace to be "Ready" ...
	I0620 17:02:55.725737    8431 pod_ready.go:78] waiting up to 6m0s for pod "kube-controller-manager-addons-705802" in "kube-system" namespace to be "Ready" ...
	I0620 17:02:55.894467    8431 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.30.2/kubectl apply -f /etc/kubernetes/addons/ig-namespace.yaml -f /etc/kubernetes/addons/ig-serviceaccount.yaml -f /etc/kubernetes/addons/ig-role.yaml -f /etc/kubernetes/addons/ig-rolebinding.yaml -f /etc/kubernetes/addons/ig-clusterrole.yaml -f /etc/kubernetes/addons/ig-clusterrolebinding.yaml -f /etc/kubernetes/addons/ig-crd.yaml -f /etc/kubernetes/addons/ig-daemonset.yaml
	I0620 17:02:56.014929    8431 pod_ready.go:92] pod "kube-controller-manager-addons-705802" in "kube-system" namespace has status "Ready":"True"
	I0620 17:02:56.014961    8431 pod_ready.go:81] duration metric: took 289.214896ms for pod "kube-controller-manager-addons-705802" in "kube-system" namespace to be "Ready" ...
	I0620 17:02:56.014975    8431 pod_ready.go:78] waiting up to 6m0s for pod "kube-proxy-5znkp" in "kube-system" namespace to be "Ready" ...
	I0620 17:02:56.050529    8431 addons.go:431] installing /etc/kubernetes/addons/csi-hostpath-storageclass.yaml
	I0620 17:02:56.050556    8431 ssh_runner.go:362] scp csi-hostpath-driver/deploy/csi-hostpath-storageclass.yaml --> /etc/kubernetes/addons/csi-hostpath-storageclass.yaml (846 bytes)
	I0620 17:02:56.112583    8431 kapi.go:248] "coredns" deployment in "kube-system" namespace and "addons-705802" context rescaled to 1 replicas
	I0620 17:02:56.342124    8431 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.30.2/kubectl apply -f /etc/kubernetes/addons/rbac-external-attacher.yaml -f /etc/kubernetes/addons/rbac-hostpath.yaml -f /etc/kubernetes/addons/rbac-external-health-monitor-controller.yaml -f /etc/kubernetes/addons/rbac-external-provisioner.yaml -f /etc/kubernetes/addons/rbac-external-resizer.yaml -f /etc/kubernetes/addons/rbac-external-snapshotter.yaml -f /etc/kubernetes/addons/csi-hostpath-attacher.yaml -f /etc/kubernetes/addons/csi-hostpath-driverinfo.yaml -f /etc/kubernetes/addons/csi-hostpath-plugin.yaml -f /etc/kubernetes/addons/csi-hostpath-resizer.yaml -f /etc/kubernetes/addons/csi-hostpath-storageclass.yaml
	I0620 17:02:56.399321    8431 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.30.2/kubectl apply -f /etc/kubernetes/addons/deployment.yaml: (2.664623407s)
	I0620 17:02:56.427211    8431 pod_ready.go:92] pod "kube-proxy-5znkp" in "kube-system" namespace has status "Ready":"True"
	I0620 17:02:56.427235    8431 pod_ready.go:81] duration metric: took 412.251528ms for pod "kube-proxy-5znkp" in "kube-system" namespace to be "Ready" ...
	I0620 17:02:56.427247    8431 pod_ready.go:78] waiting up to 6m0s for pod "kube-scheduler-addons-705802" in "kube-system" namespace to be "Ready" ...
	I0620 17:02:56.820998    8431 pod_ready.go:92] pod "kube-scheduler-addons-705802" in "kube-system" namespace has status "Ready":"True"
	I0620 17:02:56.821025    8431 pod_ready.go:81] duration metric: took 393.769274ms for pod "kube-scheduler-addons-705802" in "kube-system" namespace to be "Ready" ...
	I0620 17:02:56.821037    8431 pod_ready.go:78] waiting up to 6m0s for pod "nvidia-device-plugin-daemonset-kxmqr" in "kube-system" namespace to be "Ready" ...
	I0620 17:02:58.829038    8431 pod_ready.go:102] pod "nvidia-device-plugin-daemonset-kxmqr" in "kube-system" namespace has status "Ready":"False"
	I0620 17:02:59.861686    8431 ssh_runner.go:362] scp memory --> /var/lib/minikube/google_application_credentials.json (162 bytes)
	I0620 17:02:59.861762    8431 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-705802
	I0620 17:02:59.886905    8431 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32772 SSHKeyPath:/home/jenkins/minikube-integration/19106-2452/.minikube/machines/addons-705802/id_rsa Username:docker}
	I0620 17:03:00.830970    8431 pod_ready.go:102] pod "nvidia-device-plugin-daemonset-kxmqr" in "kube-system" namespace has status "Ready":"False"
	I0620 17:03:01.049697    8431 ssh_runner.go:362] scp memory --> /var/lib/minikube/google_cloud_project (12 bytes)
	I0620 17:03:01.245910    8431 addons.go:234] Setting addon gcp-auth=true in "addons-705802"
	I0620 17:03:01.245962    8431 host.go:66] Checking if "addons-705802" exists ...
	I0620 17:03:01.246421    8431 cli_runner.go:164] Run: docker container inspect addons-705802 --format={{.State.Status}}
	I0620 17:03:01.268515    8431 ssh_runner.go:195] Run: cat /var/lib/minikube/google_application_credentials.json
	I0620 17:03:01.268568    8431 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-705802
	I0620 17:03:01.294698    8431 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32772 SSHKeyPath:/home/jenkins/minikube-integration/19106-2452/.minikube/machines/addons-705802/id_rsa Username:docker}
	I0620 17:03:03.327273    8431 pod_ready.go:102] pod "nvidia-device-plugin-daemonset-kxmqr" in "kube-system" namespace has status "Ready":"False"
	I0620 17:03:04.947211    8431 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.30.2/kubectl apply -f /etc/kubernetes/addons/volcano-deployment.yaml: (10.944438154s)
	I0620 17:03:04.947271    8431 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.30.2/kubectl apply -f /etc/kubernetes/addons/nvidia-device-plugin.yaml: (10.912154773s)
	I0620 17:03:04.947309    8431 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.30.2/kubectl apply -f /etc/kubernetes/addons/ingress-dns-pod.yaml: (10.902649316s)
	I0620 17:03:04.947380    8431 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.30.2/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml: (10.855992985s)
	I0620 17:03:04.947432    8431 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.30.2/kubectl apply -f /etc/kubernetes/addons/storage-provisioner-rancher.yaml: (10.784935616s)
	I0620 17:03:04.947627    8431 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.30.2/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml: (10.754518537s)
	I0620 17:03:04.947780    8431 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.30.2/kubectl apply -f /etc/kubernetes/addons/ingress-deploy.yaml: (10.741171611s)
	I0620 17:03:04.947815    8431 addons.go:475] Verifying addon ingress=true in "addons-705802"
	I0620 17:03:04.948005    8431 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.30.2/kubectl apply -f /etc/kubernetes/addons/registry-rc.yaml -f /etc/kubernetes/addons/registry-svc.yaml -f /etc/kubernetes/addons/registry-proxy.yaml: (10.541719869s)
	I0620 17:03:04.948084    8431 addons.go:475] Verifying addon registry=true in "addons-705802"
	I0620 17:03:04.948281    8431 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.30.2/kubectl apply -f /etc/kubernetes/addons/metrics-apiservice.yaml -f /etc/kubernetes/addons/metrics-server-deployment.yaml -f /etc/kubernetes/addons/metrics-server-rbac.yaml -f /etc/kubernetes/addons/metrics-server-service.yaml: (10.068362175s)
	I0620 17:03:04.948302    8431 addons.go:475] Verifying addon metrics-server=true in "addons-705802"
	I0620 17:03:04.948341    8431 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.30.2/kubectl apply -f /etc/kubernetes/addons/yakd-ns.yaml -f /etc/kubernetes/addons/yakd-sa.yaml -f /etc/kubernetes/addons/yakd-crb.yaml -f /etc/kubernetes/addons/yakd-svc.yaml -f /etc/kubernetes/addons/yakd-dp.yaml: (9.94862737s)
	I0620 17:03:04.948594    8431 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.30.2/kubectl apply -f /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml -f /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml -f /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml: (9.587972449s)
	W0620 17:03:04.948854    8431 addons.go:457] apply failed, will retry: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.30.2/kubectl apply -f /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml -f /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml -f /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml: Process exited with status 1
	stdout:
	customresourcedefinition.apiextensions.k8s.io/volumesnapshotclasses.snapshot.storage.k8s.io created
	customresourcedefinition.apiextensions.k8s.io/volumesnapshotcontents.snapshot.storage.k8s.io created
	customresourcedefinition.apiextensions.k8s.io/volumesnapshots.snapshot.storage.k8s.io created
	serviceaccount/snapshot-controller created
	clusterrole.rbac.authorization.k8s.io/snapshot-controller-runner created
	clusterrolebinding.rbac.authorization.k8s.io/snapshot-controller-role created
	role.rbac.authorization.k8s.io/snapshot-controller-leaderelection created
	rolebinding.rbac.authorization.k8s.io/snapshot-controller-leaderelection created
	deployment.apps/snapshot-controller created
	
	stderr:
	error: resource mapping not found for name: "csi-hostpath-snapclass" namespace: "" from "/etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml": no matches for kind "VolumeSnapshotClass" in version "snapshot.storage.k8s.io/v1"
	ensure CRDs are installed first
	I0620 17:03:04.948880    8431 retry.go:31] will retry after 205.774043ms: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.30.2/kubectl apply -f /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml -f /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml -f /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml: Process exited with status 1
	stdout:
	customresourcedefinition.apiextensions.k8s.io/volumesnapshotclasses.snapshot.storage.k8s.io created
	customresourcedefinition.apiextensions.k8s.io/volumesnapshotcontents.snapshot.storage.k8s.io created
	customresourcedefinition.apiextensions.k8s.io/volumesnapshots.snapshot.storage.k8s.io created
	serviceaccount/snapshot-controller created
	clusterrole.rbac.authorization.k8s.io/snapshot-controller-runner created
	clusterrolebinding.rbac.authorization.k8s.io/snapshot-controller-role created
	role.rbac.authorization.k8s.io/snapshot-controller-leaderelection created
	rolebinding.rbac.authorization.k8s.io/snapshot-controller-leaderelection created
	deployment.apps/snapshot-controller created
	
	stderr:
	error: resource mapping not found for name: "csi-hostpath-snapclass" namespace: "" from "/etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml": no matches for kind "VolumeSnapshotClass" in version "snapshot.storage.k8s.io/v1"
	ensure CRDs are installed first
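The two failures above are a CRD-establishment race rather than a broken manifest: the VolumeSnapshotClass in csi-hostpath-snapshotclass.yaml is applied in the same kubectl invocation that creates the snapshot.storage.k8s.io CRDs, so the API server is not yet serving snapshot.storage.k8s.io/v1 when the custom resource is mapped. minikube simply retries the apply (it succeeds at 17:03:07 below). As a rough manual sketch of avoiding the race with the same addon manifests (the split-and-wait ordering is an illustration, not what minikube itself does):

	# Apply the CRDs first, wait until the API server reports them Established,
	# then apply the VolumeSnapshotClass that depends on them.
	kubectl apply -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml \
	              -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml \
	              -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml
	kubectl wait --for=condition=Established --timeout=60s \
	              crd/volumesnapshotclasses.snapshot.storage.k8s.io \
	              crd/volumesnapshotcontents.snapshot.storage.k8s.io \
	              crd/volumesnapshots.snapshot.storage.k8s.io
	kubectl apply -f /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml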
	I0620 17:03:04.948676    8431 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.30.2/kubectl apply -f /etc/kubernetes/addons/ig-namespace.yaml -f /etc/kubernetes/addons/ig-serviceaccount.yaml -f /etc/kubernetes/addons/ig-role.yaml -f /etc/kubernetes/addons/ig-rolebinding.yaml -f /etc/kubernetes/addons/ig-clusterrole.yaml -f /etc/kubernetes/addons/ig-clusterrolebinding.yaml -f /etc/kubernetes/addons/ig-crd.yaml -f /etc/kubernetes/addons/ig-daemonset.yaml: (9.05417726s)
	I0620 17:03:04.950695    8431 out.go:177] * To access YAKD - Kubernetes Dashboard, wait for Pod to be ready and run the following command:
	
		minikube -p addons-705802 service yakd-dashboard -n yakd-dashboard
	
	I0620 17:03:04.950766    8431 out.go:177] * Verifying ingress addon...
	I0620 17:03:04.950706    8431 out.go:177] * Verifying registry addon...
	I0620 17:03:04.954608    8431 kapi.go:75] Waiting for pod with label "app.kubernetes.io/name=ingress-nginx" in ns "ingress-nginx" ...
	I0620 17:03:04.955491    8431 kapi.go:75] Waiting for pod with label "kubernetes.io/minikube-addons=registry" in ns "kube-system" ...
	I0620 17:03:04.965072    8431 kapi.go:86] Found 3 Pods for label selector app.kubernetes.io/name=ingress-nginx
	I0620 17:03:04.965138    8431 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	W0620 17:03:04.968908    8431 out.go:239] ! Enabling 'default-storageclass' returned an error: running callbacks: [Error making standard the default storage class: Error while marking storage class local-path as non-default: Operation cannot be fulfilled on storageclasses.storage.k8s.io "local-path": the object has been modified; please apply your changes to the latest version and try again]
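The 'default-storageclass' warning is an optimistic-concurrency conflict: the addon tried to clear the default annotation on the local-path StorageClass (created moments earlier by storage-provisioner-rancher) while another writer held a newer resourceVersion, so the update was rejected. It is non-fatal for this test; a retry against the latest object version succeeds. A hedged manual equivalent, using the StorageClass names from this run and the standard is-default-class annotation:

	# kubectl patch re-reads the object before patching, so the resourceVersion
	# conflict does not recur; local-path is demoted and standard made default.
	kubectl patch storageclass local-path \
	  -p '{"metadata":{"annotations":{"storageclass.kubernetes.io/is-default-class":"false"}}}'
	kubectl patch storageclass standard \
	  -p '{"metadata":{"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}'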
	I0620 17:03:04.979832    8431 kapi.go:86] Found 2 Pods for label selector kubernetes.io/minikube-addons=registry
	I0620 17:03:04.979855    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I0620 17:03:05.155285    8431 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.30.2/kubectl apply --force -f /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml -f /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml -f /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml
	I0620 17:03:05.329036    8431 pod_ready.go:102] pod "nvidia-device-plugin-daemonset-kxmqr" in "kube-system" namespace has status "Ready":"False"
	I0620 17:03:05.510630    8431 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0620 17:03:05.539261    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I0620 17:03:05.676880    8431 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.30.2/kubectl apply -f /etc/kubernetes/addons/rbac-external-attacher.yaml -f /etc/kubernetes/addons/rbac-hostpath.yaml -f /etc/kubernetes/addons/rbac-external-health-monitor-controller.yaml -f /etc/kubernetes/addons/rbac-external-provisioner.yaml -f /etc/kubernetes/addons/rbac-external-resizer.yaml -f /etc/kubernetes/addons/rbac-external-snapshotter.yaml -f /etc/kubernetes/addons/csi-hostpath-attacher.yaml -f /etc/kubernetes/addons/csi-hostpath-driverinfo.yaml -f /etc/kubernetes/addons/csi-hostpath-plugin.yaml -f /etc/kubernetes/addons/csi-hostpath-resizer.yaml -f /etc/kubernetes/addons/csi-hostpath-storageclass.yaml: (9.334697642s)
	I0620 17:03:05.676954    8431 addons.go:475] Verifying addon csi-hostpath-driver=true in "addons-705802"
	I0620 17:03:05.677156    8431 ssh_runner.go:235] Completed: cat /var/lib/minikube/google_application_credentials.json: (4.40861912s)
	I0620 17:03:05.679680    8431 out.go:177] * Verifying csi-hostpath-driver addon...
	I0620 17:03:05.679781    8431 out.go:177]   - Using image registry.k8s.io/ingress-nginx/kube-webhook-certgen:v1.4.1
	I0620 17:03:05.682143    8431 out.go:177]   - Using image gcr.io/k8s-minikube/gcp-auth-webhook:v0.1.2
	I0620 17:03:05.682975    8431 kapi.go:75] Waiting for pod with label "kubernetes.io/minikube-addons=csi-hostpath-driver" in ns "kube-system" ...
	I0620 17:03:05.684692    8431 addons.go:431] installing /etc/kubernetes/addons/gcp-auth-ns.yaml
	I0620 17:03:05.684791    8431 ssh_runner.go:362] scp gcp-auth/gcp-auth-ns.yaml --> /etc/kubernetes/addons/gcp-auth-ns.yaml (700 bytes)
	I0620 17:03:05.689730    8431 kapi.go:86] Found 3 Pods for label selector kubernetes.io/minikube-addons=csi-hostpath-driver
	I0620 17:03:05.689804    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0620 17:03:05.774573    8431 addons.go:431] installing /etc/kubernetes/addons/gcp-auth-service.yaml
	I0620 17:03:05.774637    8431 ssh_runner.go:362] scp gcp-auth/gcp-auth-service.yaml --> /etc/kubernetes/addons/gcp-auth-service.yaml (788 bytes)
	I0620 17:03:05.817484    8431 addons.go:431] installing /etc/kubernetes/addons/gcp-auth-webhook.yaml
	I0620 17:03:05.817508    8431 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/gcp-auth-webhook.yaml (5421 bytes)
	I0620 17:03:05.938414    8431 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.30.2/kubectl apply -f /etc/kubernetes/addons/gcp-auth-ns.yaml -f /etc/kubernetes/addons/gcp-auth-service.yaml -f /etc/kubernetes/addons/gcp-auth-webhook.yaml
	I0620 17:03:05.961688    8431 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0620 17:03:05.964334    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I0620 17:03:06.190111    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0620 17:03:06.459540    8431 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0620 17:03:06.462630    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I0620 17:03:06.689427    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0620 17:03:06.962024    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I0620 17:03:06.963263    8431 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0620 17:03:07.189379    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0620 17:03:07.474110    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I0620 17:03:07.474763    8431 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0620 17:03:07.573198    8431 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.30.2/kubectl apply --force -f /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml -f /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml -f /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml: (2.417828803s)
	I0620 17:03:07.573302    8431 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.30.2/kubectl apply -f /etc/kubernetes/addons/gcp-auth-ns.yaml -f /etc/kubernetes/addons/gcp-auth-service.yaml -f /etc/kubernetes/addons/gcp-auth-webhook.yaml: (1.634803686s)
	I0620 17:03:07.576025    8431 addons.go:475] Verifying addon gcp-auth=true in "addons-705802"
	I0620 17:03:07.578130    8431 out.go:177] * Verifying gcp-auth addon...
	I0620 17:03:07.582125    8431 kapi.go:75] Waiting for pod with label "kubernetes.io/minikube-addons=gcp-auth" in ns "gcp-auth" ...
	I0620 17:03:07.587709    8431 kapi.go:86] Found 0 Pods for label selector kubernetes.io/minikube-addons=gcp-auth
	I0620 17:03:07.688967    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0620 17:03:07.828408    8431 pod_ready.go:102] pod "nvidia-device-plugin-daemonset-kxmqr" in "kube-system" namespace has status "Ready":"False"
	I0620 17:03:07.959578    8431 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0620 17:03:07.962658    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I0620 17:03:08.190140    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0620 17:03:08.462303    8431 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0620 17:03:08.463198    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I0620 17:03:08.689394    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0620 17:03:08.962116    8431 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0620 17:03:08.963071    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I0620 17:03:09.189191    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0620 17:03:09.327046    8431 pod_ready.go:92] pod "nvidia-device-plugin-daemonset-kxmqr" in "kube-system" namespace has status "Ready":"True"
	I0620 17:03:09.327072    8431 pod_ready.go:81] duration metric: took 12.506026399s for pod "nvidia-device-plugin-daemonset-kxmqr" in "kube-system" namespace to be "Ready" ...
	I0620 17:03:09.327083    8431 pod_ready.go:38] duration metric: took 13.711510662s for extra waiting for all system-critical and pods with labels [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] to be "Ready" ...
	I0620 17:03:09.327100    8431 api_server.go:52] waiting for apiserver process to appear ...
	I0620 17:03:09.327163    8431 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
	I0620 17:03:09.341777    8431 api_server.go:72] duration metric: took 16.82849621s to wait for apiserver process to appear ...
	I0620 17:03:09.341803    8431 api_server.go:88] waiting for apiserver healthz status ...
	I0620 17:03:09.341823    8431 api_server.go:253] Checking apiserver healthz at https://192.168.49.2:8443/healthz ...
	I0620 17:03:09.349505    8431 api_server.go:279] https://192.168.49.2:8443/healthz returned 200:
	ok
	I0620 17:03:09.350479    8431 api_server.go:141] control plane version: v1.30.2
	I0620 17:03:09.350502    8431 api_server.go:131] duration metric: took 8.691887ms to wait for apiserver health ...
	I0620 17:03:09.350511    8431 system_pods.go:43] waiting for kube-system pods to appear ...
	I0620 17:03:09.361471    8431 system_pods.go:59] 17 kube-system pods found
	I0620 17:03:09.361506    8431 system_pods.go:61] "coredns-7db6d8ff4d-xx9rs" [b1627cff-eee2-4af6-afeb-b4d96270e069] Running
	I0620 17:03:09.361515    8431 system_pods.go:61] "csi-hostpath-attacher-0" [25a35cd3-abd9-4197-a0a3-47dffa62d473] Pending / Ready:ContainersNotReady (containers with unready status: [csi-attacher]) / ContainersReady:ContainersNotReady (containers with unready status: [csi-attacher])
	I0620 17:03:09.361528    8431 system_pods.go:61] "csi-hostpath-resizer-0" [ef551071-8b5e-4a8f-8c7c-c706852d76fd] Pending / Ready:ContainersNotReady (containers with unready status: [csi-resizer]) / ContainersReady:ContainersNotReady (containers with unready status: [csi-resizer])
	I0620 17:03:09.361536    8431 system_pods.go:61] "csi-hostpathplugin-vhr4j" [3b5ddc08-3128-4fbe-ba66-ad818b351bb3] Pending / Ready:ContainersNotReady (containers with unready status: [csi-external-health-monitor-controller node-driver-registrar hostpath liveness-probe csi-provisioner csi-snapshotter]) / ContainersReady:ContainersNotReady (containers with unready status: [csi-external-health-monitor-controller node-driver-registrar hostpath liveness-probe csi-provisioner csi-snapshotter])
	I0620 17:03:09.361545    8431 system_pods.go:61] "etcd-addons-705802" [cbac8784-641d-45fa-bebf-e1780acd3029] Running
	I0620 17:03:09.361550    8431 system_pods.go:61] "kube-apiserver-addons-705802" [6c9f8d1d-a307-44d4-92fd-94148626755d] Running
	I0620 17:03:09.361566    8431 system_pods.go:61] "kube-controller-manager-addons-705802" [3d888b87-3d87-4c8f-ac82-ea2a8308c271] Running
	I0620 17:03:09.361575    8431 system_pods.go:61] "kube-ingress-dns-minikube" [3e89464b-3923-41d9-aa53-c4c00c815b36] Pending / Ready:ContainersNotReady (containers with unready status: [minikube-ingress-dns]) / ContainersReady:ContainersNotReady (containers with unready status: [minikube-ingress-dns])
	I0620 17:03:09.361587    8431 system_pods.go:61] "kube-proxy-5znkp" [2e7dceb2-ea7b-47c0-8b9c-8b01c1233910] Running
	I0620 17:03:09.361594    8431 system_pods.go:61] "kube-scheduler-addons-705802" [8f616426-3ea1-447e-a3f6-71fd0e1661b5] Running
	I0620 17:03:09.361599    8431 system_pods.go:61] "metrics-server-c59844bb4-q78st" [c7a25cfe-a4af-4cb4-abb1-0099ff026057] Pending / Ready:ContainersNotReady (containers with unready status: [metrics-server]) / ContainersReady:ContainersNotReady (containers with unready status: [metrics-server])
	I0620 17:03:09.361604    8431 system_pods.go:61] "nvidia-device-plugin-daemonset-kxmqr" [3f82c240-bb27-4a72-a3f6-7af8eb39a770] Running
	I0620 17:03:09.361621    8431 system_pods.go:61] "registry-proxy-prpxx" [b1d2e1a6-ebd8-4490-94d7-be9a7e2a1920] Pending / Ready:ContainersNotReady (containers with unready status: [registry-proxy]) / ContainersReady:ContainersNotReady (containers with unready status: [registry-proxy])
	I0620 17:03:09.361626    8431 system_pods.go:61] "registry-xkjrc" [e8fb038a-058a-4a6e-9936-dd674eeb2189] Pending / Ready:ContainersNotReady (containers with unready status: [registry]) / ContainersReady:ContainersNotReady (containers with unready status: [registry])
	I0620 17:03:09.361632    8431 system_pods.go:61] "snapshot-controller-745499f584-4b4kb" [06b1bea9-035f-4060-aec1-e44054c29e19] Pending / Ready:ContainersNotReady (containers with unready status: [volume-snapshot-controller]) / ContainersReady:ContainersNotReady (containers with unready status: [volume-snapshot-controller])
	I0620 17:03:09.361643    8431 system_pods.go:61] "snapshot-controller-745499f584-p65kg" [4e15d845-866d-4016-844c-195b6cd467f4] Pending / Ready:ContainersNotReady (containers with unready status: [volume-snapshot-controller]) / ContainersReady:ContainersNotReady (containers with unready status: [volume-snapshot-controller])
	I0620 17:03:09.361660    8431 system_pods.go:61] "storage-provisioner" [e6f8d5d7-4787-4937-b437-5abd495907ff] Running
	I0620 17:03:09.361672    8431 system_pods.go:74] duration metric: took 11.156579ms to wait for pod list to return data ...
	I0620 17:03:09.361686    8431 default_sa.go:34] waiting for default service account to be created ...
	I0620 17:03:09.364061    8431 default_sa.go:45] found service account: "default"
	I0620 17:03:09.364087    8431 default_sa.go:55] duration metric: took 2.39403ms for default service account to be created ...
	I0620 17:03:09.364096    8431 system_pods.go:116] waiting for k8s-apps to be running ...
	I0620 17:03:09.375315    8431 system_pods.go:86] 17 kube-system pods found
	I0620 17:03:09.375345    8431 system_pods.go:89] "coredns-7db6d8ff4d-xx9rs" [b1627cff-eee2-4af6-afeb-b4d96270e069] Running
	I0620 17:03:09.375354    8431 system_pods.go:89] "csi-hostpath-attacher-0" [25a35cd3-abd9-4197-a0a3-47dffa62d473] Pending / Ready:ContainersNotReady (containers with unready status: [csi-attacher]) / ContainersReady:ContainersNotReady (containers with unready status: [csi-attacher])
	I0620 17:03:09.375377    8431 system_pods.go:89] "csi-hostpath-resizer-0" [ef551071-8b5e-4a8f-8c7c-c706852d76fd] Pending / Ready:ContainersNotReady (containers with unready status: [csi-resizer]) / ContainersReady:ContainersNotReady (containers with unready status: [csi-resizer])
	I0620 17:03:09.375391    8431 system_pods.go:89] "csi-hostpathplugin-vhr4j" [3b5ddc08-3128-4fbe-ba66-ad818b351bb3] Pending / Ready:ContainersNotReady (containers with unready status: [csi-external-health-monitor-controller node-driver-registrar hostpath liveness-probe csi-provisioner csi-snapshotter]) / ContainersReady:ContainersNotReady (containers with unready status: [csi-external-health-monitor-controller node-driver-registrar hostpath liveness-probe csi-provisioner csi-snapshotter])
	I0620 17:03:09.375404    8431 system_pods.go:89] "etcd-addons-705802" [cbac8784-641d-45fa-bebf-e1780acd3029] Running
	I0620 17:03:09.375412    8431 system_pods.go:89] "kube-apiserver-addons-705802" [6c9f8d1d-a307-44d4-92fd-94148626755d] Running
	I0620 17:03:09.375417    8431 system_pods.go:89] "kube-controller-manager-addons-705802" [3d888b87-3d87-4c8f-ac82-ea2a8308c271] Running
	I0620 17:03:09.375429    8431 system_pods.go:89] "kube-ingress-dns-minikube" [3e89464b-3923-41d9-aa53-c4c00c815b36] Pending / Ready:ContainersNotReady (containers with unready status: [minikube-ingress-dns]) / ContainersReady:ContainersNotReady (containers with unready status: [minikube-ingress-dns])
	I0620 17:03:09.375433    8431 system_pods.go:89] "kube-proxy-5znkp" [2e7dceb2-ea7b-47c0-8b9c-8b01c1233910] Running
	I0620 17:03:09.375437    8431 system_pods.go:89] "kube-scheduler-addons-705802" [8f616426-3ea1-447e-a3f6-71fd0e1661b5] Running
	I0620 17:03:09.375448    8431 system_pods.go:89] "metrics-server-c59844bb4-q78st" [c7a25cfe-a4af-4cb4-abb1-0099ff026057] Pending / Ready:ContainersNotReady (containers with unready status: [metrics-server]) / ContainersReady:ContainersNotReady (containers with unready status: [metrics-server])
	I0620 17:03:09.375455    8431 system_pods.go:89] "nvidia-device-plugin-daemonset-kxmqr" [3f82c240-bb27-4a72-a3f6-7af8eb39a770] Running
	I0620 17:03:09.375473    8431 system_pods.go:89] "registry-proxy-prpxx" [b1d2e1a6-ebd8-4490-94d7-be9a7e2a1920] Pending / Ready:ContainersNotReady (containers with unready status: [registry-proxy]) / ContainersReady:ContainersNotReady (containers with unready status: [registry-proxy])
	I0620 17:03:09.375482    8431 system_pods.go:89] "registry-xkjrc" [e8fb038a-058a-4a6e-9936-dd674eeb2189] Pending / Ready:ContainersNotReady (containers with unready status: [registry]) / ContainersReady:ContainersNotReady (containers with unready status: [registry])
	I0620 17:03:09.375489    8431 system_pods.go:89] "snapshot-controller-745499f584-4b4kb" [06b1bea9-035f-4060-aec1-e44054c29e19] Pending / Ready:ContainersNotReady (containers with unready status: [volume-snapshot-controller]) / ContainersReady:ContainersNotReady (containers with unready status: [volume-snapshot-controller])
	I0620 17:03:09.375499    8431 system_pods.go:89] "snapshot-controller-745499f584-p65kg" [4e15d845-866d-4016-844c-195b6cd467f4] Pending / Ready:ContainersNotReady (containers with unready status: [volume-snapshot-controller]) / ContainersReady:ContainersNotReady (containers with unready status: [volume-snapshot-controller])
	I0620 17:03:09.375507    8431 system_pods.go:89] "storage-provisioner" [e6f8d5d7-4787-4937-b437-5abd495907ff] Running
	I0620 17:03:09.375514    8431 system_pods.go:126] duration metric: took 11.413222ms to wait for k8s-apps to be running ...
	I0620 17:03:09.375528    8431 system_svc.go:44] waiting for kubelet service to be running ....
	I0620 17:03:09.375579    8431 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet
	I0620 17:03:09.387290    8431 system_svc.go:56] duration metric: took 11.75194ms WaitForService to wait for kubelet
	I0620 17:03:09.387319    8431 kubeadm.go:576] duration metric: took 16.874044107s to wait for: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
	I0620 17:03:09.387339    8431 node_conditions.go:102] verifying NodePressure condition ...
	I0620 17:03:09.390382    8431 node_conditions.go:122] node storage ephemeral capacity is 203034800Ki
	I0620 17:03:09.390412    8431 node_conditions.go:123] node cpu capacity is 2
	I0620 17:03:09.390423    8431 node_conditions.go:105] duration metric: took 3.079638ms to run NodePressure ...
	I0620 17:03:09.390435    8431 start.go:240] waiting for startup goroutines ...
	I0620 17:03:09.462328    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I0620 17:03:09.463538    8431 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0620 17:03:09.691310    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0620 17:03:09.959538    8431 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0620 17:03:09.961894    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I0620 17:03:10.189184    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0620 17:03:10.460619    8431 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0620 17:03:10.461374    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I0620 17:03:10.688834    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0620 17:03:10.960755    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I0620 17:03:10.963356    8431 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0620 17:03:11.189244    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0620 17:03:11.460534    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I0620 17:03:11.461673    8431 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0620 17:03:11.690178    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0620 17:03:11.958861    8431 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0620 17:03:11.959732    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I0620 17:03:12.188708    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0620 17:03:12.459078    8431 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0620 17:03:12.460052    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I0620 17:03:12.689535    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0620 17:03:12.959174    8431 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0620 17:03:12.960417    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I0620 17:03:13.188162    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0620 17:03:13.460753    8431 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0620 17:03:13.461313    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I0620 17:03:13.689823    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0620 17:03:13.960480    8431 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0620 17:03:13.961909    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I0620 17:03:14.189535    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0620 17:03:14.461552    8431 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0620 17:03:14.462730    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I0620 17:03:14.689155    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0620 17:03:14.959453    8431 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0620 17:03:14.962322    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I0620 17:03:15.191684    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0620 17:03:15.459042    8431 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0620 17:03:15.462366    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I0620 17:03:15.689497    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0620 17:03:15.962602    8431 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0620 17:03:15.963128    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I0620 17:03:16.193578    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0620 17:03:16.460901    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I0620 17:03:16.463487    8431 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0620 17:03:16.690541    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0620 17:03:16.960411    8431 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0620 17:03:16.960977    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I0620 17:03:17.188333    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0620 17:03:17.461883    8431 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0620 17:03:17.462380    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I0620 17:03:17.689054    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0620 17:03:17.959924    8431 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0620 17:03:17.961111    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I0620 17:03:18.188883    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0620 17:03:18.459766    8431 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0620 17:03:18.464010    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I0620 17:03:18.690472    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0620 17:03:18.961174    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I0620 17:03:18.962230    8431 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0620 17:03:19.189473    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0620 17:03:19.458717    8431 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0620 17:03:19.460959    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I0620 17:03:19.688641    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0620 17:03:19.959758    8431 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0620 17:03:19.960804    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I0620 17:03:20.192268    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0620 17:03:20.460490    8431 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0620 17:03:20.460935    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I0620 17:03:20.688688    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0620 17:03:20.960137    8431 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0620 17:03:20.960961    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I0620 17:03:21.188337    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0620 17:03:21.466425    8431 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0620 17:03:21.467076    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I0620 17:03:21.689565    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0620 17:03:21.965266    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I0620 17:03:21.966683    8431 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0620 17:03:22.189284    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0620 17:03:22.464665    8431 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0620 17:03:22.465519    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I0620 17:03:22.689009    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0620 17:03:22.960666    8431 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0620 17:03:22.961414    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I0620 17:03:23.190062    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0620 17:03:23.461024    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I0620 17:03:23.463781    8431 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0620 17:03:23.690078    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0620 17:03:23.960043    8431 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0620 17:03:23.963524    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I0620 17:03:24.188694    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0620 17:03:24.459886    8431 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0620 17:03:24.460765    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I0620 17:03:24.689279    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0620 17:03:24.960038    8431 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0620 17:03:24.962247    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I0620 17:03:25.189748    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0620 17:03:25.463217    8431 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0620 17:03:25.463615    8431 kapi.go:107] duration metric: took 20.508124596s to wait for kubernetes.io/minikube-addons=registry ...
	I0620 17:03:25.688726    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0620 17:03:25.967953    8431 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0620 17:03:26.188821    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0620 17:03:26.458296    8431 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0620 17:03:26.689318    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0620 17:03:26.958889    8431 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0620 17:03:27.190489    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0620 17:03:27.459917    8431 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0620 17:03:27.690318    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0620 17:03:27.959358    8431 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0620 17:03:28.190159    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0620 17:03:28.459759    8431 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0620 17:03:28.689689    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0620 17:03:28.959295    8431 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0620 17:03:29.189810    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0620 17:03:29.459818    8431 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0620 17:03:29.691276    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0620 17:03:29.959611    8431 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0620 17:03:30.189512    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0620 17:03:30.459349    8431 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0620 17:03:30.690809    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0620 17:03:30.960241    8431 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0620 17:03:31.190352    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0620 17:03:31.460139    8431 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0620 17:03:31.690976    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0620 17:03:31.959387    8431 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0620 17:03:32.193711    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0620 17:03:32.460432    8431 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0620 17:03:32.711536    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0620 17:03:32.959925    8431 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0620 17:03:33.188491    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0620 17:03:33.459471    8431 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0620 17:03:33.689851    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0620 17:03:33.960295    8431 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0620 17:03:34.189700    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0620 17:03:34.458793    8431 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0620 17:03:34.689338    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0620 17:03:34.959320    8431 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0620 17:03:35.192175    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0620 17:03:35.459681    8431 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0620 17:03:35.690343    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0620 17:03:35.960118    8431 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0620 17:03:36.191583    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0620 17:03:36.459529    8431 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0620 17:03:36.690190    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0620 17:03:36.959096    8431 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0620 17:03:37.189116    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0620 17:03:37.459648    8431 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0620 17:03:37.689088    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0620 17:03:37.960544    8431 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0620 17:03:38.189093    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0620 17:03:38.459252    8431 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0620 17:03:38.690190    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0620 17:03:38.959398    8431 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0620 17:03:39.190424    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0620 17:03:39.462134    8431 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0620 17:03:39.690655    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0620 17:03:39.959415    8431 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0620 17:03:40.188974    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0620 17:03:40.459787    8431 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0620 17:03:40.688910    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0620 17:03:40.959732    8431 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0620 17:03:41.189308    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0620 17:03:41.459315    8431 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0620 17:03:41.690065    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0620 17:03:41.959327    8431 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0620 17:03:42.194745    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0620 17:03:42.460293    8431 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0620 17:03:42.688696    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0620 17:03:42.992642    8431 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0620 17:03:43.189208    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0620 17:03:43.458881    8431 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0620 17:03:43.691400    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0620 17:03:43.958820    8431 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0620 17:03:44.189449    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0620 17:03:44.459586    8431 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0620 17:03:44.688709    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0620 17:03:44.959068    8431 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0620 17:03:45.189587    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0620 17:03:45.459818    8431 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0620 17:03:45.688831    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0620 17:03:45.958903    8431 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0620 17:03:46.189117    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0620 17:03:46.459690    8431 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0620 17:03:46.688838    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0620 17:03:46.959871    8431 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0620 17:03:47.188791    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0620 17:03:47.459558    8431 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0620 17:03:47.688222    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0620 17:03:47.962624    8431 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0620 17:03:48.189635    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0620 17:03:48.460055    8431 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0620 17:03:48.696061    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0620 17:03:48.959901    8431 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0620 17:03:49.188529    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0620 17:03:49.459378    8431 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0620 17:03:49.689439    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0620 17:03:49.966165    8431 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0620 17:03:50.193190    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0620 17:03:50.459623    8431 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0620 17:03:50.688477    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0620 17:03:50.959416    8431 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0620 17:03:51.189513    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0620 17:03:51.459215    8431 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0620 17:03:51.689222    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0620 17:03:51.959681    8431 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0620 17:03:52.189128    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0620 17:03:52.462202    8431 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0620 17:03:52.690965    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0620 17:03:52.959270    8431 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0620 17:03:53.189713    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0620 17:03:53.459496    8431 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0620 17:03:53.688806    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0620 17:03:53.959175    8431 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0620 17:03:54.189035    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0620 17:03:54.459642    8431 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0620 17:03:54.689711    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0620 17:03:54.959767    8431 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0620 17:03:55.189162    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0620 17:03:55.459055    8431 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0620 17:03:55.690134    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0620 17:03:55.959235    8431 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0620 17:03:56.188301    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0620 17:03:56.459251    8431 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0620 17:03:56.689323    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0620 17:03:56.959795    8431 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0620 17:03:57.188564    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0620 17:03:57.460456    8431 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0620 17:03:57.689269    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0620 17:03:57.959816    8431 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0620 17:03:58.188567    8431 kapi.go:107] duration metric: took 52.505587973s to wait for kubernetes.io/minikube-addons=csi-hostpath-driver ...
	I0620 17:03:58.458730    8431 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0620 17:03:58.959549    8431 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0620 17:03:59.459726    8431 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0620 17:03:59.958379    8431 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0620 17:04:00.471244    8431 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0620 17:04:00.958704    8431 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0620 17:04:01.458741    8431 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0620 17:04:01.959968    8431 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0620 17:04:02.459968    8431 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0620 17:04:02.958956    8431 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0620 17:04:03.459783    8431 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0620 17:04:03.959621    8431 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0620 17:04:04.458494    8431 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0620 17:04:04.959883    8431 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0620 17:04:05.464822    8431 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0620 17:04:05.961312    8431 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0620 17:04:06.459630    8431 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0620 17:04:06.959299    8431 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0620 17:04:07.459390    8431 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0620 17:04:07.966766    8431 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0620 17:04:08.461415    8431 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0620 17:04:08.959138    8431 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0620 17:04:09.467033    8431 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0620 17:04:09.959236    8431 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0620 17:04:10.460350    8431 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0620 17:04:10.959424    8431 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0620 17:04:11.459759    8431 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0620 17:04:11.959801    8431 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0620 17:04:12.459251    8431 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0620 17:04:12.961919    8431 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0620 17:04:13.458814    8431 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0620 17:04:13.960237    8431 kapi.go:107] duration metric: took 1m9.005627458s to wait for app.kubernetes.io/name=ingress-nginx ...
	I0620 17:04:29.588022    8431 kapi.go:86] Found 1 Pods for label selector kubernetes.io/minikube-addons=gcp-auth
	I0620 17:04:29.588045    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0620 17:04:30.087734    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0620 17:04:30.585617    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0620 17:04:31.085747    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0620 17:04:31.585845    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0620 17:04:32.086085    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0620 17:04:32.586479    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0620 17:04:33.085631    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0620 17:04:33.586221    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0620 17:04:34.085809    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0620 17:04:34.586131    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0620 17:04:35.085868    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0620 17:04:35.585369    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0620 17:04:36.086599    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0620 17:04:36.585864    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0620 17:04:37.086211    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0620 17:04:37.586344    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0620 17:04:38.086778    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0620 17:04:38.585364    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0620 17:04:39.086146    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0620 17:04:39.585760    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0620 17:04:40.086853    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0620 17:04:40.585497    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0620 17:04:41.085812    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0620 17:04:41.585569    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0620 17:04:42.086955    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0620 17:04:42.585950    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0620 17:04:43.085866    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0620 17:04:43.585544    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0620 17:04:44.086067    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0620 17:04:44.585477    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0620 17:04:45.086706    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0620 17:04:45.585484    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0620 17:04:46.085495    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0620 17:04:46.586419    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0620 17:04:47.085250    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0620 17:04:47.586144    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0620 17:04:48.085711    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0620 17:04:48.585454    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0620 17:04:49.086053    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0620 17:04:49.585847    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0620 17:04:50.085440    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0620 17:04:50.585466    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0620 17:04:51.086496    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0620 17:04:51.586211    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0620 17:04:52.085747    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0620 17:04:52.585355    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0620 17:04:53.086972    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0620 17:04:53.585582    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0620 17:04:54.085887    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0620 17:04:54.586314    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0620 17:04:55.086685    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0620 17:04:55.585824    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0620 17:04:56.085444    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0620 17:04:56.586005    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0620 17:04:57.086945    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0620 17:04:57.586118    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0620 17:04:58.085548    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0620 17:04:58.586181    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0620 17:04:59.085945    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0620 17:04:59.586356    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0620 17:05:00.092101    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0620 17:05:00.586663    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0620 17:05:01.085502    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0620 17:05:01.586687    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0620 17:05:02.086143    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0620 17:05:02.585387    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0620 17:05:03.086331    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0620 17:05:03.586065    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0620 17:05:04.085858    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0620 17:05:04.585774    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0620 17:05:05.085390    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0620 17:05:05.585715    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0620 17:05:06.085767    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0620 17:05:06.586137    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0620 17:05:07.086246    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0620 17:05:07.585268    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0620 17:05:08.085997    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0620 17:05:08.585267    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0620 17:05:09.087092    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0620 17:05:09.586085    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0620 17:05:10.085768    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0620 17:05:10.589540    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0620 17:05:11.087182    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0620 17:05:11.585884    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0620 17:05:12.086742    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0620 17:05:12.585905    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0620 17:05:13.085880    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0620 17:05:13.585704    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0620 17:05:14.086157    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0620 17:05:14.585595    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0620 17:05:15.087035    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0620 17:05:15.585411    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0620 17:05:16.086203    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0620 17:05:16.586718    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0620 17:05:17.086409    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0620 17:05:17.586175    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0620 17:05:18.086436    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0620 17:05:18.585746    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0620 17:05:19.086237    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0620 17:05:19.585785    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0620 17:05:20.085827    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0620 17:05:20.585760    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0620 17:05:21.085568    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0620 17:05:21.586538    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0620 17:05:22.086588    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0620 17:05:22.585911    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0620 17:05:23.086487    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0620 17:05:23.586202    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0620 17:05:24.086517    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0620 17:05:24.585428    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0620 17:05:25.086326    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0620 17:05:25.586061    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0620 17:05:26.085424    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0620 17:05:26.586254    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0620 17:05:27.085647    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0620 17:05:27.585223    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0620 17:05:28.086334    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0620 17:05:28.586298    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0620 17:05:29.086247    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0620 17:05:29.585930    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0620 17:05:30.087831    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0620 17:05:30.586363    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0620 17:05:31.085895    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0620 17:05:31.586217    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0620 17:05:32.086340    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0620 17:05:32.586253    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0620 17:05:33.087130    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0620 17:05:33.586369    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0620 17:05:34.085875    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0620 17:05:34.585420    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0620 17:05:35.085906    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0620 17:05:35.587175    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0620 17:05:36.090560    8431 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0620 17:05:36.585709    8431 kapi.go:107] duration metric: took 2m29.003582334s to wait for kubernetes.io/minikube-addons=gcp-auth ...
	I0620 17:05:36.587671    8431 out.go:177] * Your GCP credentials will now be mounted into every pod created in the addons-705802 cluster.
	I0620 17:05:36.589427    8431 out.go:177] * If you don't want your credentials mounted into a specific pod, add a label with the `gcp-auth-skip-secret` key to your pod configuration.
	I0620 17:05:36.591227    8431 out.go:177] * If you want existing pods to be mounted with credentials, either recreate them or rerun addons enable with --refresh.
	I0620 17:05:36.593024    8431 out.go:177] * Enabled addons: cloud-spanner, volcano, nvidia-device-plugin, ingress-dns, storage-provisioner, metrics-server, inspektor-gadget, yakd, storage-provisioner-rancher, volumesnapshots, registry, csi-hostpath-driver, ingress, gcp-auth
	I0620 17:05:36.594815    8431 addons.go:510] duration metric: took 2m44.081111578s for enable addons: enabled=[cloud-spanner volcano nvidia-device-plugin ingress-dns storage-provisioner metrics-server inspektor-gadget yakd storage-provisioner-rancher volumesnapshots registry csi-hostpath-driver ingress gcp-auth]
	I0620 17:05:36.594866    8431 start.go:245] waiting for cluster config update ...
	I0620 17:05:36.594886    8431 start.go:254] writing updated cluster config ...
	I0620 17:05:36.595225    8431 ssh_runner.go:195] Run: rm -f paused
	I0620 17:05:36.927430    8431 start.go:600] kubectl: 1.30.2, cluster: 1.30.2 (minor skew: 0)
	I0620 17:05:36.929356    8431 out.go:177] * Done! kubectl is now configured to use "addons-705802" cluster and "default" namespace by default
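	The gcp-auth messages above describe how to opt a pod out of the credential mount: label it with the `gcp-auth-skip-secret` key. A minimal sketch of such a pod spec, assuming the conventional "true" value for the label (the value itself is not shown in this log, and the pod name and image are placeholders):
	
	  apiVersion: v1
	  kind: Pod
	  metadata:
	    name: no-gcp-creds              # hypothetical name, for illustration only
	    labels:
	      gcp-auth-skip-secret: "true"  # opts this pod out of the gcp-auth credential mount
	  spec:
	    containers:
	    - name: app
	      image: nginx                  # placeholder image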
	
	
	==> Docker <==
	Jun 20 17:07:29 addons-705802 cri-dockerd[1357]: time="2024-06-20T17:07:29Z" level=info msg="Stop pulling image gcr.io/google-samples/hello-app:1.0: Status: Downloaded newer image for gcr.io/google-samples/hello-app:1.0"
	Jun 20 17:07:29 addons-705802 dockerd[1145]: time="2024-06-20T17:07:29.615713123Z" level=info msg="ignoring event" container=1c72bd1e95db57a6bc18269b88148ee0ea5e087b22080ae06b72befdff65c584 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
	Jun 20 17:07:30 addons-705802 dockerd[1145]: time="2024-06-20T17:07:30.516872663Z" level=info msg="ignoring event" container=5e254b93722f70824b0fce334e7f352474736f5801bfd5ef7e42663910fb423f module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
	Jun 20 17:07:33 addons-705802 dockerd[1145]: time="2024-06-20T17:07:33.803665832Z" level=info msg="ignoring event" container=f9ffc03cb5f218f46dc3d30c252300e844ed2141859dc7f5165c6757be73e62f module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
	Jun 20 17:07:33 addons-705802 dockerd[1145]: time="2024-06-20T17:07:33.908772117Z" level=info msg="ignoring event" container=a2c92a520f4d542bf2d23c2a0e188115c18f774695676c0abfccbc3df76c082f module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
	Jun 20 17:07:35 addons-705802 dockerd[1145]: time="2024-06-20T17:07:35.706921796Z" level=info msg="ignoring event" container=0376fa89d1e9f524c5a4fe542aded545b3a9f8c86a84e191ad6285f6cb51522c module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
	Jun 20 17:07:35 addons-705802 dockerd[1145]: time="2024-06-20T17:07:35.727763610Z" level=info msg="ignoring event" container=67e0ec36889be0d0fcc971e1d4a1a0e98c9505b22ca4d8a6e58ed9035f7f3ab4 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
	Jun 20 17:07:35 addons-705802 dockerd[1145]: time="2024-06-20T17:07:35.737757964Z" level=info msg="ignoring event" container=8c366068f56d18a3ec3f65516b77b8d5a3d16b3265c06f3c4f776e1f3bcf850b module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
	Jun 20 17:07:35 addons-705802 dockerd[1145]: time="2024-06-20T17:07:35.874709601Z" level=info msg="ignoring event" container=3333e9a06524bf3d2e88e97573cae2ce2b76a0305dc4551a189195d4fa117ce9 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
	Jun 20 17:07:35 addons-705802 dockerd[1145]: time="2024-06-20T17:07:35.876460029Z" level=info msg="ignoring event" container=beccb26b23d473b3494364d688fa00cc2f5afd9ae04ef1ef4282c505c070744b module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
	Jun 20 17:07:35 addons-705802 dockerd[1145]: time="2024-06-20T17:07:35.889583357Z" level=info msg="ignoring event" container=e9e6732afe653787a076fafa211732c02b8b0d8ffdbc4e729d322cff171a46f5 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
	Jun 20 17:07:35 addons-705802 dockerd[1145]: time="2024-06-20T17:07:35.897712093Z" level=info msg="ignoring event" container=5c6f2dde8f61ce2817a43e0ee5665e2cf35a6b23adfbdbd3f557a74661bade33 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
	Jun 20 17:07:35 addons-705802 dockerd[1145]: time="2024-06-20T17:07:35.911383121Z" level=info msg="ignoring event" container=5d53a49f9eafb2d5811f0890408082ae1f29bc1354460c31d6ab1deeecea545c module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
	Jun 20 17:07:35 addons-705802 dockerd[1145]: time="2024-06-20T17:07:35.977559400Z" level=info msg="ignoring event" container=9e4d219e297fe5c5d595a9877fdb5cb745177d7aa95920b544c56ded5aee6021 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
	Jun 20 17:07:36 addons-705802 dockerd[1145]: time="2024-06-20T17:07:36.097848403Z" level=info msg="ignoring event" container=213437ae109f85e74428d182e78140bb7d49e9560b2165c1cde77f5ebbf9a26a module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
	Jun 20 17:07:36 addons-705802 dockerd[1145]: time="2024-06-20T17:07:36.171685052Z" level=info msg="ignoring event" container=a5fe0489c69ef040fad5c32b0b5727d27e0631d5a33b9441f2e569b06579f33a module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
	Jun 20 17:07:42 addons-705802 dockerd[1145]: time="2024-06-20T17:07:42.318000999Z" level=info msg="ignoring event" container=ccc32bbddb1b02c7d5b778857874a1204f1a00f43b2fc6b114a4eab4fc8fcc82 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
	Jun 20 17:07:42 addons-705802 dockerd[1145]: time="2024-06-20T17:07:42.322340119Z" level=info msg="ignoring event" container=f542f855136e0f9a03bd6e7ccb6e0c23d4d4ccad4c59889b4e6f5b7a8e95ad33 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
	Jun 20 17:07:42 addons-705802 dockerd[1145]: time="2024-06-20T17:07:42.507322366Z" level=info msg="ignoring event" container=1a2df0e12c135b932e09ad1a109a6d98b4c827ac0619f51f941bf98cfb995c4b module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
	Jun 20 17:07:42 addons-705802 dockerd[1145]: time="2024-06-20T17:07:42.551087849Z" level=info msg="ignoring event" container=add87d7bc184902ea5c4f5d4f576c38df7a89ba7d9f1ee37ab17afd049c33b70 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
	Jun 20 17:07:42 addons-705802 dockerd[1145]: time="2024-06-20T17:07:42.974361300Z" level=info msg="ignoring event" container=f2a17f0f73996ec5842871f19e3f44133cbcafeabe935601be8c4fd125c16013 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
	Jun 20 17:07:46 addons-705802 dockerd[1145]: time="2024-06-20T17:07:46.095114488Z" level=info msg="ignoring event" container=9b1719eaba0f6884df1d555ff8284cd7065c3de89f2dd3519beb618e4719273f module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
	Jun 20 17:07:47 addons-705802 dockerd[1145]: time="2024-06-20T17:07:47.394591419Z" level=info msg="Container failed to exit within 2s of signal 15 - using the force" container=f460eba2ff8579fcfdc0158aa99d3a9049e84e2e908fdcf46331fe7c9d981c26 spanID=197e0bcb3f0fac3d traceID=51906e035afa5712653974d42d20967f
	Jun 20 17:07:47 addons-705802 dockerd[1145]: time="2024-06-20T17:07:47.453195823Z" level=info msg="ignoring event" container=f460eba2ff8579fcfdc0158aa99d3a9049e84e2e908fdcf46331fe7c9d981c26 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
	Jun 20 17:07:47 addons-705802 dockerd[1145]: time="2024-06-20T17:07:47.562110566Z" level=info msg="ignoring event" container=0fb48056df3ba23ccf2932e9fe093bc16e770e45ca69cc00685a346e865f1eca module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
	
	
	==> container status <==
	CONTAINER           IMAGE                                                                                                                        CREATED             STATE               NAME                      ATTEMPT             POD ID              POD
	9b1719eaba0f6       dd1b12fcb6097                                                                                                                7 seconds ago       Exited              hello-world-app           2                   ebd2ecdc3af67       hello-world-app-86c47465fc-qtr52
	a1dab6d884905       nginx@sha256:69f8c2c72671490607f52122be2af27d4fc09657ff57e42045801aa93d2090f7                                                33 seconds ago      Running             nginx                     0                   25b51ba04aad1       nginx
	3e5ee3e44a4e7       ghcr.io/headlamp-k8s/headlamp@sha256:c48d3702275225be765218b1caffea7fc514ed31bc11533af71ffd1ee6f2fde1                        2 minutes ago       Running             headlamp                  0                   8b60877a9c591       headlamp-7fc69f7444-l88s9
	3f8b579480a9a       gcr.io/k8s-minikube/gcp-auth-webhook@sha256:e6c5b3bc32072ea370d34c27836efd11b3519d25bd444c2a8efc339cff0e20fb                 2 minutes ago       Running             gcp-auth                  0                   48c2f1227709a       gcp-auth-5db96cd9b4-kzhkn
	97c76ccc02271       296b5f799fcd8                                                                                                                4 minutes ago       Exited              patch                     1                   fb0bde534fa7c       ingress-nginx-admission-patch-p7kzw
	ce95273809cf8       registry.k8s.io/ingress-nginx/kube-webhook-certgen@sha256:36d05b4077fb8e3d13663702fa337f124675ba8667cbd949c03a8e8ea6fa4366   4 minutes ago       Exited              create                    0                   e91ec2a05ca97       ingress-nginx-admission-create-lk64p
	68a27c7e2b099       marcnuri/yakd@sha256:a3f540278e4c11373e15605311851dd9c64d208f4d63e727bccc0e39f9329310                                        4 minutes ago       Running             yakd                      0                   0038e8effb9e8       yakd-dashboard-5ddbf7d777-qvpwn
	76efcb1684e94       ba04bb24b9575                                                                                                                4 minutes ago       Running             storage-provisioner       0                   6c6ce2635d6eb       storage-provisioner
	f7822fc676e19       2437cf7621777                                                                                                                4 minutes ago       Running             coredns                   0                   8474d1d80223a       coredns-7db6d8ff4d-xx9rs
	61ca478d0c73b       66dbb96a9149f                                                                                                                5 minutes ago       Running             kube-proxy                0                   1c2068e322be4       kube-proxy-5znkp
	644e078a4fffc       e1dcc3400d3ea                                                                                                                5 minutes ago       Running             kube-controller-manager   0                   fdaaf9b7d6154       kube-controller-manager-addons-705802
	ee118e0e0241a       84c601f3f72c8                                                                                                                5 minutes ago       Running             kube-apiserver            0                   022d99d8b3d83       kube-apiserver-addons-705802
	5146d4680a282       c7dd04b1bafeb                                                                                                                5 minutes ago       Running             kube-scheduler            0                   fffae5bb0d009       kube-scheduler-addons-705802
	8b9baba27f3f7       014faa467e297                                                                                                                5 minutes ago       Running             etcd                      0                   1df02718d37b3       etcd-addons-705802
	
	
	==> coredns [f7822fc676e1] <==
	[INFO] 10.244.0.21:50385 - 404 "AAAA IN hello-world-app.default.svc.cluster.local.cluster.local. udp 73 false 512" NXDOMAIN qr,aa,rd 166 0.000060396s
	[INFO] 10.244.0.21:55399 - 40355 "A IN hello-world-app.default.svc.cluster.local.us-east-2.compute.internal. udp 86 false 512" NXDOMAIN qr,rd,ra 86 0.002442545s
	[INFO] 10.244.0.21:50385 - 35466 "A IN hello-world-app.default.svc.cluster.local.us-east-2.compute.internal. udp 86 false 512" NXDOMAIN qr,rd,ra 86 0.001471131s
	[INFO] 10.244.0.21:55399 - 43771 "AAAA IN hello-world-app.default.svc.cluster.local.us-east-2.compute.internal. udp 86 false 512" NXDOMAIN qr,rd,ra 86 0.001910098s
	[INFO] 10.244.0.21:50385 - 22971 "AAAA IN hello-world-app.default.svc.cluster.local.us-east-2.compute.internal. udp 86 false 512" NXDOMAIN qr,rd,ra 86 0.002346291s
	[INFO] 10.244.0.21:50385 - 27474 "A IN hello-world-app.default.svc.cluster.local. udp 59 false 512" NOERROR qr,aa,rd 116 0.000131017s
	[INFO] 10.244.0.21:55399 - 51485 "A IN hello-world-app.default.svc.cluster.local. udp 59 false 512" NOERROR qr,aa,rd 116 0.000071064s
	[INFO] 10.244.0.21:58951 - 15210 "A IN hello-world-app.default.svc.cluster.local.ingress-nginx.svc.cluster.local. udp 91 false 512" NXDOMAIN qr,aa,rd 184 0.00010568s
	[INFO] 10.244.0.21:50772 - 46706 "A IN hello-world-app.default.svc.cluster.local.ingress-nginx.svc.cluster.local. udp 91 false 512" NXDOMAIN qr,aa,rd 184 0.000201449s
	[INFO] 10.244.0.21:50772 - 47540 "AAAA IN hello-world-app.default.svc.cluster.local.ingress-nginx.svc.cluster.local. udp 91 false 512" NXDOMAIN qr,aa,rd 184 0.000051683s
	[INFO] 10.244.0.21:58951 - 37902 "AAAA IN hello-world-app.default.svc.cluster.local.ingress-nginx.svc.cluster.local. udp 91 false 512" NXDOMAIN qr,aa,rd 184 0.000052101s
	[INFO] 10.244.0.21:58951 - 44558 "A IN hello-world-app.default.svc.cluster.local.svc.cluster.local. udp 77 false 512" NXDOMAIN qr,aa,rd 170 0.00004996s
	[INFO] 10.244.0.21:50772 - 52534 "A IN hello-world-app.default.svc.cluster.local.svc.cluster.local. udp 77 false 512" NXDOMAIN qr,aa,rd 170 0.000047736s
	[INFO] 10.244.0.21:58951 - 42552 "AAAA IN hello-world-app.default.svc.cluster.local.svc.cluster.local. udp 77 false 512" NXDOMAIN qr,aa,rd 170 0.000046408s
	[INFO] 10.244.0.21:50772 - 33081 "AAAA IN hello-world-app.default.svc.cluster.local.svc.cluster.local. udp 77 false 512" NXDOMAIN qr,aa,rd 170 0.000040639s
	[INFO] 10.244.0.21:58951 - 4264 "A IN hello-world-app.default.svc.cluster.local.cluster.local. udp 73 false 512" NXDOMAIN qr,aa,rd 166 0.000034798s
	[INFO] 10.244.0.21:58951 - 56128 "AAAA IN hello-world-app.default.svc.cluster.local.cluster.local. udp 73 false 512" NXDOMAIN qr,aa,rd 166 0.000045916s
	[INFO] 10.244.0.21:50772 - 3581 "A IN hello-world-app.default.svc.cluster.local.cluster.local. udp 73 false 512" NXDOMAIN qr,aa,rd 166 0.000043519s
	[INFO] 10.244.0.21:50772 - 54868 "AAAA IN hello-world-app.default.svc.cluster.local.cluster.local. udp 73 false 512" NXDOMAIN qr,aa,rd 166 0.000049542s
	[INFO] 10.244.0.21:58951 - 6677 "A IN hello-world-app.default.svc.cluster.local.us-east-2.compute.internal. udp 86 false 512" NXDOMAIN qr,rd,ra 86 0.001273924s
	[INFO] 10.244.0.21:50772 - 3232 "A IN hello-world-app.default.svc.cluster.local.us-east-2.compute.internal. udp 86 false 512" NXDOMAIN qr,rd,ra 86 0.000830115s
	[INFO] 10.244.0.21:50772 - 58321 "AAAA IN hello-world-app.default.svc.cluster.local.us-east-2.compute.internal. udp 86 false 512" NXDOMAIN qr,rd,ra 86 0.001186196s
	[INFO] 10.244.0.21:58951 - 26852 "AAAA IN hello-world-app.default.svc.cluster.local.us-east-2.compute.internal. udp 86 false 512" NXDOMAIN qr,rd,ra 86 0.001189855s
	[INFO] 10.244.0.21:58951 - 10014 "A IN hello-world-app.default.svc.cluster.local. udp 59 false 512" NOERROR qr,aa,rd 116 0.000062826s
	[INFO] 10.244.0.21:50772 - 36175 "A IN hello-world-app.default.svc.cluster.local. udp 59 false 512" NOERROR qr,aa,rd 116 0.000080105s
	
	
	==> describe nodes <==
	Name:               addons-705802
	Roles:              control-plane
	Labels:             beta.kubernetes.io/arch=arm64
	                    beta.kubernetes.io/os=linux
	                    kubernetes.io/arch=arm64
	                    kubernetes.io/hostname=addons-705802
	                    kubernetes.io/os=linux
	                    minikube.k8s.io/commit=a5bfa5828b76fe92a3c5f89a54d8c76f6b5f3f8b
	                    minikube.k8s.io/name=addons-705802
	                    minikube.k8s.io/primary=true
	                    minikube.k8s.io/updated_at=2024_06_20T17_02_38_0700
	                    minikube.k8s.io/version=v1.33.1
	                    node-role.kubernetes.io/control-plane=
	                    node.kubernetes.io/exclude-from-external-load-balancers=
	                    topology.hostpath.csi/node=addons-705802
	Annotations:        kubeadm.alpha.kubernetes.io/cri-socket: unix:///var/run/cri-dockerd.sock
	                    node.alpha.kubernetes.io/ttl: 0
	                    volumes.kubernetes.io/controller-managed-attach-detach: true
	CreationTimestamp:  Thu, 20 Jun 2024 17:02:35 +0000
	Taints:             <none>
	Unschedulable:      false
	Lease:
	  HolderIdentity:  addons-705802
	  AcquireTime:     <unset>
	  RenewTime:       Thu, 20 Jun 2024 17:07:44 +0000
	Conditions:
	  Type             Status  LastHeartbeatTime                 LastTransitionTime                Reason                       Message
	  ----             ------  -----------------                 ------------------                ------                       -------
	  MemoryPressure   False   Thu, 20 Jun 2024 17:07:45 +0000   Thu, 20 Jun 2024 17:02:31 +0000   KubeletHasSufficientMemory   kubelet has sufficient memory available
	  DiskPressure     False   Thu, 20 Jun 2024 17:07:45 +0000   Thu, 20 Jun 2024 17:02:31 +0000   KubeletHasNoDiskPressure     kubelet has no disk pressure
	  PIDPressure      False   Thu, 20 Jun 2024 17:07:45 +0000   Thu, 20 Jun 2024 17:02:31 +0000   KubeletHasSufficientPID      kubelet has sufficient PID available
	  Ready            True    Thu, 20 Jun 2024 17:07:45 +0000   Thu, 20 Jun 2024 17:02:38 +0000   KubeletReady                 kubelet is posting ready status
	Addresses:
	  InternalIP:  192.168.49.2
	  Hostname:    addons-705802
	Capacity:
	  cpu:                2
	  ephemeral-storage:  203034800Ki
	  hugepages-1Gi:      0
	  hugepages-2Mi:      0
	  hugepages-32Mi:     0
	  hugepages-64Ki:     0
	  memory:             8022364Ki
	  pods:               110
	Allocatable:
	  cpu:                2
	  ephemeral-storage:  203034800Ki
	  hugepages-1Gi:      0
	  hugepages-2Mi:      0
	  hugepages-32Mi:     0
	  hugepages-64Ki:     0
	  memory:             8022364Ki
	  pods:               110
	System Info:
	  Machine ID:                 f56acf9171fa4bf7a2603b702552b339
	  System UUID:                001afeb8-d39a-4e94-ba78-6f018d6c6f08
	  Boot ID:                    c14a5c8e-2318-4449-baf4-6a576bee7c02
	  Kernel Version:             5.15.0-1063-aws
	  OS Image:                   Ubuntu 22.04.4 LTS
	  Operating System:           linux
	  Architecture:               arm64
	  Container Runtime Version:  docker://26.1.4
	  Kubelet Version:            v1.30.2
	  Kube-Proxy Version:         v1.30.2
	PodCIDR:                      10.244.0.0/24
	PodCIDRs:                     10.244.0.0/24
	Non-terminated Pods:          (12 in total)
	  Namespace                   Name                                     CPU Requests  CPU Limits  Memory Requests  Memory Limits  Age
	  ---------                   ----                                     ------------  ----------  ---------------  -------------  ---
	  default                     hello-world-app-86c47465fc-qtr52         0 (0%)        0 (0%)      0 (0%)           0 (0%)         26s
	  default                     nginx                                    0 (0%)        0 (0%)      0 (0%)           0 (0%)         35s
	  gcp-auth                    gcp-auth-5db96cd9b4-kzhkn                0 (0%)        0 (0%)      0 (0%)           0 (0%)         3m23s
	  headlamp                    headlamp-7fc69f7444-l88s9                0 (0%)        0 (0%)      0 (0%)           0 (0%)         2m15s
	  kube-system                 coredns-7db6d8ff4d-xx9rs                 100m (5%)     0 (0%)      70Mi (0%)        170Mi (2%)     5m
	  kube-system                 etcd-addons-705802                       100m (5%)     0 (0%)      100Mi (1%)       0 (0%)         5m14s
	  kube-system                 kube-apiserver-addons-705802             250m (12%)    0 (0%)      0 (0%)           0 (0%)         5m14s
	  kube-system                 kube-controller-manager-addons-705802    200m (10%)    0 (0%)      0 (0%)           0 (0%)         5m14s
	  kube-system                 kube-proxy-5znkp                         0 (0%)        0 (0%)      0 (0%)           0 (0%)         5m1s
	  kube-system                 kube-scheduler-addons-705802             100m (5%)     0 (0%)      0 (0%)           0 (0%)         5m15s
	  kube-system                 storage-provisioner                      0 (0%)        0 (0%)      0 (0%)           0 (0%)         4m55s
	  yakd-dashboard              yakd-dashboard-5ddbf7d777-qvpwn          0 (0%)        0 (0%)      128Mi (1%)       256Mi (3%)     4m53s
	Allocated resources:
	  (Total limits may be over 100 percent, i.e., overcommitted.)
	  Resource           Requests    Limits
	  --------           --------    ------
	  cpu                750m (37%)  0 (0%)
	  memory             298Mi (3%)  426Mi (5%)
	  ephemeral-storage  0 (0%)      0 (0%)
	  hugepages-1Gi      0 (0%)      0 (0%)
	  hugepages-2Mi      0 (0%)      0 (0%)
	  hugepages-32Mi     0 (0%)      0 (0%)
	  hugepages-64Ki     0 (0%)      0 (0%)
	Events:
	  Type    Reason                   Age    From             Message
	  ----    ------                   ----   ----             -------
	  Normal  Starting                 4m59s  kube-proxy       
	  Normal  Starting                 5m15s  kubelet          Starting kubelet.
	  Normal  NodeHasSufficientMemory  5m14s  kubelet          Node addons-705802 status is now: NodeHasSufficientMemory
	  Normal  NodeHasNoDiskPressure    5m14s  kubelet          Node addons-705802 status is now: NodeHasNoDiskPressure
	  Normal  NodeHasSufficientPID     5m14s  kubelet          Node addons-705802 status is now: NodeHasSufficientPID
	  Normal  NodeNotReady             5m14s  kubelet          Node addons-705802 status is now: NodeNotReady
	  Normal  NodeAllocatableEnforced  5m14s  kubelet          Updated Node Allocatable limit across pods
	  Normal  NodeReady                5m14s  kubelet          Node addons-705802 status is now: NodeReady
	  Normal  RegisteredNode           5m1s   node-controller  Node addons-705802 event: Registered Node addons-705802 in Controller
	
	
	==> dmesg <==
	[Jun20 16:17] ACPI: SRAT not present
	[  +0.000000] ACPI: SRAT not present
	[  +0.000000] SPI driver altr_a10sr has no spi_device_id for altr,a10sr
	[  +0.014778] device-mapper: core: CONFIG_IMA_DISABLE_HTABLE is disabled. Duplicate IMA measurements will not be recorded in the IMA log.
	[  +0.488827] systemd[1]: /lib/systemd/system/cloud-init-local.service:15: Unknown key name 'ConditionEnvironment' in section 'Unit', ignoring.
	[  +0.002529] systemd[1]: /lib/systemd/system/cloud-init.service:19: Unknown key name 'ConditionEnvironment' in section 'Unit', ignoring.
	[  +0.014946] systemd[1]: /lib/systemd/system/cloud-init.target:15: Unknown key name 'ConditionEnvironment' in section 'Unit', ignoring.
	[  +0.004327] systemd[1]: /lib/systemd/system/cloud-final.service:9: Unknown key name 'ConditionEnvironment' in section 'Unit', ignoring.
	[  +0.003264] systemd[1]: /lib/systemd/system/cloud-config.service:8: Unknown key name 'ConditionEnvironment' in section 'Unit', ignoring.
	[  +0.681144] ena 0000:00:05.0: LLQ is not supported Fallback to host mode policy.
	[  +6.161888] kauditd_printk_skb: 36 callbacks suppressed
	
	
	==> etcd [8b9baba27f3f] <==
	{"level":"info","ts":"2024-06-20T17:02:31.419119Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"aec36adc501070cc switched to configuration voters=(12593026477526642892)"}
	{"level":"info","ts":"2024-06-20T17:02:31.419271Z","caller":"membership/cluster.go:421","msg":"added member","cluster-id":"fa54960ea34d58be","local-member-id":"aec36adc501070cc","added-peer-id":"aec36adc501070cc","added-peer-peer-urls":["https://192.168.49.2:2380"]}
	{"level":"info","ts":"2024-06-20T17:02:31.425233Z","caller":"embed/etcd.go:726","msg":"starting with client TLS","tls-info":"cert = /var/lib/minikube/certs/etcd/server.crt, key = /var/lib/minikube/certs/etcd/server.key, client-cert=, client-key=, trusted-ca = /var/lib/minikube/certs/etcd/ca.crt, client-cert-auth = true, crl-file = ","cipher-suites":[]}
	{"level":"info","ts":"2024-06-20T17:02:31.425504Z","caller":"embed/etcd.go:277","msg":"now serving peer/client/metrics","local-member-id":"aec36adc501070cc","initial-advertise-peer-urls":["https://192.168.49.2:2380"],"listen-peer-urls":["https://192.168.49.2:2380"],"advertise-client-urls":["https://192.168.49.2:2379"],"listen-client-urls":["https://127.0.0.1:2379","https://192.168.49.2:2379"],"listen-metrics-urls":["http://127.0.0.1:2381"]}
	{"level":"info","ts":"2024-06-20T17:02:31.425559Z","caller":"embed/etcd.go:857","msg":"serving metrics","address":"http://127.0.0.1:2381"}
	{"level":"info","ts":"2024-06-20T17:02:31.425702Z","caller":"embed/etcd.go:597","msg":"serving peer traffic","address":"192.168.49.2:2380"}
	{"level":"info","ts":"2024-06-20T17:02:31.42574Z","caller":"embed/etcd.go:569","msg":"cmux::serve","address":"192.168.49.2:2380"}
	{"level":"info","ts":"2024-06-20T17:02:32.107038Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"aec36adc501070cc is starting a new election at term 1"}
	{"level":"info","ts":"2024-06-20T17:02:32.10714Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"aec36adc501070cc became pre-candidate at term 1"}
	{"level":"info","ts":"2024-06-20T17:02:32.107178Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"aec36adc501070cc received MsgPreVoteResp from aec36adc501070cc at term 1"}
	{"level":"info","ts":"2024-06-20T17:02:32.107226Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"aec36adc501070cc became candidate at term 2"}
	{"level":"info","ts":"2024-06-20T17:02:32.107254Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"aec36adc501070cc received MsgVoteResp from aec36adc501070cc at term 2"}
	{"level":"info","ts":"2024-06-20T17:02:32.107283Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"aec36adc501070cc became leader at term 2"}
	{"level":"info","ts":"2024-06-20T17:02:32.107315Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"raft.node: aec36adc501070cc elected leader aec36adc501070cc at term 2"}
	{"level":"info","ts":"2024-06-20T17:02:32.111106Z","caller":"etcdserver/server.go:2578","msg":"setting up initial cluster version using v2 API","cluster-version":"3.5"}
	{"level":"info","ts":"2024-06-20T17:02:32.115186Z","caller":"etcdserver/server.go:2068","msg":"published local member to cluster through raft","local-member-id":"aec36adc501070cc","local-member-attributes":"{Name:addons-705802 ClientURLs:[https://192.168.49.2:2379]}","request-path":"/0/members/aec36adc501070cc/attributes","cluster-id":"fa54960ea34d58be","publish-timeout":"7s"}
	{"level":"info","ts":"2024-06-20T17:02:32.115372Z","caller":"embed/serve.go:103","msg":"ready to serve client requests"}
	{"level":"info","ts":"2024-06-20T17:02:32.115461Z","caller":"embed/serve.go:103","msg":"ready to serve client requests"}
	{"level":"info","ts":"2024-06-20T17:02:32.119027Z","caller":"etcdmain/main.go:44","msg":"notifying init daemon"}
	{"level":"info","ts":"2024-06-20T17:02:32.119199Z","caller":"etcdmain/main.go:50","msg":"successfully notified init daemon"}
	{"level":"info","ts":"2024-06-20T17:02:32.120599Z","caller":"embed/serve.go:250","msg":"serving client traffic securely","traffic":"grpc+http","address":"192.168.49.2:2379"}
	{"level":"info","ts":"2024-06-20T17:02:32.120701Z","caller":"membership/cluster.go:584","msg":"set initial cluster version","cluster-id":"fa54960ea34d58be","local-member-id":"aec36adc501070cc","cluster-version":"3.5"}
	{"level":"info","ts":"2024-06-20T17:02:32.12076Z","caller":"api/capability.go:75","msg":"enabled capabilities for version","cluster-version":"3.5"}
	{"level":"info","ts":"2024-06-20T17:02:32.120782Z","caller":"etcdserver/server.go:2602","msg":"cluster version is updated","cluster-version":"3.5"}
	{"level":"info","ts":"2024-06-20T17:02:32.127286Z","caller":"embed/serve.go:250","msg":"serving client traffic securely","traffic":"grpc+http","address":"127.0.0.1:2379"}
	
	
	==> gcp-auth [3f8b579480a9] <==
	2024/06/20 17:05:37 Ready to write response ...
	2024/06/20 17:05:37 Ready to marshal response ...
	2024/06/20 17:05:37 Ready to write response ...
	2024/06/20 17:05:37 Ready to marshal response ...
	2024/06/20 17:05:37 Ready to write response ...
	2024/06/20 17:05:48 Ready to marshal response ...
	2024/06/20 17:05:48 Ready to write response ...
	2024/06/20 17:06:04 Ready to marshal response ...
	2024/06/20 17:06:04 Ready to write response ...
	2024/06/20 17:06:04 Ready to marshal response ...
	2024/06/20 17:06:04 Ready to write response ...
	2024/06/20 17:06:05 Ready to marshal response ...
	2024/06/20 17:06:05 Ready to write response ...
	2024/06/20 17:06:06 Ready to marshal response ...
	2024/06/20 17:06:06 Ready to write response ...
	2024/06/20 17:06:16 Ready to marshal response ...
	2024/06/20 17:06:16 Ready to write response ...
	2024/06/20 17:06:55 Ready to marshal response ...
	2024/06/20 17:06:55 Ready to write response ...
	2024/06/20 17:07:17 Ready to marshal response ...
	2024/06/20 17:07:17 Ready to write response ...
	2024/06/20 17:07:25 Ready to marshal response ...
	2024/06/20 17:07:25 Ready to write response ...
	2024/06/20 17:07:26 Ready to marshal response ...
	2024/06/20 17:07:26 Ready to write response ...
	
	
	==> kernel <==
	 17:07:52 up 50 min,  0 users,  load average: 1.79, 1.50, 0.79
	Linux addons-705802 5.15.0-1063-aws #69~20.04.1-Ubuntu SMP Fri May 10 19:21:30 UTC 2024 aarch64 aarch64 aarch64 GNU/Linux
	PRETTY_NAME="Ubuntu 22.04.4 LTS"
	
	
	==> kube-apiserver [ee118e0e0241] <==
	I0620 17:06:20.968233       1 handler.go:286] Adding GroupVersion nodeinfo.volcano.sh v1alpha1 to ResourceManager
	W0620 17:06:21.483133       1 cacher.go:168] Terminating all watchers from cacher commands.bus.volcano.sh
	W0620 17:06:21.961131       1 cacher.go:168] Terminating all watchers from cacher podgroups.scheduling.volcano.sh
	W0620 17:06:22.038596       1 cacher.go:168] Terminating all watchers from cacher queues.scheduling.volcano.sh
	W0620 17:06:22.038827       1 cacher.go:168] Terminating all watchers from cacher numatopologies.nodeinfo.volcano.sh
	W0620 17:06:22.205521       1 cacher.go:168] Terminating all watchers from cacher jobs.batch.volcano.sh
	E0620 17:06:32.216231       1 authentication.go:73] "Unable to authenticate the request" err="[invalid bearer token, serviceaccounts \"local-path-provisioner-service-account\" not found]"
	I0620 17:07:03.423714       1 controller.go:615] quota admission added evaluator for: volumesnapshots.snapshot.storage.k8s.io
	I0620 17:07:05.888204       1 handler.go:286] Adding GroupVersion gadget.kinvolk.io v1alpha1 to ResourceManager
	W0620 17:07:06.992814       1 cacher.go:168] Terminating all watchers from cacher traces.gadget.kinvolk.io
	I0620 17:07:17.179277       1 controller.go:615] quota admission added evaluator for: ingresses.networking.k8s.io
	I0620 17:07:17.439674       1 alloc.go:330] "allocated clusterIPs" service="default/nginx" clusterIPs={"IPv4":"10.98.238.73"}
	I0620 17:07:27.151139       1 alloc.go:330] "allocated clusterIPs" service="default/hello-world-app" clusterIPs={"IPv4":"10.109.65.236"}
	I0620 17:07:34.905958       1 controller.go:129] OpenAPI AggregationController: action for item v1beta1.metrics.k8s.io: Nothing (removed from the queue).
	I0620 17:07:42.071615       1 handler.go:286] Adding GroupVersion snapshot.storage.k8s.io v1 to ResourceManager
	I0620 17:07:42.071659       1 handler.go:286] Adding GroupVersion snapshot.storage.k8s.io v1beta1 to ResourceManager
	I0620 17:07:42.095771       1 handler.go:286] Adding GroupVersion snapshot.storage.k8s.io v1 to ResourceManager
	I0620 17:07:42.095838       1 handler.go:286] Adding GroupVersion snapshot.storage.k8s.io v1beta1 to ResourceManager
	I0620 17:07:42.141203       1 handler.go:286] Adding GroupVersion snapshot.storage.k8s.io v1 to ResourceManager
	I0620 17:07:42.141264       1 handler.go:286] Adding GroupVersion snapshot.storage.k8s.io v1beta1 to ResourceManager
	I0620 17:07:42.176334       1 handler.go:286] Adding GroupVersion snapshot.storage.k8s.io v1 to ResourceManager
	I0620 17:07:42.176660       1 handler.go:286] Adding GroupVersion snapshot.storage.k8s.io v1beta1 to ResourceManager
	W0620 17:07:43.141531       1 cacher.go:168] Terminating all watchers from cacher volumesnapshotclasses.snapshot.storage.k8s.io
	W0620 17:07:43.176418       1 cacher.go:168] Terminating all watchers from cacher volumesnapshotcontents.snapshot.storage.k8s.io
	W0620 17:07:43.186944       1 cacher.go:168] Terminating all watchers from cacher volumesnapshots.snapshot.storage.k8s.io
	
	
	==> kube-controller-manager [644e078a4fff] <==
	E0620 17:07:44.293401       1 reflector.go:150] k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
	I0620 17:07:44.359490       1 job_controller.go:566] "enqueueing job" logger="job-controller" key="ingress-nginx/ingress-nginx-admission-create"
	I0620 17:07:44.363826       1 replica_set.go:676] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="ingress-nginx/ingress-nginx-controller-768f948f8f" duration="11.856µs"
	I0620 17:07:44.366651       1 job_controller.go:566] "enqueueing job" logger="job-controller" key="ingress-nginx/ingress-nginx-admission-patch"
	W0620 17:07:44.604363       1 reflector.go:547] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
	E0620 17:07:44.604400       1 reflector.go:150] k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
	W0620 17:07:45.960327       1 reflector.go:547] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
	E0620 17:07:45.960365       1 reflector.go:150] k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
	I0620 17:07:46.853636       1 replica_set.go:676] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="default/hello-world-app-86c47465fc" duration="58.641µs"
	W0620 17:07:47.273934       1 reflector.go:547] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
	E0620 17:07:47.273969       1 reflector.go:150] k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
	W0620 17:07:47.310145       1 reflector.go:547] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
	E0620 17:07:47.310185       1 reflector.go:150] k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
	W0620 17:07:49.382261       1 reflector.go:547] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
	E0620 17:07:49.382501       1 reflector.go:150] k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
	W0620 17:07:50.664212       1 reflector.go:547] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
	E0620 17:07:50.664251       1 reflector.go:150] k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
	I0620 17:07:51.581319       1 shared_informer.go:313] Waiting for caches to sync for resource quota
	I0620 17:07:51.581357       1 shared_informer.go:320] Caches are synced for resource quota
	W0620 17:07:51.689540       1 reflector.go:547] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
	E0620 17:07:51.689581       1 reflector.go:150] k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
	I0620 17:07:52.077714       1 shared_informer.go:313] Waiting for caches to sync for garbage collector
	I0620 17:07:52.077790       1 shared_informer.go:320] Caches are synced for garbage collector
	W0620 17:07:52.347413       1 reflector.go:547] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
	E0620 17:07:52.347454       1 reflector.go:150] k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
	
	
	==> kube-proxy [61ca478d0c73] <==
	I0620 17:02:52.782812       1 server_linux.go:69] "Using iptables proxy"
	I0620 17:02:52.806776       1 server.go:1062] "Successfully retrieved node IP(s)" IPs=["192.168.49.2"]
	I0620 17:02:52.898754       1 server.go:659] "kube-proxy running in dual-stack mode" primary ipFamily="IPv4"
	I0620 17:02:52.898810       1 server_linux.go:165] "Using iptables Proxier"
	I0620 17:02:52.950511       1 server_linux.go:511] "Detect-local-mode set to ClusterCIDR, but no cluster CIDR for family" ipFamily="IPv6"
	I0620 17:02:52.950537       1 server_linux.go:528] "Defaulting to no-op detect-local"
	I0620 17:02:52.950568       1 proxier.go:243] "Setting route_localnet=1 to allow node-ports on localhost; to change this either disable iptables.localhostNodePorts (--iptables-localhost-nodeports) or set nodePortAddresses (--nodeport-addresses) to filter loopback addresses"
	I0620 17:02:52.950804       1 server.go:872] "Version info" version="v1.30.2"
	I0620 17:02:52.950815       1 server.go:874] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
	I0620 17:02:52.992288       1 config.go:192] "Starting service config controller"
	I0620 17:02:52.992327       1 shared_informer.go:313] Waiting for caches to sync for service config
	I0620 17:02:52.992408       1 config.go:101] "Starting endpoint slice config controller"
	I0620 17:02:52.992414       1 shared_informer.go:313] Waiting for caches to sync for endpoint slice config
	I0620 17:02:52.996790       1 config.go:319] "Starting node config controller"
	I0620 17:02:52.998894       1 shared_informer.go:313] Waiting for caches to sync for node config
	I0620 17:02:53.094721       1 shared_informer.go:320] Caches are synced for endpoint slice config
	I0620 17:02:53.094787       1 shared_informer.go:320] Caches are synced for service config
	I0620 17:02:53.107315       1 shared_informer.go:320] Caches are synced for node config
	
	
	==> kube-scheduler [5146d4680a28] <==
	W0620 17:02:35.364098       1 reflector.go:547] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: services is forbidden: User "system:kube-scheduler" cannot list resource "services" in API group "" at the cluster scope
	W0620 17:02:35.364160       1 reflector.go:547] k8s.io/client-go/informers/factory.go:160: failed to list *v1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User "system:kube-scheduler" cannot list resource "poddisruptionbudgets" in API group "policy" at the cluster scope
	W0620 17:02:35.364193       1 reflector.go:547] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Pod: pods is forbidden: User "system:kube-scheduler" cannot list resource "pods" in API group "" at the cluster scope
	E0620 17:02:35.365408       1 reflector.go:150] k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: services is forbidden: User "system:kube-scheduler" cannot list resource "services" in API group "" at the cluster scope
	E0620 17:02:35.365558       1 reflector.go:150] k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.PodDisruptionBudget: failed to list *v1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User "system:kube-scheduler" cannot list resource "poddisruptionbudgets" in API group "policy" at the cluster scope
	E0620 17:02:35.365733       1 reflector.go:150] k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Pod: failed to list *v1.Pod: pods is forbidden: User "system:kube-scheduler" cannot list resource "pods" in API group "" at the cluster scope
	W0620 17:02:36.227630       1 reflector.go:547] k8s.io/client-go/informers/factory.go:160: failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumes" in API group "" at the cluster scope
	E0620 17:02:36.227873       1 reflector.go:150] k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.PersistentVolume: failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumes" in API group "" at the cluster scope
	W0620 17:02:36.228953       1 reflector.go:547] k8s.io/client-go/informers/factory.go:160: failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumeclaims" in API group "" at the cluster scope
	E0620 17:02:36.229111       1 reflector.go:150] k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.PersistentVolumeClaim: failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumeclaims" in API group "" at the cluster scope
	W0620 17:02:36.260259       1 reflector.go:547] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: csidrivers.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csidrivers" in API group "storage.k8s.io" at the cluster scope
	E0620 17:02:36.260305       1 reflector.go:150] k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: csidrivers.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csidrivers" in API group "storage.k8s.io" at the cluster scope
	W0620 17:02:36.285379       1 reflector.go:547] k8s.io/client-go/informers/factory.go:160: failed to list *v1.ReplicaSet: replicasets.apps is forbidden: User "system:kube-scheduler" cannot list resource "replicasets" in API group "apps" at the cluster scope
	E0620 17:02:36.285628       1 reflector.go:150] k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.ReplicaSet: failed to list *v1.ReplicaSet: replicasets.apps is forbidden: User "system:kube-scheduler" cannot list resource "replicasets" in API group "apps" at the cluster scope
	W0620 17:02:36.297219       1 reflector.go:547] k8s.io/client-go/informers/factory.go:160: failed to list *v1.StatefulSet: statefulsets.apps is forbidden: User "system:kube-scheduler" cannot list resource "statefulsets" in API group "apps" at the cluster scope
	E0620 17:02:36.297444       1 reflector.go:150] k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.StatefulSet: failed to list *v1.StatefulSet: statefulsets.apps is forbidden: User "system:kube-scheduler" cannot list resource "statefulsets" in API group "apps" at the cluster scope
	W0620 17:02:36.303600       1 reflector.go:547] k8s.io/client-go/informers/factory.go:160: failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope
	E0620 17:02:36.303814       1 reflector.go:150] k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.StorageClass: failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope
	W0620 17:02:36.369233       1 reflector.go:547] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Pod: pods is forbidden: User "system:kube-scheduler" cannot list resource "pods" in API group "" at the cluster scope
	E0620 17:02:36.369444       1 reflector.go:150] k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Pod: failed to list *v1.Pod: pods is forbidden: User "system:kube-scheduler" cannot list resource "pods" in API group "" at the cluster scope
	W0620 17:02:36.465080       1 reflector.go:547] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIStorageCapacity: csistoragecapacities.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csistoragecapacities" in API group "storage.k8s.io" at the cluster scope
	E0620 17:02:36.465119       1 reflector.go:150] k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIStorageCapacity: failed to list *v1.CSIStorageCapacity: csistoragecapacities.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csistoragecapacities" in API group "storage.k8s.io" at the cluster scope
	W0620 17:02:36.469418       1 reflector.go:547] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSINode: csinodes.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csinodes" in API group "storage.k8s.io" at the cluster scope
	E0620 17:02:36.469477       1 reflector.go:150] k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSINode: failed to list *v1.CSINode: csinodes.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csinodes" in API group "storage.k8s.io" at the cluster scope
	I0620 17:02:36.947450       1 shared_informer.go:320] Caches are synced for client-ca::kube-system::extension-apiserver-authentication::client-ca-file
	
	
	==> kubelet <==
	Jun 20 17:07:42 addons-705802 kubelet[2185]: I0620 17:07:42.849208    2185 reconciler_common.go:289] "Volume detached for volume \"kube-api-access-xk4v5\" (UniqueName: \"kubernetes.io/projected/06b1bea9-035f-4060-aec1-e44054c29e19-kube-api-access-xk4v5\") on node \"addons-705802\" DevicePath \"\""
	Jun 20 17:07:43 addons-705802 kubelet[2185]: I0620 17:07:43.151381    2185 reconciler_common.go:161] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rn898\" (UniqueName: \"kubernetes.io/projected/3e89464b-3923-41d9-aa53-c4c00c815b36-kube-api-access-rn898\") pod \"3e89464b-3923-41d9-aa53-c4c00c815b36\" (UID: \"3e89464b-3923-41d9-aa53-c4c00c815b36\") "
	Jun 20 17:07:43 addons-705802 kubelet[2185]: I0620 17:07:43.153444    2185 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3e89464b-3923-41d9-aa53-c4c00c815b36-kube-api-access-rn898" (OuterVolumeSpecName: "kube-api-access-rn898") pod "3e89464b-3923-41d9-aa53-c4c00c815b36" (UID: "3e89464b-3923-41d9-aa53-c4c00c815b36"). InnerVolumeSpecName "kube-api-access-rn898". PluginName "kubernetes.io/projected", VolumeGidValue ""
	Jun 20 17:07:43 addons-705802 kubelet[2185]: I0620 17:07:43.252512    2185 reconciler_common.go:289] "Volume detached for volume \"kube-api-access-rn898\" (UniqueName: \"kubernetes.io/projected/3e89464b-3923-41d9-aa53-c4c00c815b36-kube-api-access-rn898\") on node \"addons-705802\" DevicePath \"\""
	Jun 20 17:07:43 addons-705802 kubelet[2185]: I0620 17:07:43.775329    2185 scope.go:117] "RemoveContainer" containerID="ed2da9f5bc9d8a66b8b1a0e36b4073fe5b250b230a3399d43a8b9a2aea1f1049"
	Jun 20 17:07:43 addons-705802 kubelet[2185]: I0620 17:07:43.970534    2185 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="06b1bea9-035f-4060-aec1-e44054c29e19" path="/var/lib/kubelet/pods/06b1bea9-035f-4060-aec1-e44054c29e19/volumes"
	Jun 20 17:07:43 addons-705802 kubelet[2185]: I0620 17:07:43.971147    2185 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3e89464b-3923-41d9-aa53-c4c00c815b36" path="/var/lib/kubelet/pods/3e89464b-3923-41d9-aa53-c4c00c815b36/volumes"
	Jun 20 17:07:43 addons-705802 kubelet[2185]: I0620 17:07:43.971647    2185 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4e15d845-866d-4016-844c-195b6cd467f4" path="/var/lib/kubelet/pods/4e15d845-866d-4016-844c-195b6cd467f4/volumes"
	Jun 20 17:07:45 addons-705802 kubelet[2185]: I0620 17:07:45.961238    2185 scope.go:117] "RemoveContainer" containerID="5e254b93722f70824b0fce334e7f352474736f5801bfd5ef7e42663910fb423f"
	Jun 20 17:07:45 addons-705802 kubelet[2185]: I0620 17:07:45.972135    2185 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="40913a12-38a1-4d28-9f52-0976549f721a" path="/var/lib/kubelet/pods/40913a12-38a1-4d28-9f52-0976549f721a/volumes"
	Jun 20 17:07:45 addons-705802 kubelet[2185]: I0620 17:07:45.972935    2185 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d0058e37-925f-4182-864e-9d7d4a6d44e4" path="/var/lib/kubelet/pods/d0058e37-925f-4182-864e-9d7d4a6d44e4/volumes"
	Jun 20 17:07:46 addons-705802 kubelet[2185]: I0620 17:07:46.837866    2185 scope.go:117] "RemoveContainer" containerID="5e254b93722f70824b0fce334e7f352474736f5801bfd5ef7e42663910fb423f"
	Jun 20 17:07:46 addons-705802 kubelet[2185]: I0620 17:07:46.838290    2185 scope.go:117] "RemoveContainer" containerID="9b1719eaba0f6884df1d555ff8284cd7065c3de89f2dd3519beb618e4719273f"
	Jun 20 17:07:46 addons-705802 kubelet[2185]: E0620 17:07:46.838558    2185 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"hello-world-app\" with CrashLoopBackOff: \"back-off 20s restarting failed container=hello-world-app pod=hello-world-app-86c47465fc-qtr52_default(b2dfda91-be84-430c-8552-aa65aa887ee2)\"" pod="default/hello-world-app-86c47465fc-qtr52" podUID="b2dfda91-be84-430c-8552-aa65aa887ee2"
	Jun 20 17:07:47 addons-705802 kubelet[2185]: I0620 17:07:47.780874    2185 reconciler_common.go:161] "operationExecutor.UnmountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/f7610b04-4f00-439a-a178-eb48d94edffa-webhook-cert\") pod \"f7610b04-4f00-439a-a178-eb48d94edffa\" (UID: \"f7610b04-4f00-439a-a178-eb48d94edffa\") "
	Jun 20 17:07:47 addons-705802 kubelet[2185]: I0620 17:07:47.781027    2185 reconciler_common.go:161] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ml7s5\" (UniqueName: \"kubernetes.io/projected/f7610b04-4f00-439a-a178-eb48d94edffa-kube-api-access-ml7s5\") pod \"f7610b04-4f00-439a-a178-eb48d94edffa\" (UID: \"f7610b04-4f00-439a-a178-eb48d94edffa\") "
	Jun 20 17:07:47 addons-705802 kubelet[2185]: I0620 17:07:47.783228    2185 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f7610b04-4f00-439a-a178-eb48d94edffa-webhook-cert" (OuterVolumeSpecName: "webhook-cert") pod "f7610b04-4f00-439a-a178-eb48d94edffa" (UID: "f7610b04-4f00-439a-a178-eb48d94edffa"). InnerVolumeSpecName "webhook-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
	Jun 20 17:07:47 addons-705802 kubelet[2185]: I0620 17:07:47.786656    2185 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f7610b04-4f00-439a-a178-eb48d94edffa-kube-api-access-ml7s5" (OuterVolumeSpecName: "kube-api-access-ml7s5") pod "f7610b04-4f00-439a-a178-eb48d94edffa" (UID: "f7610b04-4f00-439a-a178-eb48d94edffa"). InnerVolumeSpecName "kube-api-access-ml7s5". PluginName "kubernetes.io/projected", VolumeGidValue ""
	Jun 20 17:07:47 addons-705802 kubelet[2185]: I0620 17:07:47.864487    2185 scope.go:117] "RemoveContainer" containerID="f460eba2ff8579fcfdc0158aa99d3a9049e84e2e908fdcf46331fe7c9d981c26"
	Jun 20 17:07:47 addons-705802 kubelet[2185]: I0620 17:07:47.882773    2185 reconciler_common.go:289] "Volume detached for volume \"kube-api-access-ml7s5\" (UniqueName: \"kubernetes.io/projected/f7610b04-4f00-439a-a178-eb48d94edffa-kube-api-access-ml7s5\") on node \"addons-705802\" DevicePath \"\""
	Jun 20 17:07:47 addons-705802 kubelet[2185]: I0620 17:07:47.882943    2185 reconciler_common.go:289] "Volume detached for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/f7610b04-4f00-439a-a178-eb48d94edffa-webhook-cert\") on node \"addons-705802\" DevicePath \"\""
	Jun 20 17:07:47 addons-705802 kubelet[2185]: I0620 17:07:47.889619    2185 scope.go:117] "RemoveContainer" containerID="f460eba2ff8579fcfdc0158aa99d3a9049e84e2e908fdcf46331fe7c9d981c26"
	Jun 20 17:07:47 addons-705802 kubelet[2185]: E0620 17:07:47.890456    2185 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = Unknown desc = Error response from daemon: No such container: f460eba2ff8579fcfdc0158aa99d3a9049e84e2e908fdcf46331fe7c9d981c26" containerID="f460eba2ff8579fcfdc0158aa99d3a9049e84e2e908fdcf46331fe7c9d981c26"
	Jun 20 17:07:47 addons-705802 kubelet[2185]: I0620 17:07:47.890493    2185 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"docker","ID":"f460eba2ff8579fcfdc0158aa99d3a9049e84e2e908fdcf46331fe7c9d981c26"} err="failed to get container status \"f460eba2ff8579fcfdc0158aa99d3a9049e84e2e908fdcf46331fe7c9d981c26\": rpc error: code = Unknown desc = Error response from daemon: No such container: f460eba2ff8579fcfdc0158aa99d3a9049e84e2e908fdcf46331fe7c9d981c26"
	Jun 20 17:07:47 addons-705802 kubelet[2185]: I0620 17:07:47.975259    2185 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f7610b04-4f00-439a-a178-eb48d94edffa" path="/var/lib/kubelet/pods/f7610b04-4f00-439a-a178-eb48d94edffa/volumes"
	
	
	==> storage-provisioner [76efcb1684e9] <==
	I0620 17:02:59.494006       1 storage_provisioner.go:116] Initializing the minikube storage provisioner...
	I0620 17:02:59.506936       1 storage_provisioner.go:141] Storage provisioner initialized, now starting service!
	I0620 17:02:59.509969       1 leaderelection.go:243] attempting to acquire leader lease kube-system/k8s.io-minikube-hostpath...
	I0620 17:02:59.519324       1 leaderelection.go:253] successfully acquired lease kube-system/k8s.io-minikube-hostpath
	I0620 17:02:59.520266       1 controller.go:835] Starting provisioner controller k8s.io/minikube-hostpath_addons-705802_35bf51df-2ef2-4733-b058-54cd7f22ea2f!
	I0620 17:02:59.521441       1 event.go:282] Event(v1.ObjectReference{Kind:"Endpoints", Namespace:"kube-system", Name:"k8s.io-minikube-hostpath", UID:"ed868b88-184f-4c97-82e9-65493bf6ce34", APIVersion:"v1", ResourceVersion:"550", FieldPath:""}): type: 'Normal' reason: 'LeaderElection' addons-705802_35bf51df-2ef2-4733-b058-54cd7f22ea2f became leader
	I0620 17:02:59.621396       1 controller.go:884] Started provisioner controller k8s.io/minikube-hostpath_addons-705802_35bf51df-2ef2-4733-b058-54cd7f22ea2f!
	

                                                
                                                
-- /stdout --
helpers_test.go:254: (dbg) Run:  out/minikube-linux-arm64 status --format={{.APIServer}} -p addons-705802 -n addons-705802
helpers_test.go:261: (dbg) Run:  kubectl --context addons-705802 get po -o=jsonpath={.items[*].metadata.name} -A --field-selector=status.phase!=Running
helpers_test.go:285: <<< TestAddons/parallel/Ingress FAILED: end of post-mortem logs <<<
helpers_test.go:286: ---------------------/post-mortem---------------------------------
--- FAIL: TestAddons/parallel/Ingress (36.58s)

                                                
                                    
TestStartStop/group/old-k8s-version/serial/SecondStart (376.34s)

                                                
                                                
=== RUN   TestStartStop/group/old-k8s-version/serial/SecondStart
start_stop_delete_test.go:256: (dbg) Run:  out/minikube-linux-arm64 start -p old-k8s-version-577369 --memory=2200 --alsologtostderr --wait=true --kvm-network=default --kvm-qemu-uri=qemu:///system --disable-driver-mounts --keep-context=false --driver=docker  --container-runtime=docker --kubernetes-version=v1.20.0
start_stop_delete_test.go:256: (dbg) Non-zero exit: out/minikube-linux-arm64 start -p old-k8s-version-577369 --memory=2200 --alsologtostderr --wait=true --kvm-network=default --kvm-qemu-uri=qemu:///system --disable-driver-mounts --keep-context=false --driver=docker  --container-runtime=docker --kubernetes-version=v1.20.0: exit status 102 (6m13.503137527s)

                                                
                                                
-- stdout --
	* [old-k8s-version-577369] minikube v1.33.1 on Ubuntu 20.04 (arm64)
	  - MINIKUBE_LOCATION=19106
	  - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
	  - KUBECONFIG=/home/jenkins/minikube-integration/19106-2452/kubeconfig
	  - MINIKUBE_HOME=/home/jenkins/minikube-integration/19106-2452/.minikube
	  - MINIKUBE_BIN=out/minikube-linux-arm64
	  - MINIKUBE_FORCE_SYSTEMD=
	* Kubernetes 1.30.2 is now available. If you would like to upgrade, specify: --kubernetes-version=v1.30.2
	* Using the docker driver based on existing profile
	* Starting "old-k8s-version-577369" primary control-plane node in "old-k8s-version-577369" cluster
	* Pulling base image v0.0.44-1718753665-19106 ...
	* Restarting existing docker container for "old-k8s-version-577369" ...
	* Preparing Kubernetes v1.20.0 on Docker 26.1.4 ...
	* Verifying Kubernetes components...
	  - Using image docker.io/kubernetesui/dashboard:v2.7.0
	  - Using image registry.k8s.io/echoserver:1.4
	  - Using image fake.domain/registry.k8s.io/echoserver:1.4
	  - Using image gcr.io/k8s-minikube/storage-provisioner:v5
	* Some dashboard features require the metrics-server addon. To enable all features please run:
	
		minikube -p old-k8s-version-577369 addons enable metrics-server
	
	* Enabled addons: metrics-server, storage-provisioner, dashboard, default-storageclass
	
	

                                                
                                                
-- /stdout --
** stderr ** 
	I0620 18:05:02.043164  362254 out.go:291] Setting OutFile to fd 1 ...
	I0620 18:05:02.043404  362254 out.go:338] TERM=,COLORTERM=, which probably does not support color
	I0620 18:05:02.043432  362254 out.go:304] Setting ErrFile to fd 2...
	I0620 18:05:02.043452  362254 out.go:338] TERM=,COLORTERM=, which probably does not support color
	I0620 18:05:02.043757  362254 root.go:338] Updating PATH: /home/jenkins/minikube-integration/19106-2452/.minikube/bin
	I0620 18:05:02.044225  362254 out.go:298] Setting JSON to false
	I0620 18:05:02.045498  362254 start.go:129] hostinfo: {"hostname":"ip-172-31-30-239","uptime":6453,"bootTime":1718900249,"procs":237,"os":"linux","platform":"ubuntu","platformFamily":"debian","platformVersion":"20.04","kernelVersion":"5.15.0-1063-aws","kernelArch":"aarch64","virtualizationSystem":"","virtualizationRole":"","hostId":"92f46a7d-c249-4c12-924a-77f64874c910"}
	I0620 18:05:02.045601  362254 start.go:139] virtualization:  
	I0620 18:05:02.048847  362254 out.go:177] * [old-k8s-version-577369] minikube v1.33.1 on Ubuntu 20.04 (arm64)
	I0620 18:05:02.051940  362254 out.go:177]   - MINIKUBE_LOCATION=19106
	I0620 18:05:02.051976  362254 notify.go:220] Checking for updates...
	I0620 18:05:02.056489  362254 out.go:177]   - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
	I0620 18:05:02.059194  362254 out.go:177]   - KUBECONFIG=/home/jenkins/minikube-integration/19106-2452/kubeconfig
	I0620 18:05:02.061643  362254 out.go:177]   - MINIKUBE_HOME=/home/jenkins/minikube-integration/19106-2452/.minikube
	I0620 18:05:02.063799  362254 out.go:177]   - MINIKUBE_BIN=out/minikube-linux-arm64
	I0620 18:05:02.065839  362254 out.go:177]   - MINIKUBE_FORCE_SYSTEMD=
	I0620 18:05:02.068821  362254 config.go:182] Loaded profile config "old-k8s-version-577369": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.20.0
	I0620 18:05:02.073736  362254 out.go:177] * Kubernetes 1.30.2 is now available. If you would like to upgrade, specify: --kubernetes-version=v1.30.2
	I0620 18:05:02.075963  362254 driver.go:392] Setting default libvirt URI to qemu:///system
	I0620 18:05:02.099277  362254 docker.go:122] docker version: linux-26.1.4:Docker Engine - Community
	I0620 18:05:02.099396  362254 cli_runner.go:164] Run: docker system info --format "{{json .}}"
	I0620 18:05:02.194016  362254 info.go:266] docker info: {ID:6ZPO:QZND:VNGE:LUKL:4Y3K:XELL:AAX4:2GTK:E6LM:MPRN:3ZXR:TTMR Containers:2 ContainersRunning:1 ContainersPaused:0 ContainersStopped:1 Images:4 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:true BridgeNfIP6Tables:true Debug:false NFd:32 OomKillDisable:true NGoroutines:52 SystemTime:2024-06-20 18:05:02.183740751 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1063-aws OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Architecture:aarch64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:2 MemTotal:8214900736 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ip-172-31-30-239 Labels:[] ExperimentalBuild:false ServerVersion:26.1.4 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:d2d58213f83a351ca8f528a95fbd145f5654e957 Expected:d2d58213f83a351ca8f528a95fbd145f5654e957} RuncCommit:{ID:v1.1.12-0-g51d5e94 Expected:v1.1.12-0-g51d5e94} InitCommit:{ID:de40ad0 Expected:de40ad0} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.14.1] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.27.1]] Warnings:<nil>}}
	I0620 18:05:02.194188  362254 docker.go:295] overlay module found
	I0620 18:05:02.196622  362254 out.go:177] * Using the docker driver based on existing profile
	I0620 18:05:02.198542  362254 start.go:297] selected driver: docker
	I0620 18:05:02.198568  362254 start.go:901] validating driver "docker" against &{Name:old-k8s-version-577369 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1718753665-19106@sha256:735aacbd61d487240dc39ba6e4d70dd6ae1ad6181ca2ba092d372605e48ee636 Memory:2200 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:true NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.20.0 ClusterName:old-k8s-version-577369 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin: FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.85.2 Port:8443 KubernetesVersion:v1.20.0 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[dashboard:true default-storageclass:true metrics-server:true storage-provisioner:true] CustomAddonImages:map[MetricsScraper:registry.k8s.io/echoserver:1.4 MetricsServer:registry.k8s.io/echoserver:1.4] CustomAddonRegistries:map[MetricsServer:fake.domain] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
	I0620 18:05:02.198666  362254 start.go:912] status for docker: {Installed:true Healthy:true Running:false NeedsImprovement:false Error:<nil> Reason: Fix: Doc: Version:}
	I0620 18:05:02.199399  362254 cli_runner.go:164] Run: docker system info --format "{{json .}}"
	I0620 18:05:02.277488  362254 info.go:266] docker info: {ID:6ZPO:QZND:VNGE:LUKL:4Y3K:XELL:AAX4:2GTK:E6LM:MPRN:3ZXR:TTMR Containers:2 ContainersRunning:1 ContainersPaused:0 ContainersStopped:1 Images:4 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:true BridgeNfIP6Tables:true Debug:false NFd:32 OomKillDisable:true NGoroutines:52 SystemTime:2024-06-20 18:05:02.268025856 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1063-aws OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Architecture:aarch64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:2 MemTotal:8214900736 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ip-172-31-30-239 Labels:[] ExperimentalBuild:false ServerVersion:26.1.4 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:d2d58213f83a351ca8f528a95fbd145f5654e957 Expected:d2d58213f83a351ca8f528a95fbd145f5654e957} RuncCommit:{ID:v1.1.12-0-g51d5e94 Expected:v1.1.12-0-g51d5e94} InitCommit:{ID:de40ad0 Expected:de40ad0} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.14.1] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.27.1]] Warnings:<nil>}}
	I0620 18:05:02.277832  362254 start_flags.go:947] Waiting for all components: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
	I0620 18:05:02.277866  362254 cni.go:84] Creating CNI manager for ""
	I0620 18:05:02.277878  362254 cni.go:162] CNI unnecessary in this configuration, recommending no CNI
	I0620 18:05:02.277923  362254 start.go:340] cluster config:
	{Name:old-k8s-version-577369 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1718753665-19106@sha256:735aacbd61d487240dc39ba6e4d70dd6ae1ad6181ca2ba092d372605e48ee636 Memory:2200 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:true NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.20.0 ClusterName:old-k8s-version-577369 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin: FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.85.2 Port:8443 KubernetesVersion:v1.20.0 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[dashboard:true default-storageclass:true metrics-server:true storage-provisioner:true] CustomAddonImages:map[MetricsScraper:registry.k8s.io/echoserver:1.4 MetricsServer:registry.k8s.io/echoserver:1.4] CustomAddonRegistries:map[MetricsServer:fake.domain] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
	I0620 18:05:02.280605  362254 out.go:177] * Starting "old-k8s-version-577369" primary control-plane node in "old-k8s-version-577369" cluster
	I0620 18:05:02.284655  362254 cache.go:121] Beginning downloading kic base image for docker with docker
	I0620 18:05:02.286489  362254 out.go:177] * Pulling base image v0.0.44-1718753665-19106 ...
	I0620 18:05:02.289062  362254 preload.go:132] Checking if preload exists for k8s version v1.20.0 and runtime docker
	I0620 18:05:02.289128  362254 preload.go:147] Found local preload: /home/jenkins/minikube-integration/19106-2452/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.20.0-docker-overlay2-arm64.tar.lz4
	I0620 18:05:02.289142  362254 cache.go:56] Caching tarball of preloaded images
	I0620 18:05:02.289237  362254 preload.go:173] Found /home/jenkins/minikube-integration/19106-2452/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.20.0-docker-overlay2-arm64.tar.lz4 in cache, skipping download
	I0620 18:05:02.289252  362254 cache.go:59] Finished verifying existence of preloaded tar for v1.20.0 on docker
	I0620 18:05:02.289360  362254 profile.go:143] Saving config to /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/old-k8s-version-577369/config.json ...
	I0620 18:05:02.289565  362254 image.go:79] Checking for gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1718753665-19106@sha256:735aacbd61d487240dc39ba6e4d70dd6ae1ad6181ca2ba092d372605e48ee636 in local docker daemon
	I0620 18:05:02.313717  362254 image.go:83] Found gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1718753665-19106@sha256:735aacbd61d487240dc39ba6e4d70dd6ae1ad6181ca2ba092d372605e48ee636 in local docker daemon, skipping pull
	I0620 18:05:02.313745  362254 cache.go:144] gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1718753665-19106@sha256:735aacbd61d487240dc39ba6e4d70dd6ae1ad6181ca2ba092d372605e48ee636 exists in daemon, skipping load
	I0620 18:05:02.313764  362254 cache.go:194] Successfully downloaded all kic artifacts
	I0620 18:05:02.313791  362254 start.go:360] acquireMachinesLock for old-k8s-version-577369: {Name:mk9b1b6d4b408a6c11c5d4aaff15ef14fb8ad6d5 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
	I0620 18:05:02.313868  362254 start.go:364] duration metric: took 46.744µs to acquireMachinesLock for "old-k8s-version-577369"
	I0620 18:05:02.313892  362254 start.go:96] Skipping create...Using existing machine configuration
	I0620 18:05:02.313901  362254 fix.go:54] fixHost starting: 
	I0620 18:05:02.314184  362254 cli_runner.go:164] Run: docker container inspect old-k8s-version-577369 --format={{.State.Status}}
	I0620 18:05:02.344504  362254 fix.go:112] recreateIfNeeded on old-k8s-version-577369: state=Stopped err=<nil>
	W0620 18:05:02.344543  362254 fix.go:138] unexpected machine state, will restart: <nil>
	I0620 18:05:02.346773  362254 out.go:177] * Restarting existing docker container for "old-k8s-version-577369" ...
	I0620 18:05:02.349594  362254 cli_runner.go:164] Run: docker start old-k8s-version-577369
	I0620 18:05:02.805846  362254 cli_runner.go:164] Run: docker container inspect old-k8s-version-577369 --format={{.State.Status}}
	I0620 18:05:02.827076  362254 kic.go:430] container "old-k8s-version-577369" state is running.
	I0620 18:05:02.831790  362254 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" old-k8s-version-577369
	I0620 18:05:02.882583  362254 profile.go:143] Saving config to /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/old-k8s-version-577369/config.json ...
	I0620 18:05:02.882820  362254 machine.go:94] provisionDockerMachine start ...
	I0620 18:05:02.882894  362254 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-577369
	I0620 18:05:02.922775  362254 main.go:141] libmachine: Using SSH client type: native
	I0620 18:05:02.923122  362254 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x3e2bc0] 0x3e5420 <nil>  [] 0s} 127.0.0.1 33128 <nil> <nil>}
	I0620 18:05:02.923142  362254 main.go:141] libmachine: About to run SSH command:
	hostname
	I0620 18:05:02.924004  362254 main.go:141] libmachine: Error dialing TCP: ssh: handshake failed: EOF
	I0620 18:05:06.063326  362254 main.go:141] libmachine: SSH cmd err, output: <nil>: old-k8s-version-577369
	
	I0620 18:05:06.063351  362254 ubuntu.go:169] provisioning hostname "old-k8s-version-577369"
	I0620 18:05:06.063422  362254 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-577369
	I0620 18:05:06.082204  362254 main.go:141] libmachine: Using SSH client type: native
	I0620 18:05:06.082475  362254 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x3e2bc0] 0x3e5420 <nil>  [] 0s} 127.0.0.1 33128 <nil> <nil>}
	I0620 18:05:06.082493  362254 main.go:141] libmachine: About to run SSH command:
	sudo hostname old-k8s-version-577369 && echo "old-k8s-version-577369" | sudo tee /etc/hostname
	I0620 18:05:06.233824  362254 main.go:141] libmachine: SSH cmd err, output: <nil>: old-k8s-version-577369
	
	I0620 18:05:06.233910  362254 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-577369
	I0620 18:05:06.251525  362254 main.go:141] libmachine: Using SSH client type: native
	I0620 18:05:06.251887  362254 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x3e2bc0] 0x3e5420 <nil>  [] 0s} 127.0.0.1 33128 <nil> <nil>}
	I0620 18:05:06.251913  362254 main.go:141] libmachine: About to run SSH command:
	
			if ! grep -xq '.*\sold-k8s-version-577369' /etc/hosts; then
				if grep -xq '127.0.1.1\s.*' /etc/hosts; then
					sudo sed -i 's/^127.0.1.1\s.*/127.0.1.1 old-k8s-version-577369/g' /etc/hosts;
				else 
					echo '127.0.1.1 old-k8s-version-577369' | sudo tee -a /etc/hosts; 
				fi
			fi
	I0620 18:05:06.387362  362254 main.go:141] libmachine: SSH cmd err, output: <nil>: 
	I0620 18:05:06.387388  362254 ubuntu.go:175] set auth options {CertDir:/home/jenkins/minikube-integration/19106-2452/.minikube CaCertPath:/home/jenkins/minikube-integration/19106-2452/.minikube/certs/ca.pem CaPrivateKeyPath:/home/jenkins/minikube-integration/19106-2452/.minikube/certs/ca-key.pem CaCertRemotePath:/etc/docker/ca.pem ServerCertPath:/home/jenkins/minikube-integration/19106-2452/.minikube/machines/server.pem ServerKeyPath:/home/jenkins/minikube-integration/19106-2452/.minikube/machines/server-key.pem ClientKeyPath:/home/jenkins/minikube-integration/19106-2452/.minikube/certs/key.pem ServerCertRemotePath:/etc/docker/server.pem ServerKeyRemotePath:/etc/docker/server-key.pem ClientCertPath:/home/jenkins/minikube-integration/19106-2452/.minikube/certs/cert.pem ServerCertSANs:[] StorePath:/home/jenkins/minikube-integration/19106-2452/.minikube}
	I0620 18:05:06.387419  362254 ubuntu.go:177] setting up certificates
	I0620 18:05:06.387429  362254 provision.go:84] configureAuth start
	I0620 18:05:06.387493  362254 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" old-k8s-version-577369
	I0620 18:05:06.409505  362254 provision.go:143] copyHostCerts
	I0620 18:05:06.409583  362254 exec_runner.go:144] found /home/jenkins/minikube-integration/19106-2452/.minikube/ca.pem, removing ...
	I0620 18:05:06.409597  362254 exec_runner.go:203] rm: /home/jenkins/minikube-integration/19106-2452/.minikube/ca.pem
	I0620 18:05:06.409674  362254 exec_runner.go:151] cp: /home/jenkins/minikube-integration/19106-2452/.minikube/certs/ca.pem --> /home/jenkins/minikube-integration/19106-2452/.minikube/ca.pem (1078 bytes)
	I0620 18:05:06.409805  362254 exec_runner.go:144] found /home/jenkins/minikube-integration/19106-2452/.minikube/cert.pem, removing ...
	I0620 18:05:06.409816  362254 exec_runner.go:203] rm: /home/jenkins/minikube-integration/19106-2452/.minikube/cert.pem
	I0620 18:05:06.409848  362254 exec_runner.go:151] cp: /home/jenkins/minikube-integration/19106-2452/.minikube/certs/cert.pem --> /home/jenkins/minikube-integration/19106-2452/.minikube/cert.pem (1123 bytes)
	I0620 18:05:06.409907  362254 exec_runner.go:144] found /home/jenkins/minikube-integration/19106-2452/.minikube/key.pem, removing ...
	I0620 18:05:06.409915  362254 exec_runner.go:203] rm: /home/jenkins/minikube-integration/19106-2452/.minikube/key.pem
	I0620 18:05:06.409940  362254 exec_runner.go:151] cp: /home/jenkins/minikube-integration/19106-2452/.minikube/certs/key.pem --> /home/jenkins/minikube-integration/19106-2452/.minikube/key.pem (1675 bytes)
	I0620 18:05:06.409992  362254 provision.go:117] generating server cert: /home/jenkins/minikube-integration/19106-2452/.minikube/machines/server.pem ca-key=/home/jenkins/minikube-integration/19106-2452/.minikube/certs/ca.pem private-key=/home/jenkins/minikube-integration/19106-2452/.minikube/certs/ca-key.pem org=jenkins.old-k8s-version-577369 san=[127.0.0.1 192.168.85.2 localhost minikube old-k8s-version-577369]
	I0620 18:05:06.667572  362254 provision.go:177] copyRemoteCerts
	I0620 18:05:06.667651  362254 ssh_runner.go:195] Run: sudo mkdir -p /etc/docker /etc/docker /etc/docker
	I0620 18:05:06.667696  362254 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-577369
	I0620 18:05:06.683724  362254 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33128 SSHKeyPath:/home/jenkins/minikube-integration/19106-2452/.minikube/machines/old-k8s-version-577369/id_rsa Username:docker}
	I0620 18:05:06.782852  362254 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19106-2452/.minikube/certs/ca.pem --> /etc/docker/ca.pem (1078 bytes)
	I0620 18:05:06.806890  362254 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19106-2452/.minikube/machines/server.pem --> /etc/docker/server.pem (1233 bytes)
	I0620 18:05:06.830888  362254 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19106-2452/.minikube/machines/server-key.pem --> /etc/docker/server-key.pem (1679 bytes)
	I0620 18:05:06.855372  362254 provision.go:87] duration metric: took 467.925213ms to configureAuth
	I0620 18:05:06.855403  362254 ubuntu.go:193] setting minikube options for container-runtime
	I0620 18:05:06.855606  362254 config.go:182] Loaded profile config "old-k8s-version-577369": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.20.0
	I0620 18:05:06.855669  362254 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-577369
	I0620 18:05:06.873422  362254 main.go:141] libmachine: Using SSH client type: native
	I0620 18:05:06.873681  362254 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x3e2bc0] 0x3e5420 <nil>  [] 0s} 127.0.0.1 33128 <nil> <nil>}
	I0620 18:05:06.873691  362254 main.go:141] libmachine: About to run SSH command:
	df --output=fstype / | tail -n 1
	I0620 18:05:07.005273  362254 main.go:141] libmachine: SSH cmd err, output: <nil>: overlay
	
	I0620 18:05:07.005308  362254 ubuntu.go:71] root file system type: overlay
	I0620 18:05:07.005430  362254 provision.go:314] Updating docker unit: /lib/systemd/system/docker.service ...
	I0620 18:05:07.005516  362254 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-577369
	I0620 18:05:07.023189  362254 main.go:141] libmachine: Using SSH client type: native
	I0620 18:05:07.023444  362254 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x3e2bc0] 0x3e5420 <nil>  [] 0s} 127.0.0.1 33128 <nil> <nil>}
	I0620 18:05:07.023528  362254 main.go:141] libmachine: About to run SSH command:
	sudo mkdir -p /lib/systemd/system && printf %s "[Unit]
	Description=Docker Application Container Engine
	Documentation=https://docs.docker.com
	BindsTo=containerd.service
	After=network-online.target firewalld.service containerd.service
	Wants=network-online.target
	Requires=docker.socket
	StartLimitBurst=3
	StartLimitIntervalSec=60
	
	[Service]
	Type=notify
	Restart=on-failure
	
	
	
	# This file is a systemd drop-in unit that inherits from the base dockerd configuration.
	# The base configuration already specifies an 'ExecStart=...' command. The first directive
	# here is to clear out that command inherited from the base configuration. Without this,
	# the command from the base configuration and the command specified here are treated as
	# a sequence of commands, which is not the desired behavior, nor is it valid -- systemd
	# will catch this invalid input and refuse to start the service with an error like:
	#  Service has more than one ExecStart= setting, which is only allowed for Type=oneshot services.
	
	# NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other
	# container runtimes. If left unlimited, it may result in OOM issues with MySQL.
	ExecStart=
	ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=docker --insecure-registry 10.96.0.0/12 
	ExecReload=/bin/kill -s HUP \$MAINPID
	
	# Having non-zero Limit*s causes performance problems due to accounting overhead
	# in the kernel. We recommend using cgroups to do container-local accounting.
	LimitNOFILE=infinity
	LimitNPROC=infinity
	LimitCORE=infinity
	
	# Uncomment TasksMax if your systemd version supports it.
	# Only systemd 226 and above support this version.
	TasksMax=infinity
	TimeoutStartSec=0
	
	# set delegate yes so that systemd does not reset the cgroups of docker containers
	Delegate=yes
	
	# kill only the docker process, not all processes in the cgroup
	KillMode=process
	
	[Install]
	WantedBy=multi-user.target
	" | sudo tee /lib/systemd/system/docker.service.new
	I0620 18:05:07.167582  362254 main.go:141] libmachine: SSH cmd err, output: <nil>: [Unit]
	Description=Docker Application Container Engine
	Documentation=https://docs.docker.com
	BindsTo=containerd.service
	After=network-online.target firewalld.service containerd.service
	Wants=network-online.target
	Requires=docker.socket
	StartLimitBurst=3
	StartLimitIntervalSec=60
	
	[Service]
	Type=notify
	Restart=on-failure
	
	
	
	# This file is a systemd drop-in unit that inherits from the base dockerd configuration.
	# The base configuration already specifies an 'ExecStart=...' command. The first directive
	# here is to clear out that command inherited from the base configuration. Without this,
	# the command from the base configuration and the command specified here are treated as
	# a sequence of commands, which is not the desired behavior, nor is it valid -- systemd
	# will catch this invalid input and refuse to start the service with an error like:
	#  Service has more than one ExecStart= setting, which is only allowed for Type=oneshot services.
	
	# NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other
	# container runtimes. If left unlimited, it may result in OOM issues with MySQL.
	ExecStart=
	ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=docker --insecure-registry 10.96.0.0/12 
	ExecReload=/bin/kill -s HUP $MAINPID
	
	# Having non-zero Limit*s causes performance problems due to accounting overhead
	# in the kernel. We recommend using cgroups to do container-local accounting.
	LimitNOFILE=infinity
	LimitNPROC=infinity
	LimitCORE=infinity
	
	# Uncomment TasksMax if your systemd version supports it.
	# Only systemd 226 and above support this version.
	TasksMax=infinity
	TimeoutStartSec=0
	
	# set delegate yes so that systemd does not reset the cgroups of docker containers
	Delegate=yes
	
	# kill only the docker process, not all processes in the cgroup
	KillMode=process
	
	[Install]
	WantedBy=multi-user.target
	
	I0620 18:05:07.167704  362254 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-577369
	I0620 18:05:07.186269  362254 main.go:141] libmachine: Using SSH client type: native
	I0620 18:05:07.186558  362254 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x3e2bc0] 0x3e5420 <nil>  [] 0s} 127.0.0.1 33128 <nil> <nil>}
	I0620 18:05:07.186580  362254 main.go:141] libmachine: About to run SSH command:
	sudo diff -u /lib/systemd/system/docker.service /lib/systemd/system/docker.service.new || { sudo mv /lib/systemd/system/docker.service.new /lib/systemd/system/docker.service; sudo systemctl -f daemon-reload && sudo systemctl -f enable docker && sudo systemctl -f restart docker; }
	I0620 18:05:07.324192  362254 main.go:141] libmachine: SSH cmd err, output: <nil>: 
	I0620 18:05:07.324277  362254 machine.go:97] duration metric: took 4.441438755s to provisionDockerMachine
	I0620 18:05:07.324301  362254 start.go:293] postStartSetup for "old-k8s-version-577369" (driver="docker")
	I0620 18:05:07.324343  362254 start.go:322] creating required directories: [/etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs]
	I0620 18:05:07.324435  362254 ssh_runner.go:195] Run: sudo mkdir -p /etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs
	I0620 18:05:07.324508  362254 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-577369
	I0620 18:05:07.345790  362254 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33128 SSHKeyPath:/home/jenkins/minikube-integration/19106-2452/.minikube/machines/old-k8s-version-577369/id_rsa Username:docker}
	I0620 18:05:07.441768  362254 ssh_runner.go:195] Run: cat /etc/os-release
	I0620 18:05:07.444962  362254 main.go:141] libmachine: Couldn't set key VERSION_CODENAME, no corresponding struct field found
	I0620 18:05:07.445001  362254 main.go:141] libmachine: Couldn't set key PRIVACY_POLICY_URL, no corresponding struct field found
	I0620 18:05:07.445018  362254 main.go:141] libmachine: Couldn't set key UBUNTU_CODENAME, no corresponding struct field found
	I0620 18:05:07.445026  362254 info.go:137] Remote host: Ubuntu 22.04.4 LTS
	I0620 18:05:07.445040  362254 filesync.go:126] Scanning /home/jenkins/minikube-integration/19106-2452/.minikube/addons for local assets ...
	I0620 18:05:07.445107  362254 filesync.go:126] Scanning /home/jenkins/minikube-integration/19106-2452/.minikube/files for local assets ...
	I0620 18:05:07.445210  362254 filesync.go:149] local asset: /home/jenkins/minikube-integration/19106-2452/.minikube/files/etc/ssl/certs/77842.pem -> 77842.pem in /etc/ssl/certs
	I0620 18:05:07.445324  362254 ssh_runner.go:195] Run: sudo mkdir -p /etc/ssl/certs
	I0620 18:05:07.454680  362254 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19106-2452/.minikube/files/etc/ssl/certs/77842.pem --> /etc/ssl/certs/77842.pem (1708 bytes)
	I0620 18:05:07.479523  362254 start.go:296] duration metric: took 155.177307ms for postStartSetup
	I0620 18:05:07.479665  362254 ssh_runner.go:195] Run: sh -c "df -h /var | awk 'NR==2{print $5}'"
	I0620 18:05:07.479746  362254 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-577369
	I0620 18:05:07.495983  362254 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33128 SSHKeyPath:/home/jenkins/minikube-integration/19106-2452/.minikube/machines/old-k8s-version-577369/id_rsa Username:docker}
	I0620 18:05:07.587896  362254 ssh_runner.go:195] Run: sh -c "df -BG /var | awk 'NR==2{print $4}'"
	I0620 18:05:07.592834  362254 fix.go:56] duration metric: took 5.278923959s for fixHost
	I0620 18:05:07.592861  362254 start.go:83] releasing machines lock for "old-k8s-version-577369", held for 5.27898073s
	I0620 18:05:07.592934  362254 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" old-k8s-version-577369
	I0620 18:05:07.608643  362254 ssh_runner.go:195] Run: cat /version.json
	I0620 18:05:07.608708  362254 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-577369
	I0620 18:05:07.608969  362254 ssh_runner.go:195] Run: curl -sS -m 2 https://registry.k8s.io/
	I0620 18:05:07.609037  362254 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-577369
	I0620 18:05:07.626012  362254 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33128 SSHKeyPath:/home/jenkins/minikube-integration/19106-2452/.minikube/machines/old-k8s-version-577369/id_rsa Username:docker}
	I0620 18:05:07.627183  362254 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33128 SSHKeyPath:/home/jenkins/minikube-integration/19106-2452/.minikube/machines/old-k8s-version-577369/id_rsa Username:docker}
	I0620 18:05:07.718614  362254 ssh_runner.go:195] Run: systemctl --version
	I0620 18:05:07.847712  362254 ssh_runner.go:195] Run: sh -c "stat /etc/cni/net.d/*loopback.conf*"
	I0620 18:05:07.852373  362254 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f -name *loopback.conf* -not -name *.mk_disabled -exec sh -c "grep -q loopback {} && ( grep -q name {} || sudo sed -i '/"type": "loopback"/i \ \ \ \ "name": "loopback",' {} ) && sudo sed -i 's|"cniVersion": ".*"|"cniVersion": "1.0.0"|g' {}" ;
	I0620 18:05:07.871102  362254 cni.go:230] loopback cni configuration patched: "/etc/cni/net.d/*loopback.conf*" found
	I0620 18:05:07.871186  362254 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f -name *bridge* -not -name *podman* -not -name *.mk_disabled -printf "%p, " -exec sh -c "sudo sed -i -r -e '/"dst": ".*:.*"/d' -e 's|^(.*)"dst": (.*)[,*]$|\1"dst": \2|g' -e '/"subnet": ".*:.*"/d' -e 's|^(.*)"subnet": ".*"(.*)[,*]$|\1"subnet": "10.244.0.0/16"\2|g' {}" ;
	I0620 18:05:07.888938  362254 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f -name *podman* -not -name *.mk_disabled -printf "%p, " -exec sh -c "sudo sed -i -r -e 's|^(.*)"subnet": ".*"(.*)$|\1"subnet": "10.244.0.0/16"\2|g' -e 's|^(.*)"gateway": ".*"(.*)$|\1"gateway": "10.244.0.1"\2|g' {}" ;
	I0620 18:05:07.907746  362254 cni.go:308] configured [/etc/cni/net.d/100-crio-bridge.conf, /etc/cni/net.d/87-podman-bridge.conflist] bridge cni config(s)
	I0620 18:05:07.907778  362254 start.go:494] detecting cgroup driver to use...
	I0620 18:05:07.907813  362254 detect.go:196] detected "cgroupfs" cgroup driver on host os
	I0620 18:05:07.907925  362254 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///run/containerd/containerd.sock
	" | sudo tee /etc/crictl.yaml"
	I0620 18:05:07.924926  362254 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)sandbox_image = .*$|\1sandbox_image = "registry.k8s.io/pause:3.2"|' /etc/containerd/config.toml"
	I0620 18:05:07.935168  362254 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)restrict_oom_score_adj = .*$|\1restrict_oom_score_adj = false|' /etc/containerd/config.toml"
	I0620 18:05:07.945988  362254 containerd.go:146] configuring containerd to use "cgroupfs" as cgroup driver...
	I0620 18:05:07.946064  362254 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)SystemdCgroup = .*$|\1SystemdCgroup = false|g' /etc/containerd/config.toml"
	I0620 18:05:07.957103  362254 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runtime.v1.linux"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
	I0620 18:05:07.967622  362254 ssh_runner.go:195] Run: sh -c "sudo sed -i '/systemd_cgroup/d' /etc/containerd/config.toml"
	I0620 18:05:07.977556  362254 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runc.v1"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
	I0620 18:05:07.987971  362254 ssh_runner.go:195] Run: sh -c "sudo rm -rf /etc/cni/net.mk"
	I0620 18:05:07.997590  362254 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)conf_dir = .*$|\1conf_dir = "/etc/cni/net.d"|g' /etc/containerd/config.toml"
	I0620 18:05:08.009155  362254 ssh_runner.go:195] Run: sudo sysctl net.bridge.bridge-nf-call-iptables
	I0620 18:05:08.019954  362254 ssh_runner.go:195] Run: sudo sh -c "echo 1 > /proc/sys/net/ipv4/ip_forward"
	I0620 18:05:08.030135  362254 ssh_runner.go:195] Run: sudo systemctl daemon-reload
	I0620 18:05:08.127231  362254 ssh_runner.go:195] Run: sudo systemctl restart containerd
	I0620 18:05:08.256451  362254 start.go:494] detecting cgroup driver to use...
	I0620 18:05:08.256544  362254 detect.go:196] detected "cgroupfs" cgroup driver on host os
	I0620 18:05:08.256626  362254 ssh_runner.go:195] Run: sudo systemctl cat docker.service
	I0620 18:05:08.270873  362254 cruntime.go:279] skipping containerd shutdown because we are bound to it
	I0620 18:05:08.271020  362254 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service crio
	I0620 18:05:08.285851  362254 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///var/run/dockershim.sock
	" | sudo tee /etc/crictl.yaml"
	I0620 18:05:08.305597  362254 ssh_runner.go:195] Run: which cri-dockerd
	I0620 18:05:08.310306  362254 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/cri-docker.service.d
	I0620 18:05:08.319532  362254 ssh_runner.go:362] scp memory --> /etc/systemd/system/cri-docker.service.d/10-cni.conf (189 bytes)
	I0620 18:05:08.338691  362254 ssh_runner.go:195] Run: sudo systemctl unmask docker.service
	I0620 18:05:08.486148  362254 ssh_runner.go:195] Run: sudo systemctl enable docker.socket
	I0620 18:05:08.589718  362254 docker.go:574] configuring docker to use "cgroupfs" as cgroup driver...
	I0620 18:05:08.589894  362254 ssh_runner.go:362] scp memory --> /etc/docker/daemon.json (130 bytes)
	I0620 18:05:08.611237  362254 ssh_runner.go:195] Run: sudo systemctl daemon-reload
	I0620 18:05:08.711763  362254 ssh_runner.go:195] Run: sudo systemctl restart docker
	I0620 18:05:09.148859  362254 ssh_runner.go:195] Run: docker version --format {{.Server.Version}}
	I0620 18:05:09.172027  362254 ssh_runner.go:195] Run: docker version --format {{.Server.Version}}
	I0620 18:05:09.200782  362254 out.go:204] * Preparing Kubernetes v1.20.0 on Docker 26.1.4 ...
	I0620 18:05:09.200887  362254 cli_runner.go:164] Run: docker network inspect old-k8s-version-577369 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
	I0620 18:05:09.221480  362254 ssh_runner.go:195] Run: grep 192.168.85.1	host.minikube.internal$ /etc/hosts
	I0620 18:05:09.225183  362254 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\thost.minikube.internal$' "/etc/hosts"; echo "192.168.85.1	host.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
	I0620 18:05:09.236844  362254 kubeadm.go:877] updating cluster {Name:old-k8s-version-577369 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1718753665-19106@sha256:735aacbd61d487240dc39ba6e4d70dd6ae1ad6181ca2ba092d372605e48ee636 Memory:2200 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:true NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.20.0 ClusterName:old-k8s-version-577369 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin: FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.85.2 Port:8443 KubernetesVersion:v1.20.0 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[dashboard:true default-storageclass:true metrics-server:true storage-provisioner:true] CustomAddonImages:map[MetricsScraper:registry.k8s.io/echoserver:1.4 MetricsServer:registry.k8s.io/echoserver:1.4] CustomAddonRegistries:map[MetricsServer:fake.domain] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} ...
	I0620 18:05:09.236974  362254 preload.go:132] Checking if preload exists for k8s version v1.20.0 and runtime docker
	I0620 18:05:09.237036  362254 ssh_runner.go:195] Run: docker images --format {{.Repository}}:{{.Tag}}
	I0620 18:05:09.255587  362254 docker.go:685] Got preloaded images: -- stdout --
	gcr.io/k8s-minikube/storage-provisioner:v5
	registry.k8s.io/kube-proxy:v1.20.0
	k8s.gcr.io/kube-proxy:v1.20.0
	k8s.gcr.io/kube-apiserver:v1.20.0
	registry.k8s.io/kube-apiserver:v1.20.0
	k8s.gcr.io/kube-controller-manager:v1.20.0
	registry.k8s.io/kube-controller-manager:v1.20.0
	k8s.gcr.io/kube-scheduler:v1.20.0
	registry.k8s.io/kube-scheduler:v1.20.0
	k8s.gcr.io/etcd:3.4.13-0
	registry.k8s.io/etcd:3.4.13-0
	k8s.gcr.io/coredns:1.7.0
	registry.k8s.io/coredns:1.7.0
	k8s.gcr.io/pause:3.2
	registry.k8s.io/pause:3.2
	gcr.io/k8s-minikube/busybox:1.28.4-glibc
	
	-- /stdout --
	I0620 18:05:09.255619  362254 docker.go:615] Images already preloaded, skipping extraction
	I0620 18:05:09.255688  362254 ssh_runner.go:195] Run: docker images --format {{.Repository}}:{{.Tag}}
	I0620 18:05:09.274939  362254 docker.go:685] Got preloaded images: -- stdout --
	gcr.io/k8s-minikube/storage-provisioner:v5
	k8s.gcr.io/kube-proxy:v1.20.0
	registry.k8s.io/kube-proxy:v1.20.0
	k8s.gcr.io/kube-apiserver:v1.20.0
	registry.k8s.io/kube-apiserver:v1.20.0
	k8s.gcr.io/kube-controller-manager:v1.20.0
	registry.k8s.io/kube-controller-manager:v1.20.0
	k8s.gcr.io/kube-scheduler:v1.20.0
	registry.k8s.io/kube-scheduler:v1.20.0
	k8s.gcr.io/etcd:3.4.13-0
	registry.k8s.io/etcd:3.4.13-0
	k8s.gcr.io/coredns:1.7.0
	registry.k8s.io/coredns:1.7.0
	k8s.gcr.io/pause:3.2
	registry.k8s.io/pause:3.2
	gcr.io/k8s-minikube/busybox:1.28.4-glibc
	
	-- /stdout --
	I0620 18:05:09.274960  362254 cache_images.go:84] Images are preloaded, skipping loading
	I0620 18:05:09.274969  362254 kubeadm.go:928] updating node { 192.168.85.2 8443 v1.20.0 docker true true} ...
	I0620 18:05:09.275107  362254 kubeadm.go:940] kubelet [Unit]
	Wants=docker.socket
	
	[Service]
	ExecStart=
	ExecStart=/var/lib/minikube/binaries/v1.20.0/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --config=/var/lib/kubelet/config.yaml --container-runtime=docker --hostname-override=old-k8s-version-577369 --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.85.2
	
	[Install]
	 config:
	{KubernetesVersion:v1.20.0 ClusterName:old-k8s-version-577369 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin: FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:}
	I0620 18:05:09.275183  362254 ssh_runner.go:195] Run: docker info --format {{.CgroupDriver}}
	I0620 18:05:09.320269  362254 cni.go:84] Creating CNI manager for ""
	I0620 18:05:09.320300  362254 cni.go:162] CNI unnecessary in this configuration, recommending no CNI
	I0620 18:05:09.320309  362254 kubeadm.go:84] Using pod CIDR: 10.244.0.0/16
	I0620 18:05:09.320338  362254 kubeadm.go:181] kubeadm options: {CertDir:/var/lib/minikube/certs ServiceCIDR:10.96.0.0/12 PodSubnet:10.244.0.0/16 AdvertiseAddress:192.168.85.2 APIServerPort:8443 KubernetesVersion:v1.20.0 EtcdDataDir:/var/lib/minikube/etcd EtcdExtraArgs:map[] ClusterName:old-k8s-version-577369 NodeName:old-k8s-version-577369 DNSDomain:cluster.local CRISocket:/var/run/dockershim.sock ImageRepository: ComponentOptions:[{Component:apiServer ExtraArgs:map[enable-admission-plugins:NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota] Pairs:map[certSANs:["127.0.0.1", "localhost", "192.168.85.2"]]} {Component:controllerManager ExtraArgs:map[allocate-node-cidrs:true leader-elect:false] Pairs:map[]} {Component:scheduler ExtraArgs:map[leader-elect:false] Pairs:map[]}] FeatureArgs:map[] NodeIP:192.168.85.2 CgroupDriver:cgroupfs ClientCAFile:/var/lib/minikube/certs/ca.crt StaticPodPath:/etc/kubernetes/manifests ControlPlaneAddress:control-plane.minikube.internal KubeProxyOptions:map[] ResolvConfSearchRegression:false KubeletConfigOpts:map[hairpinMode:hairpin-veth runtimeRequestTimeout:15m] PrependCriSocketUnix:false}
	I0620 18:05:09.320485  362254 kubeadm.go:187] kubeadm config:
	apiVersion: kubeadm.k8s.io/v1beta2
	kind: InitConfiguration
	localAPIEndpoint:
	  advertiseAddress: 192.168.85.2
	  bindPort: 8443
	bootstrapTokens:
	  - groups:
	      - system:bootstrappers:kubeadm:default-node-token
	    ttl: 24h0m0s
	    usages:
	      - signing
	      - authentication
	nodeRegistration:
	  criSocket: /var/run/dockershim.sock
	  name: "old-k8s-version-577369"
	  kubeletExtraArgs:
	    node-ip: 192.168.85.2
	  taints: []
	---
	apiVersion: kubeadm.k8s.io/v1beta2
	kind: ClusterConfiguration
	apiServer:
	  certSANs: ["127.0.0.1", "localhost", "192.168.85.2"]
	  extraArgs:
	    enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
	controllerManager:
	  extraArgs:
	    allocate-node-cidrs: "true"
	    leader-elect: "false"
	scheduler:
	  extraArgs:
	    leader-elect: "false"
	certificatesDir: /var/lib/minikube/certs
	clusterName: mk
	controlPlaneEndpoint: control-plane.minikube.internal:8443
	dns:
	  type: CoreDNS
	etcd:
	  local:
	    dataDir: /var/lib/minikube/etcd
	    extraArgs:
	      proxy-refresh-interval: "70000"
	kubernetesVersion: v1.20.0
	networking:
	  dnsDomain: cluster.local
	  podSubnet: "10.244.0.0/16"
	  serviceSubnet: 10.96.0.0/12
	---
	apiVersion: kubelet.config.k8s.io/v1beta1
	kind: KubeletConfiguration
	authentication:
	  x509:
	    clientCAFile: /var/lib/minikube/certs/ca.crt
	cgroupDriver: cgroupfs
	hairpinMode: hairpin-veth
	runtimeRequestTimeout: 15m
	clusterDomain: "cluster.local"
	# disable disk resource management by default
	imageGCHighThresholdPercent: 100
	evictionHard:
	  nodefs.available: "0%"
	  nodefs.inodesFree: "0%"
	  imagefs.available: "0%"
	failSwapOn: false
	staticPodPath: /etc/kubernetes/manifests
	---
	apiVersion: kubeproxy.config.k8s.io/v1alpha1
	kind: KubeProxyConfiguration
	clusterCIDR: "10.244.0.0/16"
	metricsBindAddress: 0.0.0.0:10249
	conntrack:
	  maxPerCore: 0
	# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_established"
	  tcpEstablishedTimeout: 0s
	# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_close"
	  tcpCloseWaitTimeout: 0s
	
	I0620 18:05:09.320555  362254 ssh_runner.go:195] Run: sudo ls /var/lib/minikube/binaries/v1.20.0
	I0620 18:05:09.329466  362254 binaries.go:44] Found k8s binaries, skipping transfer
	I0620 18:05:09.329535  362254 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/kubelet.service.d /lib/systemd/system /var/tmp/minikube
	I0620 18:05:09.338188  362254 ssh_runner.go:362] scp memory --> /etc/systemd/system/kubelet.service.d/10-kubeadm.conf (348 bytes)
	I0620 18:05:09.357370  362254 ssh_runner.go:362] scp memory --> /lib/systemd/system/kubelet.service (352 bytes)
	I0620 18:05:09.377246  362254 ssh_runner.go:362] scp memory --> /var/tmp/minikube/kubeadm.yaml.new (2118 bytes)
	I0620 18:05:09.397326  362254 ssh_runner.go:195] Run: grep 192.168.85.2	control-plane.minikube.internal$ /etc/hosts
	I0620 18:05:09.401022  362254 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\tcontrol-plane.minikube.internal$' "/etc/hosts"; echo "192.168.85.2	control-plane.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
	I0620 18:05:09.412824  362254 ssh_runner.go:195] Run: sudo systemctl daemon-reload
	I0620 18:05:09.494437  362254 ssh_runner.go:195] Run: sudo systemctl start kubelet
	I0620 18:05:09.511408  362254 certs.go:68] Setting up /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/old-k8s-version-577369 for IP: 192.168.85.2
	I0620 18:05:09.511438  362254 certs.go:194] generating shared ca certs ...
	I0620 18:05:09.511456  362254 certs.go:226] acquiring lock for ca certs: {Name:mk1f8a102b3933d1e67f4b3f5a97c6bde91190df Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
	I0620 18:05:09.511660  362254 certs.go:235] skipping valid "minikubeCA" ca cert: /home/jenkins/minikube-integration/19106-2452/.minikube/ca.key
	I0620 18:05:09.511745  362254 certs.go:235] skipping valid "proxyClientCA" ca cert: /home/jenkins/minikube-integration/19106-2452/.minikube/proxy-client-ca.key
	I0620 18:05:09.511759  362254 certs.go:256] generating profile certs ...
	I0620 18:05:09.511863  362254 certs.go:359] skipping valid signed profile cert regeneration for "minikube-user": /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/old-k8s-version-577369/client.key
	I0620 18:05:09.511948  362254 certs.go:359] skipping valid signed profile cert regeneration for "minikube": /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/old-k8s-version-577369/apiserver.key.88bbc3ef
	I0620 18:05:09.512015  362254 certs.go:359] skipping valid signed profile cert regeneration for "aggregator": /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/old-k8s-version-577369/proxy-client.key
	I0620 18:05:09.512177  362254 certs.go:484] found cert: /home/jenkins/minikube-integration/19106-2452/.minikube/certs/7784.pem (1338 bytes)
	W0620 18:05:09.512231  362254 certs.go:480] ignoring /home/jenkins/minikube-integration/19106-2452/.minikube/certs/7784_empty.pem, impossibly tiny 0 bytes
	I0620 18:05:09.512247  362254 certs.go:484] found cert: /home/jenkins/minikube-integration/19106-2452/.minikube/certs/ca-key.pem (1675 bytes)
	I0620 18:05:09.512277  362254 certs.go:484] found cert: /home/jenkins/minikube-integration/19106-2452/.minikube/certs/ca.pem (1078 bytes)
	I0620 18:05:09.512338  362254 certs.go:484] found cert: /home/jenkins/minikube-integration/19106-2452/.minikube/certs/cert.pem (1123 bytes)
	I0620 18:05:09.512370  362254 certs.go:484] found cert: /home/jenkins/minikube-integration/19106-2452/.minikube/certs/key.pem (1675 bytes)
	I0620 18:05:09.512436  362254 certs.go:484] found cert: /home/jenkins/minikube-integration/19106-2452/.minikube/files/etc/ssl/certs/77842.pem (1708 bytes)
	I0620 18:05:09.513191  362254 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19106-2452/.minikube/ca.crt --> /var/lib/minikube/certs/ca.crt (1111 bytes)
	I0620 18:05:09.542581  362254 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19106-2452/.minikube/ca.key --> /var/lib/minikube/certs/ca.key (1679 bytes)
	I0620 18:05:09.568798  362254 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19106-2452/.minikube/proxy-client-ca.crt --> /var/lib/minikube/certs/proxy-client-ca.crt (1119 bytes)
	I0620 18:05:09.594096  362254 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19106-2452/.minikube/proxy-client-ca.key --> /var/lib/minikube/certs/proxy-client-ca.key (1679 bytes)
	I0620 18:05:09.619811  362254 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/old-k8s-version-577369/apiserver.crt --> /var/lib/minikube/certs/apiserver.crt (1432 bytes)
	I0620 18:05:09.644618  362254 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/old-k8s-version-577369/apiserver.key --> /var/lib/minikube/certs/apiserver.key (1675 bytes)
	I0620 18:05:09.683724  362254 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/old-k8s-version-577369/proxy-client.crt --> /var/lib/minikube/certs/proxy-client.crt (1147 bytes)
	I0620 18:05:09.718279  362254 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/old-k8s-version-577369/proxy-client.key --> /var/lib/minikube/certs/proxy-client.key (1679 bytes)
	I0620 18:05:09.749046  362254 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19106-2452/.minikube/ca.crt --> /usr/share/ca-certificates/minikubeCA.pem (1111 bytes)
	I0620 18:05:09.786813  362254 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19106-2452/.minikube/certs/7784.pem --> /usr/share/ca-certificates/7784.pem (1338 bytes)
	I0620 18:05:09.820250  362254 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19106-2452/.minikube/files/etc/ssl/certs/77842.pem --> /usr/share/ca-certificates/77842.pem (1708 bytes)
	I0620 18:05:09.846958  362254 ssh_runner.go:362] scp memory --> /var/lib/minikube/kubeconfig (738 bytes)
	I0620 18:05:09.866653  362254 ssh_runner.go:195] Run: openssl version
	I0620 18:05:09.872211  362254 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/77842.pem && ln -fs /usr/share/ca-certificates/77842.pem /etc/ssl/certs/77842.pem"
	I0620 18:05:09.881974  362254 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/77842.pem
	I0620 18:05:09.885645  362254 certs.go:528] hashing: -rw-r--r-- 1 root root 1708 Jun 20 17:09 /usr/share/ca-certificates/77842.pem
	I0620 18:05:09.885778  362254 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/77842.pem
	I0620 18:05:09.893019  362254 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/3ec20f2e.0 || ln -fs /etc/ssl/certs/77842.pem /etc/ssl/certs/3ec20f2e.0"
	I0620 18:05:09.901984  362254 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/minikubeCA.pem && ln -fs /usr/share/ca-certificates/minikubeCA.pem /etc/ssl/certs/minikubeCA.pem"
	I0620 18:05:09.911668  362254 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/minikubeCA.pem
	I0620 18:05:09.915325  362254 certs.go:528] hashing: -rw-r--r-- 1 root root 1111 Jun 20 17:02 /usr/share/ca-certificates/minikubeCA.pem
	I0620 18:05:09.915400  362254 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem
	I0620 18:05:09.922504  362254 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/b5213941.0 || ln -fs /etc/ssl/certs/minikubeCA.pem /etc/ssl/certs/b5213941.0"
	I0620 18:05:09.931799  362254 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/7784.pem && ln -fs /usr/share/ca-certificates/7784.pem /etc/ssl/certs/7784.pem"
	I0620 18:05:09.941599  362254 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/7784.pem
	I0620 18:05:09.945229  362254 certs.go:528] hashing: -rw-r--r-- 1 root root 1338 Jun 20 17:09 /usr/share/ca-certificates/7784.pem
	I0620 18:05:09.945314  362254 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/7784.pem
	I0620 18:05:09.952397  362254 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/51391683.0 || ln -fs /etc/ssl/certs/7784.pem /etc/ssl/certs/51391683.0"
	I0620 18:05:09.961670  362254 ssh_runner.go:195] Run: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt
	I0620 18:05:09.965732  362254 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/apiserver-etcd-client.crt -checkend 86400
	I0620 18:05:09.973123  362254 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/apiserver-kubelet-client.crt -checkend 86400
	I0620 18:05:09.980371  362254 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/etcd/server.crt -checkend 86400
	I0620 18:05:09.987722  362254 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/etcd/healthcheck-client.crt -checkend 86400
	I0620 18:05:09.994956  362254 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/etcd/peer.crt -checkend 86400
	I0620 18:05:10.005268  362254 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/front-proxy-client.crt -checkend 86400
	I0620 18:05:10.014434  362254 kubeadm.go:391] StartCluster: {Name:old-k8s-version-577369 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1718753665-19106@sha256:735aacbd61d487240dc39ba6e4d70dd6ae1ad6181ca2ba092d372605e48ee636 Memory:2200 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:true NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.20.0 ClusterName:old-k8s-version-577369 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin: FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.85.2 Port:8443 KubernetesVersion:v1.20.0 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[dashboard:true default-storageclass:true metrics-server:true storage-provisioner:true] CustomAddonImages:map[MetricsScraper:registry.k8s.io/echoserver:1.4 MetricsServer:registry.k8s.io/echoserver:1.4] CustomAddonRegistries:map[MetricsServer:fake.domain] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
	I0620 18:05:10.014652  362254 ssh_runner.go:195] Run: docker ps --filter status=paused --filter=name=k8s_.*_(kube-system)_ --format={{.ID}}
	I0620 18:05:10.036766  362254 ssh_runner.go:195] Run: sudo ls /var/lib/kubelet/kubeadm-flags.env /var/lib/kubelet/config.yaml /var/lib/minikube/etcd
	W0620 18:05:10.047735  362254 kubeadm.go:404] apiserver tunnel failed: apiserver port not set
	I0620 18:05:10.047759  362254 kubeadm.go:407] found existing configuration files, will attempt cluster restart
	I0620 18:05:10.047765  362254 kubeadm.go:587] restartPrimaryControlPlane start ...
	I0620 18:05:10.047838  362254 ssh_runner.go:195] Run: sudo test -d /data/minikube
	I0620 18:05:10.057508  362254 kubeadm.go:129] /data/minikube skipping compat symlinks: sudo test -d /data/minikube: Process exited with status 1
	stdout:
	
	stderr:
	I0620 18:05:10.058492  362254 kubeconfig.go:47] verify endpoint returned: get endpoint: "old-k8s-version-577369" does not appear in /home/jenkins/minikube-integration/19106-2452/kubeconfig
	I0620 18:05:10.059077  362254 kubeconfig.go:62] /home/jenkins/minikube-integration/19106-2452/kubeconfig needs updating (will repair): [kubeconfig missing "old-k8s-version-577369" cluster setting kubeconfig missing "old-k8s-version-577369" context setting]
	I0620 18:05:10.059901  362254 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19106-2452/kubeconfig: {Name:mk967cf82c0948dae02e8ac8d029ebc6e66977ec Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
	I0620 18:05:10.061729  362254 ssh_runner.go:195] Run: sudo diff -u /var/tmp/minikube/kubeadm.yaml /var/tmp/minikube/kubeadm.yaml.new
	I0620 18:05:10.072189  362254 kubeadm.go:624] The running cluster does not require reconfiguration: 192.168.85.2
	I0620 18:05:10.072273  362254 kubeadm.go:591] duration metric: took 24.502149ms to restartPrimaryControlPlane
	I0620 18:05:10.072292  362254 kubeadm.go:393] duration metric: took 57.866944ms to StartCluster
	I0620 18:05:10.072309  362254 settings.go:142] acquiring lock: {Name:mk6241da33092f9e98cd6bf3e519e03a5a9ec197 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
	I0620 18:05:10.072382  362254 settings.go:150] Updating kubeconfig:  /home/jenkins/minikube-integration/19106-2452/kubeconfig
	I0620 18:05:10.073905  362254 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19106-2452/kubeconfig: {Name:mk967cf82c0948dae02e8ac8d029ebc6e66977ec Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
	I0620 18:05:10.074230  362254 start.go:234] Will wait 6m0s for node &{Name: IP:192.168.85.2 Port:8443 KubernetesVersion:v1.20.0 ContainerRuntime:docker ControlPlane:true Worker:true}
	I0620 18:05:10.074531  362254 config.go:182] Loaded profile config "old-k8s-version-577369": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.20.0
	I0620 18:05:10.074604  362254 addons.go:507] enable addons start: toEnable=map[ambassador:false auto-pause:false cloud-spanner:false csi-hostpath-driver:false dashboard:true default-storageclass:true efk:false freshpod:false gcp-auth:false gvisor:false headlamp:false helm-tiller:false inaccel:false ingress:false ingress-dns:false inspektor-gadget:false istio:false istio-provisioner:false kong:false kubeflow:false kubevirt:false logviewer:false metallb:false metrics-server:true nvidia-device-plugin:false nvidia-driver-installer:false nvidia-gpu-device-plugin:false olm:false pod-security-policy:false portainer:false registry:false registry-aliases:false registry-creds:false storage-provisioner:true storage-provisioner-gluster:false storage-provisioner-rancher:false volcano:false volumesnapshots:false yakd:false]
	I0620 18:05:10.074721  362254 addons.go:69] Setting default-storageclass=true in profile "old-k8s-version-577369"
	I0620 18:05:10.074733  362254 addons.go:69] Setting dashboard=true in profile "old-k8s-version-577369"
	I0620 18:05:10.074793  362254 addons.go:234] Setting addon dashboard=true in "old-k8s-version-577369"
	W0620 18:05:10.074826  362254 addons.go:243] addon dashboard should already be in state true
	I0620 18:05:10.074874  362254 host.go:66] Checking if "old-k8s-version-577369" exists ...
	I0620 18:05:10.074758  362254 addons_storage_classes.go:33] enableOrDisableStorageClasses default-storageclass=true on "old-k8s-version-577369"
	I0620 18:05:10.075496  362254 cli_runner.go:164] Run: docker container inspect old-k8s-version-577369 --format={{.State.Status}}
	I0620 18:05:10.075609  362254 cli_runner.go:164] Run: docker container inspect old-k8s-version-577369 --format={{.State.Status}}
	I0620 18:05:10.074728  362254 addons.go:69] Setting metrics-server=true in profile "old-k8s-version-577369"
	I0620 18:05:10.076101  362254 addons.go:234] Setting addon metrics-server=true in "old-k8s-version-577369"
	W0620 18:05:10.076121  362254 addons.go:243] addon metrics-server should already be in state true
	I0620 18:05:10.076150  362254 host.go:66] Checking if "old-k8s-version-577369" exists ...
	I0620 18:05:10.076745  362254 cli_runner.go:164] Run: docker container inspect old-k8s-version-577369 --format={{.State.Status}}
	I0620 18:05:10.074721  362254 addons.go:69] Setting storage-provisioner=true in profile "old-k8s-version-577369"
	I0620 18:05:10.080055  362254 addons.go:234] Setting addon storage-provisioner=true in "old-k8s-version-577369"
	W0620 18:05:10.080078  362254 addons.go:243] addon storage-provisioner should already be in state true
	I0620 18:05:10.080110  362254 host.go:66] Checking if "old-k8s-version-577369" exists ...
	I0620 18:05:10.080590  362254 cli_runner.go:164] Run: docker container inspect old-k8s-version-577369 --format={{.State.Status}}
	I0620 18:05:10.081030  362254 out.go:177] * Verifying Kubernetes components...
	I0620 18:05:10.088001  362254 ssh_runner.go:195] Run: sudo systemctl daemon-reload
	I0620 18:05:10.128224  362254 out.go:177]   - Using image docker.io/kubernetesui/dashboard:v2.7.0
	I0620 18:05:10.128851  362254 addons.go:234] Setting addon default-storageclass=true in "old-k8s-version-577369"
	W0620 18:05:10.128865  362254 addons.go:243] addon default-storageclass should already be in state true
	I0620 18:05:10.128894  362254 host.go:66] Checking if "old-k8s-version-577369" exists ...
	I0620 18:05:10.129406  362254 cli_runner.go:164] Run: docker container inspect old-k8s-version-577369 --format={{.State.Status}}
	I0620 18:05:10.135297  362254 out.go:177]   - Using image registry.k8s.io/echoserver:1.4
	I0620 18:05:10.140434  362254 addons.go:431] installing /etc/kubernetes/addons/dashboard-ns.yaml
	I0620 18:05:10.140461  362254 ssh_runner.go:362] scp dashboard/dashboard-ns.yaml --> /etc/kubernetes/addons/dashboard-ns.yaml (759 bytes)
	I0620 18:05:10.140532  362254 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-577369
	I0620 18:05:10.141010  362254 out.go:177]   - Using image fake.domain/registry.k8s.io/echoserver:1.4
	I0620 18:05:10.142637  362254 addons.go:431] installing /etc/kubernetes/addons/metrics-apiservice.yaml
	I0620 18:05:10.142657  362254 ssh_runner.go:362] scp metrics-server/metrics-apiservice.yaml --> /etc/kubernetes/addons/metrics-apiservice.yaml (424 bytes)
	I0620 18:05:10.142730  362254 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-577369
	I0620 18:05:10.154380  362254 out.go:177]   - Using image gcr.io/k8s-minikube/storage-provisioner:v5
	I0620 18:05:10.156683  362254 addons.go:431] installing /etc/kubernetes/addons/storage-provisioner.yaml
	I0620 18:05:10.156705  362254 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/storage-provisioner.yaml (2676 bytes)
	I0620 18:05:10.156772  362254 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-577369
	I0620 18:05:10.185857  362254 addons.go:431] installing /etc/kubernetes/addons/storageclass.yaml
	I0620 18:05:10.185878  362254 ssh_runner.go:362] scp storageclass/storageclass.yaml --> /etc/kubernetes/addons/storageclass.yaml (271 bytes)
	I0620 18:05:10.185944  362254 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-577369
	I0620 18:05:10.215157  362254 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33128 SSHKeyPath:/home/jenkins/minikube-integration/19106-2452/.minikube/machines/old-k8s-version-577369/id_rsa Username:docker}
	I0620 18:05:10.216163  362254 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33128 SSHKeyPath:/home/jenkins/minikube-integration/19106-2452/.minikube/machines/old-k8s-version-577369/id_rsa Username:docker}
	I0620 18:05:10.228904  362254 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33128 SSHKeyPath:/home/jenkins/minikube-integration/19106-2452/.minikube/machines/old-k8s-version-577369/id_rsa Username:docker}
	I0620 18:05:10.235002  362254 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33128 SSHKeyPath:/home/jenkins/minikube-integration/19106-2452/.minikube/machines/old-k8s-version-577369/id_rsa Username:docker}
	I0620 18:05:10.255097  362254 ssh_runner.go:195] Run: sudo systemctl start kubelet
	I0620 18:05:10.295504  362254 node_ready.go:35] waiting up to 6m0s for node "old-k8s-version-577369" to be "Ready" ...
	I0620 18:05:10.365152  362254 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml
	I0620 18:05:10.380726  362254 addons.go:431] installing /etc/kubernetes/addons/metrics-server-deployment.yaml
	I0620 18:05:10.380752  362254 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/metrics-server-deployment.yaml (1825 bytes)
	I0620 18:05:10.396366  362254 addons.go:431] installing /etc/kubernetes/addons/dashboard-clusterrole.yaml
	I0620 18:05:10.396416  362254 ssh_runner.go:362] scp dashboard/dashboard-clusterrole.yaml --> /etc/kubernetes/addons/dashboard-clusterrole.yaml (1001 bytes)
	I0620 18:05:10.417045  362254 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml
	I0620 18:05:10.430817  362254 addons.go:431] installing /etc/kubernetes/addons/metrics-server-rbac.yaml
	I0620 18:05:10.430844  362254 ssh_runner.go:362] scp metrics-server/metrics-server-rbac.yaml --> /etc/kubernetes/addons/metrics-server-rbac.yaml (2175 bytes)
	I0620 18:05:10.446542  362254 addons.go:431] installing /etc/kubernetes/addons/dashboard-clusterrolebinding.yaml
	I0620 18:05:10.446575  362254 ssh_runner.go:362] scp dashboard/dashboard-clusterrolebinding.yaml --> /etc/kubernetes/addons/dashboard-clusterrolebinding.yaml (1018 bytes)
	I0620 18:05:10.479537  362254 addons.go:431] installing /etc/kubernetes/addons/metrics-server-service.yaml
	I0620 18:05:10.479563  362254 ssh_runner.go:362] scp metrics-server/metrics-server-service.yaml --> /etc/kubernetes/addons/metrics-server-service.yaml (446 bytes)
	I0620 18:05:10.497986  362254 addons.go:431] installing /etc/kubernetes/addons/dashboard-configmap.yaml
	I0620 18:05:10.498012  362254 ssh_runner.go:362] scp dashboard/dashboard-configmap.yaml --> /etc/kubernetes/addons/dashboard-configmap.yaml (837 bytes)
	W0620 18:05:10.523920  362254 addons.go:457] apply failed, will retry: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml: Process exited with status 1
	stdout:
	
	stderr:
	The connection to the server localhost:8443 was refused - did you specify the right host or port?
	I0620 18:05:10.523959  362254 retry.go:31] will retry after 348.212297ms: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml: Process exited with status 1
	stdout:
	
	stderr:
	The connection to the server localhost:8443 was refused - did you specify the right host or port?
	I0620 18:05:10.533552  362254 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply -f /etc/kubernetes/addons/metrics-apiservice.yaml -f /etc/kubernetes/addons/metrics-server-deployment.yaml -f /etc/kubernetes/addons/metrics-server-rbac.yaml -f /etc/kubernetes/addons/metrics-server-service.yaml
	I0620 18:05:10.569621  362254 addons.go:431] installing /etc/kubernetes/addons/dashboard-dp.yaml
	I0620 18:05:10.569647  362254 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/dashboard-dp.yaml (4201 bytes)
	W0620 18:05:10.599467  362254 addons.go:457] apply failed, will retry: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml: Process exited with status 1
	stdout:
	
	stderr:
	The connection to the server localhost:8443 was refused - did you specify the right host or port?
	I0620 18:05:10.599502  362254 retry.go:31] will retry after 183.993197ms: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml: Process exited with status 1
	stdout:
	
	stderr:
	The connection to the server localhost:8443 was refused - did you specify the right host or port?
	I0620 18:05:10.614127  362254 addons.go:431] installing /etc/kubernetes/addons/dashboard-role.yaml
	I0620 18:05:10.614171  362254 ssh_runner.go:362] scp dashboard/dashboard-role.yaml --> /etc/kubernetes/addons/dashboard-role.yaml (1724 bytes)
	I0620 18:05:10.633610  362254 addons.go:431] installing /etc/kubernetes/addons/dashboard-rolebinding.yaml
	I0620 18:05:10.633646  362254 ssh_runner.go:362] scp dashboard/dashboard-rolebinding.yaml --> /etc/kubernetes/addons/dashboard-rolebinding.yaml (1046 bytes)
	W0620 18:05:10.646065  362254 addons.go:457] apply failed, will retry: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply -f /etc/kubernetes/addons/metrics-apiservice.yaml -f /etc/kubernetes/addons/metrics-server-deployment.yaml -f /etc/kubernetes/addons/metrics-server-rbac.yaml -f /etc/kubernetes/addons/metrics-server-service.yaml: Process exited with status 1
	stdout:
	
	stderr:
	The connection to the server localhost:8443 was refused - did you specify the right host or port?
	I0620 18:05:10.646100  362254 retry.go:31] will retry after 318.239177ms: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply -f /etc/kubernetes/addons/metrics-apiservice.yaml -f /etc/kubernetes/addons/metrics-server-deployment.yaml -f /etc/kubernetes/addons/metrics-server-rbac.yaml -f /etc/kubernetes/addons/metrics-server-service.yaml: Process exited with status 1
	stdout:
	
	stderr:
	The connection to the server localhost:8443 was refused - did you specify the right host or port?
	I0620 18:05:10.653842  362254 addons.go:431] installing /etc/kubernetes/addons/dashboard-sa.yaml
	I0620 18:05:10.653881  362254 ssh_runner.go:362] scp dashboard/dashboard-sa.yaml --> /etc/kubernetes/addons/dashboard-sa.yaml (837 bytes)
	I0620 18:05:10.672462  362254 addons.go:431] installing /etc/kubernetes/addons/dashboard-secret.yaml
	I0620 18:05:10.672529  362254 ssh_runner.go:362] scp dashboard/dashboard-secret.yaml --> /etc/kubernetes/addons/dashboard-secret.yaml (1389 bytes)
	I0620 18:05:10.691048  362254 addons.go:431] installing /etc/kubernetes/addons/dashboard-svc.yaml
	I0620 18:05:10.691070  362254 ssh_runner.go:362] scp dashboard/dashboard-svc.yaml --> /etc/kubernetes/addons/dashboard-svc.yaml (1294 bytes)
	I0620 18:05:10.712106  362254 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply -f /etc/kubernetes/addons/dashboard-ns.yaml -f /etc/kubernetes/addons/dashboard-clusterrole.yaml -f /etc/kubernetes/addons/dashboard-clusterrolebinding.yaml -f /etc/kubernetes/addons/dashboard-configmap.yaml -f /etc/kubernetes/addons/dashboard-dp.yaml -f /etc/kubernetes/addons/dashboard-role.yaml -f /etc/kubernetes/addons/dashboard-rolebinding.yaml -f /etc/kubernetes/addons/dashboard-sa.yaml -f /etc/kubernetes/addons/dashboard-secret.yaml -f /etc/kubernetes/addons/dashboard-svc.yaml
	I0620 18:05:10.784354  362254 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/storage-provisioner.yaml
	W0620 18:05:10.789262  362254 addons.go:457] apply failed, will retry: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply -f /etc/kubernetes/addons/dashboard-ns.yaml -f /etc/kubernetes/addons/dashboard-clusterrole.yaml -f /etc/kubernetes/addons/dashboard-clusterrolebinding.yaml -f /etc/kubernetes/addons/dashboard-configmap.yaml -f /etc/kubernetes/addons/dashboard-dp.yaml -f /etc/kubernetes/addons/dashboard-role.yaml -f /etc/kubernetes/addons/dashboard-rolebinding.yaml -f /etc/kubernetes/addons/dashboard-sa.yaml -f /etc/kubernetes/addons/dashboard-secret.yaml -f /etc/kubernetes/addons/dashboard-svc.yaml: Process exited with status 1
	stdout:
	
	stderr:
	The connection to the server localhost:8443 was refused - did you specify the right host or port?
	I0620 18:05:10.789296  362254 retry.go:31] will retry after 355.346233ms: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply -f /etc/kubernetes/addons/dashboard-ns.yaml -f /etc/kubernetes/addons/dashboard-clusterrole.yaml -f /etc/kubernetes/addons/dashboard-clusterrolebinding.yaml -f /etc/kubernetes/addons/dashboard-configmap.yaml -f /etc/kubernetes/addons/dashboard-dp.yaml -f /etc/kubernetes/addons/dashboard-role.yaml -f /etc/kubernetes/addons/dashboard-rolebinding.yaml -f /etc/kubernetes/addons/dashboard-sa.yaml -f /etc/kubernetes/addons/dashboard-secret.yaml -f /etc/kubernetes/addons/dashboard-svc.yaml: Process exited with status 1
	stdout:
	
	stderr:
	The connection to the server localhost:8443 was refused - did you specify the right host or port?
	W0620 18:05:10.857982  362254 addons.go:457] apply failed, will retry: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/storage-provisioner.yaml: Process exited with status 1
	stdout:
	
	stderr:
	The connection to the server localhost:8443 was refused - did you specify the right host or port?
	I0620 18:05:10.858012  362254 retry.go:31] will retry after 253.429944ms: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/storage-provisioner.yaml: Process exited with status 1
	stdout:
	
	stderr:
	The connection to the server localhost:8443 was refused - did you specify the right host or port?
	I0620 18:05:10.873224  362254 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/storageclass.yaml
	W0620 18:05:10.954848  362254 addons.go:457] apply failed, will retry: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/storageclass.yaml: Process exited with status 1
	stdout:
	
	stderr:
	The connection to the server localhost:8443 was refused - did you specify the right host or port?
	I0620 18:05:10.954924  362254 retry.go:31] will retry after 297.412948ms: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/storageclass.yaml: Process exited with status 1
	stdout:
	
	stderr:
	The connection to the server localhost:8443 was refused - did you specify the right host or port?
	I0620 18:05:10.965238  362254 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/metrics-apiservice.yaml -f /etc/kubernetes/addons/metrics-server-deployment.yaml -f /etc/kubernetes/addons/metrics-server-rbac.yaml -f /etc/kubernetes/addons/metrics-server-service.yaml
	W0620 18:05:11.044109  362254 addons.go:457] apply failed, will retry: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/metrics-apiservice.yaml -f /etc/kubernetes/addons/metrics-server-deployment.yaml -f /etc/kubernetes/addons/metrics-server-rbac.yaml -f /etc/kubernetes/addons/metrics-server-service.yaml: Process exited with status 1
	stdout:
	
	stderr:
	The connection to the server localhost:8443 was refused - did you specify the right host or port?
	I0620 18:05:11.044139  362254 retry.go:31] will retry after 321.86944ms: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/metrics-apiservice.yaml -f /etc/kubernetes/addons/metrics-server-deployment.yaml -f /etc/kubernetes/addons/metrics-server-rbac.yaml -f /etc/kubernetes/addons/metrics-server-service.yaml: Process exited with status 1
	stdout:
	
	stderr:
	The connection to the server localhost:8443 was refused - did you specify the right host or port?
	I0620 18:05:11.112560  362254 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/storage-provisioner.yaml
	I0620 18:05:11.145149  362254 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/dashboard-ns.yaml -f /etc/kubernetes/addons/dashboard-clusterrole.yaml -f /etc/kubernetes/addons/dashboard-clusterrolebinding.yaml -f /etc/kubernetes/addons/dashboard-configmap.yaml -f /etc/kubernetes/addons/dashboard-dp.yaml -f /etc/kubernetes/addons/dashboard-role.yaml -f /etc/kubernetes/addons/dashboard-rolebinding.yaml -f /etc/kubernetes/addons/dashboard-sa.yaml -f /etc/kubernetes/addons/dashboard-secret.yaml -f /etc/kubernetes/addons/dashboard-svc.yaml
	W0620 18:05:11.216246  362254 addons.go:457] apply failed, will retry: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/storage-provisioner.yaml: Process exited with status 1
	stdout:
	
	stderr:
	The connection to the server localhost:8443 was refused - did you specify the right host or port?
	I0620 18:05:11.216319  362254 retry.go:31] will retry after 712.41259ms: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/storage-provisioner.yaml: Process exited with status 1
	stdout:
	
	stderr:
	The connection to the server localhost:8443 was refused - did you specify the right host or port?
	W0620 18:05:11.236765  362254 addons.go:457] apply failed, will retry: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/dashboard-ns.yaml -f /etc/kubernetes/addons/dashboard-clusterrole.yaml -f /etc/kubernetes/addons/dashboard-clusterrolebinding.yaml -f /etc/kubernetes/addons/dashboard-configmap.yaml -f /etc/kubernetes/addons/dashboard-dp.yaml -f /etc/kubernetes/addons/dashboard-role.yaml -f /etc/kubernetes/addons/dashboard-rolebinding.yaml -f /etc/kubernetes/addons/dashboard-sa.yaml -f /etc/kubernetes/addons/dashboard-secret.yaml -f /etc/kubernetes/addons/dashboard-svc.yaml: Process exited with status 1
	stdout:
	
	stderr:
	The connection to the server localhost:8443 was refused - did you specify the right host or port?
	I0620 18:05:11.236795  362254 retry.go:31] will retry after 449.332514ms: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/dashboard-ns.yaml -f /etc/kubernetes/addons/dashboard-clusterrole.yaml -f /etc/kubernetes/addons/dashboard-clusterrolebinding.yaml -f /etc/kubernetes/addons/dashboard-configmap.yaml -f /etc/kubernetes/addons/dashboard-dp.yaml -f /etc/kubernetes/addons/dashboard-role.yaml -f /etc/kubernetes/addons/dashboard-rolebinding.yaml -f /etc/kubernetes/addons/dashboard-sa.yaml -f /etc/kubernetes/addons/dashboard-secret.yaml -f /etc/kubernetes/addons/dashboard-svc.yaml: Process exited with status 1
	stdout:
	
	stderr:
	The connection to the server localhost:8443 was refused - did you specify the right host or port?
	I0620 18:05:11.252917  362254 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/storageclass.yaml
	W0620 18:05:11.330686  362254 addons.go:457] apply failed, will retry: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/storageclass.yaml: Process exited with status 1
	stdout:
	
	stderr:
	The connection to the server localhost:8443 was refused - did you specify the right host or port?
	I0620 18:05:11.330719  362254 retry.go:31] will retry after 351.587159ms: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/storageclass.yaml: Process exited with status 1
	stdout:
	
	stderr:
	The connection to the server localhost:8443 was refused - did you specify the right host or port?
	I0620 18:05:11.366940  362254 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/metrics-apiservice.yaml -f /etc/kubernetes/addons/metrics-server-deployment.yaml -f /etc/kubernetes/addons/metrics-server-rbac.yaml -f /etc/kubernetes/addons/metrics-server-service.yaml
	W0620 18:05:11.442096  362254 addons.go:457] apply failed, will retry: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/metrics-apiservice.yaml -f /etc/kubernetes/addons/metrics-server-deployment.yaml -f /etc/kubernetes/addons/metrics-server-rbac.yaml -f /etc/kubernetes/addons/metrics-server-service.yaml: Process exited with status 1
	stdout:
	
	stderr:
	The connection to the server localhost:8443 was refused - did you specify the right host or port?
	I0620 18:05:11.442128  362254 retry.go:31] will retry after 695.613361ms: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/metrics-apiservice.yaml -f /etc/kubernetes/addons/metrics-server-deployment.yaml -f /etc/kubernetes/addons/metrics-server-rbac.yaml -f /etc/kubernetes/addons/metrics-server-service.yaml: Process exited with status 1
	stdout:
	
	stderr:
	The connection to the server localhost:8443 was refused - did you specify the right host or port?
	I0620 18:05:11.682459  362254 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/storageclass.yaml
	I0620 18:05:11.687059  362254 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/dashboard-ns.yaml -f /etc/kubernetes/addons/dashboard-clusterrole.yaml -f /etc/kubernetes/addons/dashboard-clusterrolebinding.yaml -f /etc/kubernetes/addons/dashboard-configmap.yaml -f /etc/kubernetes/addons/dashboard-dp.yaml -f /etc/kubernetes/addons/dashboard-role.yaml -f /etc/kubernetes/addons/dashboard-rolebinding.yaml -f /etc/kubernetes/addons/dashboard-sa.yaml -f /etc/kubernetes/addons/dashboard-secret.yaml -f /etc/kubernetes/addons/dashboard-svc.yaml
	W0620 18:05:11.787588  362254 addons.go:457] apply failed, will retry: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/storageclass.yaml: Process exited with status 1
	stdout:
	
	stderr:
	The connection to the server localhost:8443 was refused - did you specify the right host or port?
	I0620 18:05:11.787635  362254 retry.go:31] will retry after 964.337127ms: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/storageclass.yaml: Process exited with status 1
	stdout:
	
	stderr:
	The connection to the server localhost:8443 was refused - did you specify the right host or port?
	W0620 18:05:11.812129  362254 addons.go:457] apply failed, will retry: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/dashboard-ns.yaml -f /etc/kubernetes/addons/dashboard-clusterrole.yaml -f /etc/kubernetes/addons/dashboard-clusterrolebinding.yaml -f /etc/kubernetes/addons/dashboard-configmap.yaml -f /etc/kubernetes/addons/dashboard-dp.yaml -f /etc/kubernetes/addons/dashboard-role.yaml -f /etc/kubernetes/addons/dashboard-rolebinding.yaml -f /etc/kubernetes/addons/dashboard-sa.yaml -f /etc/kubernetes/addons/dashboard-secret.yaml -f /etc/kubernetes/addons/dashboard-svc.yaml: Process exited with status 1
	stdout:
	
	stderr:
	The connection to the server localhost:8443 was refused - did you specify the right host or port?
	I0620 18:05:11.812160  362254 retry.go:31] will retry after 704.852171ms: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/dashboard-ns.yaml -f /etc/kubernetes/addons/dashboard-clusterrole.yaml -f /etc/kubernetes/addons/dashboard-clusterrolebinding.yaml -f /etc/kubernetes/addons/dashboard-configmap.yaml -f /etc/kubernetes/addons/dashboard-dp.yaml -f /etc/kubernetes/addons/dashboard-role.yaml -f /etc/kubernetes/addons/dashboard-rolebinding.yaml -f /etc/kubernetes/addons/dashboard-sa.yaml -f /etc/kubernetes/addons/dashboard-secret.yaml -f /etc/kubernetes/addons/dashboard-svc.yaml: Process exited with status 1
	stdout:
	
	stderr:
	The connection to the server localhost:8443 was refused - did you specify the right host or port?
	I0620 18:05:11.929526  362254 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/storage-provisioner.yaml
	W0620 18:05:12.001432  362254 addons.go:457] apply failed, will retry: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/storage-provisioner.yaml: Process exited with status 1
	stdout:
	
	stderr:
	The connection to the server localhost:8443 was refused - did you specify the right host or port?
	I0620 18:05:12.001470  362254 retry.go:31] will retry after 922.118104ms: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/storage-provisioner.yaml: Process exited with status 1
	stdout:
	
	stderr:
	The connection to the server localhost:8443 was refused - did you specify the right host or port?
	I0620 18:05:12.138972  362254 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/metrics-apiservice.yaml -f /etc/kubernetes/addons/metrics-server-deployment.yaml -f /etc/kubernetes/addons/metrics-server-rbac.yaml -f /etc/kubernetes/addons/metrics-server-service.yaml
	W0620 18:05:12.215567  362254 addons.go:457] apply failed, will retry: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/metrics-apiservice.yaml -f /etc/kubernetes/addons/metrics-server-deployment.yaml -f /etc/kubernetes/addons/metrics-server-rbac.yaml -f /etc/kubernetes/addons/metrics-server-service.yaml: Process exited with status 1
	stdout:
	
	stderr:
	The connection to the server localhost:8443 was refused - did you specify the right host or port?
	I0620 18:05:12.215603  362254 retry.go:31] will retry after 861.092518ms: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/metrics-apiservice.yaml -f /etc/kubernetes/addons/metrics-server-deployment.yaml -f /etc/kubernetes/addons/metrics-server-rbac.yaml -f /etc/kubernetes/addons/metrics-server-service.yaml: Process exited with status 1
	stdout:
	
	stderr:
	The connection to the server localhost:8443 was refused - did you specify the right host or port?
	I0620 18:05:12.296191  362254 node_ready.go:53] error getting node "old-k8s-version-577369": Get "https://192.168.85.2:8443/api/v1/nodes/old-k8s-version-577369": dial tcp 192.168.85.2:8443: connect: connection refused
	I0620 18:05:12.517727  362254 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/dashboard-ns.yaml -f /etc/kubernetes/addons/dashboard-clusterrole.yaml -f /etc/kubernetes/addons/dashboard-clusterrolebinding.yaml -f /etc/kubernetes/addons/dashboard-configmap.yaml -f /etc/kubernetes/addons/dashboard-dp.yaml -f /etc/kubernetes/addons/dashboard-role.yaml -f /etc/kubernetes/addons/dashboard-rolebinding.yaml -f /etc/kubernetes/addons/dashboard-sa.yaml -f /etc/kubernetes/addons/dashboard-secret.yaml -f /etc/kubernetes/addons/dashboard-svc.yaml
	W0620 18:05:12.602267  362254 addons.go:457] apply failed, will retry: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/dashboard-ns.yaml -f /etc/kubernetes/addons/dashboard-clusterrole.yaml -f /etc/kubernetes/addons/dashboard-clusterrolebinding.yaml -f /etc/kubernetes/addons/dashboard-configmap.yaml -f /etc/kubernetes/addons/dashboard-dp.yaml -f /etc/kubernetes/addons/dashboard-role.yaml -f /etc/kubernetes/addons/dashboard-rolebinding.yaml -f /etc/kubernetes/addons/dashboard-sa.yaml -f /etc/kubernetes/addons/dashboard-secret.yaml -f /etc/kubernetes/addons/dashboard-svc.yaml: Process exited with status 1
	stdout:
	
	stderr:
	The connection to the server localhost:8443 was refused - did you specify the right host or port?
	I0620 18:05:12.602348  362254 retry.go:31] will retry after 1.141858807s: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/dashboard-ns.yaml -f /etc/kubernetes/addons/dashboard-clusterrole.yaml -f /etc/kubernetes/addons/dashboard-clusterrolebinding.yaml -f /etc/kubernetes/addons/dashboard-configmap.yaml -f /etc/kubernetes/addons/dashboard-dp.yaml -f /etc/kubernetes/addons/dashboard-role.yaml -f /etc/kubernetes/addons/dashboard-rolebinding.yaml -f /etc/kubernetes/addons/dashboard-sa.yaml -f /etc/kubernetes/addons/dashboard-secret.yaml -f /etc/kubernetes/addons/dashboard-svc.yaml: Process exited with status 1
	stdout:
	
	stderr:
	The connection to the server localhost:8443 was refused - did you specify the right host or port?
	I0620 18:05:12.752580  362254 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/storageclass.yaml
	W0620 18:05:12.835429  362254 addons.go:457] apply failed, will retry: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/storageclass.yaml: Process exited with status 1
	stdout:
	
	stderr:
	The connection to the server localhost:8443 was refused - did you specify the right host or port?
	I0620 18:05:12.835468  362254 retry.go:31] will retry after 1.341747918s: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/storageclass.yaml: Process exited with status 1
	stdout:
	
	stderr:
	The connection to the server localhost:8443 was refused - did you specify the right host or port?
	I0620 18:05:12.923841  362254 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/storage-provisioner.yaml
	W0620 18:05:13.014536  362254 addons.go:457] apply failed, will retry: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/storage-provisioner.yaml: Process exited with status 1
	stdout:
	
	stderr:
	The connection to the server localhost:8443 was refused - did you specify the right host or port?
	I0620 18:05:13.014570  362254 retry.go:31] will retry after 934.413097ms: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/storage-provisioner.yaml: Process exited with status 1
	stdout:
	
	stderr:
	The connection to the server localhost:8443 was refused - did you specify the right host or port?
	I0620 18:05:13.077724  362254 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/metrics-apiservice.yaml -f /etc/kubernetes/addons/metrics-server-deployment.yaml -f /etc/kubernetes/addons/metrics-server-rbac.yaml -f /etc/kubernetes/addons/metrics-server-service.yaml
	W0620 18:05:13.214445  362254 addons.go:457] apply failed, will retry: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/metrics-apiservice.yaml -f /etc/kubernetes/addons/metrics-server-deployment.yaml -f /etc/kubernetes/addons/metrics-server-rbac.yaml -f /etc/kubernetes/addons/metrics-server-service.yaml: Process exited with status 1
	stdout:
	
	stderr:
	The connection to the server localhost:8443 was refused - did you specify the right host or port?
	I0620 18:05:13.214514  362254 retry.go:31] will retry after 1.178031532s: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/metrics-apiservice.yaml -f /etc/kubernetes/addons/metrics-server-deployment.yaml -f /etc/kubernetes/addons/metrics-server-rbac.yaml -f /etc/kubernetes/addons/metrics-server-service.yaml: Process exited with status 1
	stdout:
	
	stderr:
	The connection to the server localhost:8443 was refused - did you specify the right host or port?
	I0620 18:05:13.744660  362254 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/dashboard-ns.yaml -f /etc/kubernetes/addons/dashboard-clusterrole.yaml -f /etc/kubernetes/addons/dashboard-clusterrolebinding.yaml -f /etc/kubernetes/addons/dashboard-configmap.yaml -f /etc/kubernetes/addons/dashboard-dp.yaml -f /etc/kubernetes/addons/dashboard-role.yaml -f /etc/kubernetes/addons/dashboard-rolebinding.yaml -f /etc/kubernetes/addons/dashboard-sa.yaml -f /etc/kubernetes/addons/dashboard-secret.yaml -f /etc/kubernetes/addons/dashboard-svc.yaml
	I0620 18:05:13.949669  362254 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/storage-provisioner.yaml
	W0620 18:05:13.976536  362254 addons.go:457] apply failed, will retry: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/dashboard-ns.yaml -f /etc/kubernetes/addons/dashboard-clusterrole.yaml -f /etc/kubernetes/addons/dashboard-clusterrolebinding.yaml -f /etc/kubernetes/addons/dashboard-configmap.yaml -f /etc/kubernetes/addons/dashboard-dp.yaml -f /etc/kubernetes/addons/dashboard-role.yaml -f /etc/kubernetes/addons/dashboard-rolebinding.yaml -f /etc/kubernetes/addons/dashboard-sa.yaml -f /etc/kubernetes/addons/dashboard-secret.yaml -f /etc/kubernetes/addons/dashboard-svc.yaml: Process exited with status 1
	stdout:
	
	stderr:
	The connection to the server localhost:8443 was refused - did you specify the right host or port?
	I0620 18:05:13.976565  362254 retry.go:31] will retry after 1.135116191s: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/dashboard-ns.yaml -f /etc/kubernetes/addons/dashboard-clusterrole.yaml -f /etc/kubernetes/addons/dashboard-clusterrolebinding.yaml -f /etc/kubernetes/addons/dashboard-configmap.yaml -f /etc/kubernetes/addons/dashboard-dp.yaml -f /etc/kubernetes/addons/dashboard-role.yaml -f /etc/kubernetes/addons/dashboard-rolebinding.yaml -f /etc/kubernetes/addons/dashboard-sa.yaml -f /etc/kubernetes/addons/dashboard-secret.yaml -f /etc/kubernetes/addons/dashboard-svc.yaml: Process exited with status 1
	stdout:
	
	stderr:
	The connection to the server localhost:8443 was refused - did you specify the right host or port?
	I0620 18:05:14.178159  362254 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/storageclass.yaml
	I0620 18:05:14.298688  362254 node_ready.go:53] error getting node "old-k8s-version-577369": Get "https://192.168.85.2:8443/api/v1/nodes/old-k8s-version-577369": dial tcp 192.168.85.2:8443: connect: connection refused
	I0620 18:05:14.393475  362254 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/metrics-apiservice.yaml -f /etc/kubernetes/addons/metrics-server-deployment.yaml -f /etc/kubernetes/addons/metrics-server-rbac.yaml -f /etc/kubernetes/addons/metrics-server-service.yaml
	W0620 18:05:14.410667  362254 addons.go:457] apply failed, will retry: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/storage-provisioner.yaml: Process exited with status 1
	stdout:
	
	stderr:
	The connection to the server localhost:8443 was refused - did you specify the right host or port?
	I0620 18:05:14.410696  362254 retry.go:31] will retry after 2.346773398s: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/storage-provisioner.yaml: Process exited with status 1
	stdout:
	
	stderr:
	The connection to the server localhost:8443 was refused - did you specify the right host or port?
	W0620 18:05:14.410732  362254 addons.go:457] apply failed, will retry: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/storageclass.yaml: Process exited with status 1
	stdout:
	
	stderr:
	The connection to the server localhost:8443 was refused - did you specify the right host or port?
	I0620 18:05:14.410738  362254 retry.go:31] will retry after 2.203813949s: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/storageclass.yaml: Process exited with status 1
	stdout:
	
	stderr:
	The connection to the server localhost:8443 was refused - did you specify the right host or port?
	W0620 18:05:14.671964  362254 addons.go:457] apply failed, will retry: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/metrics-apiservice.yaml -f /etc/kubernetes/addons/metrics-server-deployment.yaml -f /etc/kubernetes/addons/metrics-server-rbac.yaml -f /etc/kubernetes/addons/metrics-server-service.yaml: Process exited with status 1
	stdout:
	
	stderr:
	The connection to the server localhost:8443 was refused - did you specify the right host or port?
	I0620 18:05:14.671992  362254 retry.go:31] will retry after 1.638542745s: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/metrics-apiservice.yaml -f /etc/kubernetes/addons/metrics-server-deployment.yaml -f /etc/kubernetes/addons/metrics-server-rbac.yaml -f /etc/kubernetes/addons/metrics-server-service.yaml: Process exited with status 1
	stdout:
	
	stderr:
	The connection to the server localhost:8443 was refused - did you specify the right host or port?
	I0620 18:05:15.112099  362254 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/dashboard-ns.yaml -f /etc/kubernetes/addons/dashboard-clusterrole.yaml -f /etc/kubernetes/addons/dashboard-clusterrolebinding.yaml -f /etc/kubernetes/addons/dashboard-configmap.yaml -f /etc/kubernetes/addons/dashboard-dp.yaml -f /etc/kubernetes/addons/dashboard-role.yaml -f /etc/kubernetes/addons/dashboard-rolebinding.yaml -f /etc/kubernetes/addons/dashboard-sa.yaml -f /etc/kubernetes/addons/dashboard-secret.yaml -f /etc/kubernetes/addons/dashboard-svc.yaml
	I0620 18:05:16.311386  362254 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/metrics-apiservice.yaml -f /etc/kubernetes/addons/metrics-server-deployment.yaml -f /etc/kubernetes/addons/metrics-server-rbac.yaml -f /etc/kubernetes/addons/metrics-server-service.yaml
	I0620 18:05:16.614801  362254 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/storageclass.yaml
	I0620 18:05:16.758616  362254 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/storage-provisioner.yaml
	I0620 18:05:23.756129  362254 node_ready.go:49] node "old-k8s-version-577369" has status "Ready":"True"
	I0620 18:05:23.756152  362254 node_ready.go:38] duration metric: took 13.460600585s for node "old-k8s-version-577369" to be "Ready" ...
	I0620 18:05:23.756162  362254 pod_ready.go:35] extra waiting up to 6m0s for all system-critical pods including labels [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] to be "Ready" ...
	I0620 18:05:24.140332  362254 pod_ready.go:78] waiting up to 6m0s for pod "coredns-74ff55c5b-92t8n" in "kube-system" namespace to be "Ready" ...
	I0620 18:05:24.396147  362254 pod_ready.go:92] pod "coredns-74ff55c5b-92t8n" in "kube-system" namespace has status "Ready":"True"
	I0620 18:05:24.396170  362254 pod_ready.go:81] duration metric: took 255.779187ms for pod "coredns-74ff55c5b-92t8n" in "kube-system" namespace to be "Ready" ...
	I0620 18:05:24.396181  362254 pod_ready.go:78] waiting up to 6m0s for pod "etcd-old-k8s-version-577369" in "kube-system" namespace to be "Ready" ...
	I0620 18:05:26.489531  362254 pod_ready.go:102] pod "etcd-old-k8s-version-577369" in "kube-system" namespace has status "Ready":"False"
	I0620 18:05:26.804608  362254 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/dashboard-ns.yaml -f /etc/kubernetes/addons/dashboard-clusterrole.yaml -f /etc/kubernetes/addons/dashboard-clusterrolebinding.yaml -f /etc/kubernetes/addons/dashboard-configmap.yaml -f /etc/kubernetes/addons/dashboard-dp.yaml -f /etc/kubernetes/addons/dashboard-role.yaml -f /etc/kubernetes/addons/dashboard-rolebinding.yaml -f /etc/kubernetes/addons/dashboard-sa.yaml -f /etc/kubernetes/addons/dashboard-secret.yaml -f /etc/kubernetes/addons/dashboard-svc.yaml: (11.692455011s)
	I0620 18:05:26.804795  362254 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/metrics-apiservice.yaml -f /etc/kubernetes/addons/metrics-server-deployment.yaml -f /etc/kubernetes/addons/metrics-server-rbac.yaml -f /etc/kubernetes/addons/metrics-server-service.yaml: (10.49337593s)
	I0620 18:05:26.804814  362254 addons.go:475] Verifying addon metrics-server=true in "old-k8s-version-577369"
	I0620 18:05:26.804855  362254 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/storageclass.yaml: (10.190024236s)
	I0620 18:05:26.804947  362254 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/storage-provisioner.yaml: (10.046298772s)
	I0620 18:05:26.807279  362254 out.go:177] * Some dashboard features require the metrics-server addon. To enable all features please run:
	
		minikube -p old-k8s-version-577369 addons enable metrics-server
	
	I0620 18:05:26.828751  362254 out.go:177] * Enabled addons: metrics-server, storage-provisioner, dashboard, default-storageclass
	I0620 18:05:26.830948  362254 addons.go:510] duration metric: took 16.756344987s for enable addons: enabled=[metrics-server storage-provisioner dashboard default-storageclass]
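Editor's note: the retry.go lines above show minikube re-running each `kubectl apply` with a short randomized backoff until the apiserver on localhost:8443 starts accepting connections, after which all four addon applies complete at once. Below is a minimal, self-contained Go sketch of that retry-with-backoff pattern, under stated assumptions: the helper name, attempt budget, and delay values are illustrative and are not minikube's actual retry.go implementation.

	package main

	import (
		"fmt"
		"math/rand"
		"os"
		"os/exec"
		"time"
	)

	// applyWithRetry re-runs `kubectl apply --force -f <manifest>` until it succeeds
	// or the attempt budget is exhausted, sleeping a jittered backoff between tries.
	// Hypothetical helper; the backoff range loosely mirrors the ~0.7-2.3s delays logged above.
	func applyWithRetry(kubeconfig, manifest string, attempts int) error {
		var lastErr error
		for i := 0; i < attempts; i++ {
			cmd := exec.Command("kubectl", "apply", "--force", "-f", manifest)
			cmd.Env = append(os.Environ(), "KUBECONFIG="+kubeconfig)
			out, err := cmd.CombinedOutput()
			if err == nil {
				return nil // apiserver accepted the manifest
			}
			lastErr = fmt.Errorf("apply %s: %w\n%s", manifest, err, out)
			time.Sleep(500*time.Millisecond + time.Duration(rand.Intn(1500))*time.Millisecond)
		}
		return lastErr
	}

	func main() {
		if err := applyWithRetry("/var/lib/minikube/kubeconfig",
			"/etc/kubernetes/addons/storageclass.yaml", 10); err != nil {
			fmt.Println("giving up:", err)
		}
	}
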
	I0620 18:05:28.904497  362254 pod_ready.go:102] pod "etcd-old-k8s-version-577369" in "kube-system" namespace has status "Ready":"False"
	I0620 18:05:30.913014  362254 pod_ready.go:102] pod "etcd-old-k8s-version-577369" in "kube-system" namespace has status "Ready":"False"
	I0620 18:05:32.962801  362254 pod_ready.go:102] pod "etcd-old-k8s-version-577369" in "kube-system" namespace has status "Ready":"False"
	I0620 18:05:35.403188  362254 pod_ready.go:102] pod "etcd-old-k8s-version-577369" in "kube-system" namespace has status "Ready":"False"
	I0620 18:05:37.902145  362254 pod_ready.go:102] pod "etcd-old-k8s-version-577369" in "kube-system" namespace has status "Ready":"False"
	I0620 18:05:39.903616  362254 pod_ready.go:102] pod "etcd-old-k8s-version-577369" in "kube-system" namespace has status "Ready":"False"
	I0620 18:05:41.903878  362254 pod_ready.go:102] pod "etcd-old-k8s-version-577369" in "kube-system" namespace has status "Ready":"False"
	I0620 18:05:44.404313  362254 pod_ready.go:102] pod "etcd-old-k8s-version-577369" in "kube-system" namespace has status "Ready":"False"
	I0620 18:05:46.904722  362254 pod_ready.go:102] pod "etcd-old-k8s-version-577369" in "kube-system" namespace has status "Ready":"False"
	I0620 18:05:49.411946  362254 pod_ready.go:102] pod "etcd-old-k8s-version-577369" in "kube-system" namespace has status "Ready":"False"
	I0620 18:05:51.905572  362254 pod_ready.go:102] pod "etcd-old-k8s-version-577369" in "kube-system" namespace has status "Ready":"False"
	I0620 18:05:54.403345  362254 pod_ready.go:102] pod "etcd-old-k8s-version-577369" in "kube-system" namespace has status "Ready":"False"
	I0620 18:05:56.903160  362254 pod_ready.go:102] pod "etcd-old-k8s-version-577369" in "kube-system" namespace has status "Ready":"False"
	I0620 18:05:59.404173  362254 pod_ready.go:102] pod "etcd-old-k8s-version-577369" in "kube-system" namespace has status "Ready":"False"
	I0620 18:06:01.406702  362254 pod_ready.go:102] pod "etcd-old-k8s-version-577369" in "kube-system" namespace has status "Ready":"False"
	I0620 18:06:03.415590  362254 pod_ready.go:102] pod "etcd-old-k8s-version-577369" in "kube-system" namespace has status "Ready":"False"
	I0620 18:06:05.902210  362254 pod_ready.go:102] pod "etcd-old-k8s-version-577369" in "kube-system" namespace has status "Ready":"False"
	I0620 18:06:07.902785  362254 pod_ready.go:102] pod "etcd-old-k8s-version-577369" in "kube-system" namespace has status "Ready":"False"
	I0620 18:06:10.402129  362254 pod_ready.go:102] pod "etcd-old-k8s-version-577369" in "kube-system" namespace has status "Ready":"False"
	I0620 18:06:12.404012  362254 pod_ready.go:102] pod "etcd-old-k8s-version-577369" in "kube-system" namespace has status "Ready":"False"
	I0620 18:06:14.906676  362254 pod_ready.go:102] pod "etcd-old-k8s-version-577369" in "kube-system" namespace has status "Ready":"False"
	I0620 18:06:17.405088  362254 pod_ready.go:102] pod "etcd-old-k8s-version-577369" in "kube-system" namespace has status "Ready":"False"
	I0620 18:06:19.903099  362254 pod_ready.go:102] pod "etcd-old-k8s-version-577369" in "kube-system" namespace has status "Ready":"False"
	I0620 18:06:22.404115  362254 pod_ready.go:102] pod "etcd-old-k8s-version-577369" in "kube-system" namespace has status "Ready":"False"
	I0620 18:06:24.902148  362254 pod_ready.go:102] pod "etcd-old-k8s-version-577369" in "kube-system" namespace has status "Ready":"False"
	I0620 18:06:26.902212  362254 pod_ready.go:102] pod "etcd-old-k8s-version-577369" in "kube-system" namespace has status "Ready":"False"
	I0620 18:06:28.902667  362254 pod_ready.go:102] pod "etcd-old-k8s-version-577369" in "kube-system" namespace has status "Ready":"False"
	I0620 18:06:30.906114  362254 pod_ready.go:102] pod "etcd-old-k8s-version-577369" in "kube-system" namespace has status "Ready":"False"
	I0620 18:06:32.906149  362254 pod_ready.go:102] pod "etcd-old-k8s-version-577369" in "kube-system" namespace has status "Ready":"False"
	I0620 18:06:35.403503  362254 pod_ready.go:102] pod "etcd-old-k8s-version-577369" in "kube-system" namespace has status "Ready":"False"
	I0620 18:06:37.405196  362254 pod_ready.go:102] pod "etcd-old-k8s-version-577369" in "kube-system" namespace has status "Ready":"False"
	I0620 18:06:39.903362  362254 pod_ready.go:102] pod "etcd-old-k8s-version-577369" in "kube-system" namespace has status "Ready":"False"
	I0620 18:06:41.903601  362254 pod_ready.go:102] pod "etcd-old-k8s-version-577369" in "kube-system" namespace has status "Ready":"False"
	I0620 18:06:44.405671  362254 pod_ready.go:102] pod "etcd-old-k8s-version-577369" in "kube-system" namespace has status "Ready":"False"
	I0620 18:06:46.412687  362254 pod_ready.go:102] pod "etcd-old-k8s-version-577369" in "kube-system" namespace has status "Ready":"False"
	I0620 18:06:48.902060  362254 pod_ready.go:102] pod "etcd-old-k8s-version-577369" in "kube-system" namespace has status "Ready":"False"
	I0620 18:06:50.902406  362254 pod_ready.go:102] pod "etcd-old-k8s-version-577369" in "kube-system" namespace has status "Ready":"False"
	I0620 18:06:52.404765  362254 pod_ready.go:92] pod "etcd-old-k8s-version-577369" in "kube-system" namespace has status "Ready":"True"
	I0620 18:06:52.404791  362254 pod_ready.go:81] duration metric: took 1m28.008602156s for pod "etcd-old-k8s-version-577369" in "kube-system" namespace to be "Ready" ...
	I0620 18:06:52.404804  362254 pod_ready.go:78] waiting up to 6m0s for pod "kube-apiserver-old-k8s-version-577369" in "kube-system" namespace to be "Ready" ...
	I0620 18:06:52.410584  362254 pod_ready.go:92] pod "kube-apiserver-old-k8s-version-577369" in "kube-system" namespace has status "Ready":"True"
	I0620 18:06:52.410608  362254 pod_ready.go:81] duration metric: took 5.796677ms for pod "kube-apiserver-old-k8s-version-577369" in "kube-system" namespace to be "Ready" ...
	I0620 18:06:52.410619  362254 pod_ready.go:78] waiting up to 6m0s for pod "kube-controller-manager-old-k8s-version-577369" in "kube-system" namespace to be "Ready" ...
	I0620 18:06:52.416168  362254 pod_ready.go:92] pod "kube-controller-manager-old-k8s-version-577369" in "kube-system" namespace has status "Ready":"True"
	I0620 18:06:52.416193  362254 pod_ready.go:81] duration metric: took 5.566241ms for pod "kube-controller-manager-old-k8s-version-577369" in "kube-system" namespace to be "Ready" ...
	I0620 18:06:52.416204  362254 pod_ready.go:78] waiting up to 6m0s for pod "kube-proxy-qh57b" in "kube-system" namespace to be "Ready" ...
	I0620 18:06:52.421207  362254 pod_ready.go:92] pod "kube-proxy-qh57b" in "kube-system" namespace has status "Ready":"True"
	I0620 18:06:52.421231  362254 pod_ready.go:81] duration metric: took 5.019226ms for pod "kube-proxy-qh57b" in "kube-system" namespace to be "Ready" ...
	I0620 18:06:52.421244  362254 pod_ready.go:78] waiting up to 6m0s for pod "kube-scheduler-old-k8s-version-577369" in "kube-system" namespace to be "Ready" ...
	I0620 18:06:52.425972  362254 pod_ready.go:92] pod "kube-scheduler-old-k8s-version-577369" in "kube-system" namespace has status "Ready":"True"
	I0620 18:06:52.425996  362254 pod_ready.go:81] duration metric: took 4.743942ms for pod "kube-scheduler-old-k8s-version-577369" in "kube-system" namespace to be "Ready" ...
	I0620 18:06:52.426007  362254 pod_ready.go:78] waiting up to 6m0s for pod "metrics-server-9975d5f86-nxk7p" in "kube-system" namespace to be "Ready" ...
	I0620 18:06:54.433282  362254 pod_ready.go:102] pod "metrics-server-9975d5f86-nxk7p" in "kube-system" namespace has status "Ready":"False"
	I0620 18:06:56.931912  362254 pod_ready.go:102] pod "metrics-server-9975d5f86-nxk7p" in "kube-system" namespace has status "Ready":"False"
	I0620 18:06:58.932386  362254 pod_ready.go:102] pod "metrics-server-9975d5f86-nxk7p" in "kube-system" namespace has status "Ready":"False"
	I0620 18:07:00.933410  362254 pod_ready.go:102] pod "metrics-server-9975d5f86-nxk7p" in "kube-system" namespace has status "Ready":"False"
	I0620 18:07:03.432160  362254 pod_ready.go:102] pod "metrics-server-9975d5f86-nxk7p" in "kube-system" namespace has status "Ready":"False"
	I0620 18:07:05.432746  362254 pod_ready.go:102] pod "metrics-server-9975d5f86-nxk7p" in "kube-system" namespace has status "Ready":"False"
	I0620 18:07:07.932399  362254 pod_ready.go:102] pod "metrics-server-9975d5f86-nxk7p" in "kube-system" namespace has status "Ready":"False"
	I0620 18:07:09.932701  362254 pod_ready.go:102] pod "metrics-server-9975d5f86-nxk7p" in "kube-system" namespace has status "Ready":"False"
	I0620 18:07:12.432841  362254 pod_ready.go:102] pod "metrics-server-9975d5f86-nxk7p" in "kube-system" namespace has status "Ready":"False"
	I0620 18:07:14.484895  362254 pod_ready.go:102] pod "metrics-server-9975d5f86-nxk7p" in "kube-system" namespace has status "Ready":"False"
	I0620 18:07:16.931977  362254 pod_ready.go:102] pod "metrics-server-9975d5f86-nxk7p" in "kube-system" namespace has status "Ready":"False"
	I0620 18:07:18.932660  362254 pod_ready.go:102] pod "metrics-server-9975d5f86-nxk7p" in "kube-system" namespace has status "Ready":"False"
	I0620 18:07:21.432540  362254 pod_ready.go:102] pod "metrics-server-9975d5f86-nxk7p" in "kube-system" namespace has status "Ready":"False"
	I0620 18:07:23.936820  362254 pod_ready.go:102] pod "metrics-server-9975d5f86-nxk7p" in "kube-system" namespace has status "Ready":"False"
	I0620 18:07:26.432901  362254 pod_ready.go:102] pod "metrics-server-9975d5f86-nxk7p" in "kube-system" namespace has status "Ready":"False"
	I0620 18:07:28.932873  362254 pod_ready.go:102] pod "metrics-server-9975d5f86-nxk7p" in "kube-system" namespace has status "Ready":"False"
	I0620 18:07:31.431733  362254 pod_ready.go:102] pod "metrics-server-9975d5f86-nxk7p" in "kube-system" namespace has status "Ready":"False"
	I0620 18:07:33.432627  362254 pod_ready.go:102] pod "metrics-server-9975d5f86-nxk7p" in "kube-system" namespace has status "Ready":"False"
	I0620 18:07:35.931868  362254 pod_ready.go:102] pod "metrics-server-9975d5f86-nxk7p" in "kube-system" namespace has status "Ready":"False"
	I0620 18:07:37.932565  362254 pod_ready.go:102] pod "metrics-server-9975d5f86-nxk7p" in "kube-system" namespace has status "Ready":"False"
	I0620 18:07:40.433402  362254 pod_ready.go:102] pod "metrics-server-9975d5f86-nxk7p" in "kube-system" namespace has status "Ready":"False"
	I0620 18:07:42.932231  362254 pod_ready.go:102] pod "metrics-server-9975d5f86-nxk7p" in "kube-system" namespace has status "Ready":"False"
	I0620 18:07:44.932578  362254 pod_ready.go:102] pod "metrics-server-9975d5f86-nxk7p" in "kube-system" namespace has status "Ready":"False"
	I0620 18:07:47.432166  362254 pod_ready.go:102] pod "metrics-server-9975d5f86-nxk7p" in "kube-system" namespace has status "Ready":"False"
	I0620 18:07:49.437652  362254 pod_ready.go:102] pod "metrics-server-9975d5f86-nxk7p" in "kube-system" namespace has status "Ready":"False"
	I0620 18:07:51.931714  362254 pod_ready.go:102] pod "metrics-server-9975d5f86-nxk7p" in "kube-system" namespace has status "Ready":"False"
	I0620 18:07:53.931751  362254 pod_ready.go:102] pod "metrics-server-9975d5f86-nxk7p" in "kube-system" namespace has status "Ready":"False"
	I0620 18:07:55.932823  362254 pod_ready.go:102] pod "metrics-server-9975d5f86-nxk7p" in "kube-system" namespace has status "Ready":"False"
	I0620 18:07:58.432113  362254 pod_ready.go:102] pod "metrics-server-9975d5f86-nxk7p" in "kube-system" namespace has status "Ready":"False"
	I0620 18:08:00.446605  362254 pod_ready.go:102] pod "metrics-server-9975d5f86-nxk7p" in "kube-system" namespace has status "Ready":"False"
	I0620 18:08:02.932548  362254 pod_ready.go:102] pod "metrics-server-9975d5f86-nxk7p" in "kube-system" namespace has status "Ready":"False"
	I0620 18:08:04.937607  362254 pod_ready.go:102] pod "metrics-server-9975d5f86-nxk7p" in "kube-system" namespace has status "Ready":"False"
	I0620 18:08:07.432482  362254 pod_ready.go:102] pod "metrics-server-9975d5f86-nxk7p" in "kube-system" namespace has status "Ready":"False"
	I0620 18:08:09.432949  362254 pod_ready.go:102] pod "metrics-server-9975d5f86-nxk7p" in "kube-system" namespace has status "Ready":"False"
	I0620 18:08:11.433117  362254 pod_ready.go:102] pod "metrics-server-9975d5f86-nxk7p" in "kube-system" namespace has status "Ready":"False"
	I0620 18:08:13.931631  362254 pod_ready.go:102] pod "metrics-server-9975d5f86-nxk7p" in "kube-system" namespace has status "Ready":"False"
	I0620 18:08:15.933023  362254 pod_ready.go:102] pod "metrics-server-9975d5f86-nxk7p" in "kube-system" namespace has status "Ready":"False"
	I0620 18:08:18.432623  362254 pod_ready.go:102] pod "metrics-server-9975d5f86-nxk7p" in "kube-system" namespace has status "Ready":"False"
	I0620 18:08:20.931947  362254 pod_ready.go:102] pod "metrics-server-9975d5f86-nxk7p" in "kube-system" namespace has status "Ready":"False"
	I0620 18:08:23.432276  362254 pod_ready.go:102] pod "metrics-server-9975d5f86-nxk7p" in "kube-system" namespace has status "Ready":"False"
	I0620 18:08:25.932042  362254 pod_ready.go:102] pod "metrics-server-9975d5f86-nxk7p" in "kube-system" namespace has status "Ready":"False"
	I0620 18:08:27.932502  362254 pod_ready.go:102] pod "metrics-server-9975d5f86-nxk7p" in "kube-system" namespace has status "Ready":"False"
	I0620 18:08:30.432837  362254 pod_ready.go:102] pod "metrics-server-9975d5f86-nxk7p" in "kube-system" namespace has status "Ready":"False"
	I0620 18:08:32.433782  362254 pod_ready.go:102] pod "metrics-server-9975d5f86-nxk7p" in "kube-system" namespace has status "Ready":"False"
	I0620 18:08:34.932712  362254 pod_ready.go:102] pod "metrics-server-9975d5f86-nxk7p" in "kube-system" namespace has status "Ready":"False"
	I0620 18:08:37.431735  362254 pod_ready.go:102] pod "metrics-server-9975d5f86-nxk7p" in "kube-system" namespace has status "Ready":"False"
	I0620 18:08:39.433077  362254 pod_ready.go:102] pod "metrics-server-9975d5f86-nxk7p" in "kube-system" namespace has status "Ready":"False"
	I0620 18:08:41.932607  362254 pod_ready.go:102] pod "metrics-server-9975d5f86-nxk7p" in "kube-system" namespace has status "Ready":"False"
	I0620 18:08:44.432206  362254 pod_ready.go:102] pod "metrics-server-9975d5f86-nxk7p" in "kube-system" namespace has status "Ready":"False"
	I0620 18:08:46.932710  362254 pod_ready.go:102] pod "metrics-server-9975d5f86-nxk7p" in "kube-system" namespace has status "Ready":"False"
	I0620 18:08:49.432633  362254 pod_ready.go:102] pod "metrics-server-9975d5f86-nxk7p" in "kube-system" namespace has status "Ready":"False"
	I0620 18:08:51.940410  362254 pod_ready.go:102] pod "metrics-server-9975d5f86-nxk7p" in "kube-system" namespace has status "Ready":"False"
	I0620 18:08:54.432991  362254 pod_ready.go:102] pod "metrics-server-9975d5f86-nxk7p" in "kube-system" namespace has status "Ready":"False"
	I0620 18:08:56.932396  362254 pod_ready.go:102] pod "metrics-server-9975d5f86-nxk7p" in "kube-system" namespace has status "Ready":"False"
	I0620 18:08:59.432327  362254 pod_ready.go:102] pod "metrics-server-9975d5f86-nxk7p" in "kube-system" namespace has status "Ready":"False"
	I0620 18:09:01.931976  362254 pod_ready.go:102] pod "metrics-server-9975d5f86-nxk7p" in "kube-system" namespace has status "Ready":"False"
	I0620 18:09:03.932339  362254 pod_ready.go:102] pod "metrics-server-9975d5f86-nxk7p" in "kube-system" namespace has status "Ready":"False"
	I0620 18:09:05.932754  362254 pod_ready.go:102] pod "metrics-server-9975d5f86-nxk7p" in "kube-system" namespace has status "Ready":"False"
	I0620 18:09:08.432551  362254 pod_ready.go:102] pod "metrics-server-9975d5f86-nxk7p" in "kube-system" namespace has status "Ready":"False"
	I0620 18:09:10.432698  362254 pod_ready.go:102] pod "metrics-server-9975d5f86-nxk7p" in "kube-system" namespace has status "Ready":"False"
	I0620 18:09:12.932430  362254 pod_ready.go:102] pod "metrics-server-9975d5f86-nxk7p" in "kube-system" namespace has status "Ready":"False"
	I0620 18:09:15.433253  362254 pod_ready.go:102] pod "metrics-server-9975d5f86-nxk7p" in "kube-system" namespace has status "Ready":"False"
	I0620 18:09:17.938368  362254 pod_ready.go:102] pod "metrics-server-9975d5f86-nxk7p" in "kube-system" namespace has status "Ready":"False"
	I0620 18:09:20.432874  362254 pod_ready.go:102] pod "metrics-server-9975d5f86-nxk7p" in "kube-system" namespace has status "Ready":"False"
	I0620 18:09:22.932519  362254 pod_ready.go:102] pod "metrics-server-9975d5f86-nxk7p" in "kube-system" namespace has status "Ready":"False"
	I0620 18:09:25.433194  362254 pod_ready.go:102] pod "metrics-server-9975d5f86-nxk7p" in "kube-system" namespace has status "Ready":"False"
	I0620 18:09:27.933027  362254 pod_ready.go:102] pod "metrics-server-9975d5f86-nxk7p" in "kube-system" namespace has status "Ready":"False"
	I0620 18:09:30.438071  362254 pod_ready.go:102] pod "metrics-server-9975d5f86-nxk7p" in "kube-system" namespace has status "Ready":"False"
	I0620 18:09:32.932220  362254 pod_ready.go:102] pod "metrics-server-9975d5f86-nxk7p" in "kube-system" namespace has status "Ready":"False"
	I0620 18:09:34.936100  362254 pod_ready.go:102] pod "metrics-server-9975d5f86-nxk7p" in "kube-system" namespace has status "Ready":"False"
	I0620 18:09:37.432407  362254 pod_ready.go:102] pod "metrics-server-9975d5f86-nxk7p" in "kube-system" namespace has status "Ready":"False"
	I0620 18:09:39.931938  362254 pod_ready.go:102] pod "metrics-server-9975d5f86-nxk7p" in "kube-system" namespace has status "Ready":"False"
	I0620 18:09:41.933548  362254 pod_ready.go:102] pod "metrics-server-9975d5f86-nxk7p" in "kube-system" namespace has status "Ready":"False"
	I0620 18:09:44.432536  362254 pod_ready.go:102] pod "metrics-server-9975d5f86-nxk7p" in "kube-system" namespace has status "Ready":"False"
	I0620 18:09:46.932947  362254 pod_ready.go:102] pod "metrics-server-9975d5f86-nxk7p" in "kube-system" namespace has status "Ready":"False"
	I0620 18:09:48.940303  362254 pod_ready.go:102] pod "metrics-server-9975d5f86-nxk7p" in "kube-system" namespace has status "Ready":"False"
	I0620 18:09:51.439825  362254 pod_ready.go:102] pod "metrics-server-9975d5f86-nxk7p" in "kube-system" namespace has status "Ready":"False"
	I0620 18:09:53.933306  362254 pod_ready.go:102] pod "metrics-server-9975d5f86-nxk7p" in "kube-system" namespace has status "Ready":"False"
	I0620 18:09:56.012496  362254 pod_ready.go:102] pod "metrics-server-9975d5f86-nxk7p" in "kube-system" namespace has status "Ready":"False"
	I0620 18:09:58.432068  362254 pod_ready.go:102] pod "metrics-server-9975d5f86-nxk7p" in "kube-system" namespace has status "Ready":"False"
	I0620 18:10:00.458844  362254 pod_ready.go:102] pod "metrics-server-9975d5f86-nxk7p" in "kube-system" namespace has status "Ready":"False"
	I0620 18:10:02.931431  362254 pod_ready.go:102] pod "metrics-server-9975d5f86-nxk7p" in "kube-system" namespace has status "Ready":"False"
	I0620 18:10:05.431874  362254 pod_ready.go:102] pod "metrics-server-9975d5f86-nxk7p" in "kube-system" namespace has status "Ready":"False"
	I0620 18:10:07.433865  362254 pod_ready.go:102] pod "metrics-server-9975d5f86-nxk7p" in "kube-system" namespace has status "Ready":"False"
	I0620 18:10:09.932314  362254 pod_ready.go:102] pod "metrics-server-9975d5f86-nxk7p" in "kube-system" namespace has status "Ready":"False"
	I0620 18:10:11.933181  362254 pod_ready.go:102] pod "metrics-server-9975d5f86-nxk7p" in "kube-system" namespace has status "Ready":"False"
	I0620 18:10:13.934449  362254 pod_ready.go:102] pod "metrics-server-9975d5f86-nxk7p" in "kube-system" namespace has status "Ready":"False"
	I0620 18:10:16.444821  362254 pod_ready.go:102] pod "metrics-server-9975d5f86-nxk7p" in "kube-system" namespace has status "Ready":"False"
	I0620 18:10:18.932133  362254 pod_ready.go:102] pod "metrics-server-9975d5f86-nxk7p" in "kube-system" namespace has status "Ready":"False"
	I0620 18:10:20.932170  362254 pod_ready.go:102] pod "metrics-server-9975d5f86-nxk7p" in "kube-system" namespace has status "Ready":"False"
	I0620 18:10:23.434756  362254 pod_ready.go:102] pod "metrics-server-9975d5f86-nxk7p" in "kube-system" namespace has status "Ready":"False"
	I0620 18:10:25.932711  362254 pod_ready.go:102] pod "metrics-server-9975d5f86-nxk7p" in "kube-system" namespace has status "Ready":"False"
	I0620 18:10:27.933287  362254 pod_ready.go:102] pod "metrics-server-9975d5f86-nxk7p" in "kube-system" namespace has status "Ready":"False"
	I0620 18:10:30.433760  362254 pod_ready.go:102] pod "metrics-server-9975d5f86-nxk7p" in "kube-system" namespace has status "Ready":"False"
	I0620 18:10:32.932786  362254 pod_ready.go:102] pod "metrics-server-9975d5f86-nxk7p" in "kube-system" namespace has status "Ready":"False"
	I0620 18:10:34.932888  362254 pod_ready.go:102] pod "metrics-server-9975d5f86-nxk7p" in "kube-system" namespace has status "Ready":"False"
	I0620 18:10:36.933040  362254 pod_ready.go:102] pod "metrics-server-9975d5f86-nxk7p" in "kube-system" namespace has status "Ready":"False"
	I0620 18:10:38.933262  362254 pod_ready.go:102] pod "metrics-server-9975d5f86-nxk7p" in "kube-system" namespace has status "Ready":"False"
	I0620 18:10:41.441396  362254 pod_ready.go:102] pod "metrics-server-9975d5f86-nxk7p" in "kube-system" namespace has status "Ready":"False"
	I0620 18:10:43.954833  362254 pod_ready.go:102] pod "metrics-server-9975d5f86-nxk7p" in "kube-system" namespace has status "Ready":"False"
	I0620 18:10:46.530593  362254 pod_ready.go:102] pod "metrics-server-9975d5f86-nxk7p" in "kube-system" namespace has status "Ready":"False"
	I0620 18:10:48.932893  362254 pod_ready.go:102] pod "metrics-server-9975d5f86-nxk7p" in "kube-system" namespace has status "Ready":"False"
	I0620 18:10:50.936039  362254 pod_ready.go:102] pod "metrics-server-9975d5f86-nxk7p" in "kube-system" namespace has status "Ready":"False"
	I0620 18:10:52.432238  362254 pod_ready.go:81] duration metric: took 4m0.006216679s for pod "metrics-server-9975d5f86-nxk7p" in "kube-system" namespace to be "Ready" ...
	E0620 18:10:52.432430  362254 pod_ready.go:66] WaitExtra: waitPodCondition: context deadline exceeded
	I0620 18:10:52.432456  362254 pod_ready.go:38] duration metric: took 5m28.676281099s for extra waiting for all system-critical and pods with labels [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] to be "Ready" ...
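Editor's note: the pod_ready lines above are a roughly 2-second polling loop with an overall budget per pod; metrics-server-9975d5f86-nxk7p never reports Ready, so the wait ends with context deadline exceeded after 4m0s. The following generic Go sketch shows that poll-under-deadline shape; it is an illustration of the pattern only, not a reproduction of minikube's pod_ready.go.

	package main

	import (
		"context"
		"errors"
		"fmt"
		"time"
	)

	// waitForCondition polls check() every interval until it returns true or the
	// context deadline expires, mirroring the readiness polling seen in the log.
	func waitForCondition(ctx context.Context, interval time.Duration, check func() (bool, error)) error {
		ticker := time.NewTicker(interval)
		defer ticker.Stop()
		for {
			ready, err := check()
			if err != nil {
				return err
			}
			if ready {
				return nil
			}
			select {
			case <-ctx.Done():
				return ctx.Err() // context.DeadlineExceeded, as in the WaitExtra line above
			case <-ticker.C:
			}
		}
	}

	func main() {
		// 4-minute budget chosen to mirror the log; any timeout works for a demo.
		ctx, cancel := context.WithTimeout(context.Background(), 4*time.Minute)
		defer cancel()
		err := waitForCondition(ctx, 2*time.Second, func() (bool, error) {
			return false, nil // a pod that never becomes Ready, like metrics-server here
		})
		if errors.Is(err, context.DeadlineExceeded) {
			fmt.Println("WaitExtra: waitPodCondition:", err)
		}
	}
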
	I0620 18:10:52.432502  362254 api_server.go:52] waiting for apiserver process to appear ...
	I0620 18:10:52.432625  362254 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_kube-apiserver --format={{.ID}}
	I0620 18:10:52.455740  362254 logs.go:276] 2 containers: [075e697d07c8 760f9a7d272c]
	I0620 18:10:52.455824  362254 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_etcd --format={{.ID}}
	I0620 18:10:52.475939  362254 logs.go:276] 2 containers: [25a953b6e46d 81eaddbb4b45]
	I0620 18:10:52.476016  362254 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_coredns --format={{.ID}}
	I0620 18:10:52.494299  362254 logs.go:276] 2 containers: [5534d9d547fd 7e60d81fce7f]
	I0620 18:10:52.494380  362254 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_kube-scheduler --format={{.ID}}
	I0620 18:10:52.512744  362254 logs.go:276] 2 containers: [4ed5438feb8b ab177be73cae]
	I0620 18:10:52.512827  362254 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_kube-proxy --format={{.ID}}
	I0620 18:10:52.536524  362254 logs.go:276] 2 containers: [0b32a67571bb c47b7591b320]
	I0620 18:10:52.536611  362254 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_kube-controller-manager --format={{.ID}}
	I0620 18:10:52.563389  362254 logs.go:276] 2 containers: [aab72b193950 a4eee0f3ea35]
	I0620 18:10:52.563475  362254 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_kindnet --format={{.ID}}
	I0620 18:10:52.582302  362254 logs.go:276] 0 containers: []
	W0620 18:10:52.582326  362254 logs.go:278] No container was found matching "kindnet"
	I0620 18:10:52.582383  362254 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_kubernetes-dashboard --format={{.ID}}
	I0620 18:10:52.622007  362254 logs.go:276] 1 containers: [49efdc60eccc]
	I0620 18:10:52.622087  362254 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_storage-provisioner --format={{.ID}}
	I0620 18:10:52.650609  362254 logs.go:276] 2 containers: [74b467d165b4 16e28ef3ddc9]
	I0620 18:10:52.650645  362254 logs.go:123] Gathering logs for kube-apiserver [760f9a7d272c] ...
	I0620 18:10:52.650656  362254 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 760f9a7d272c"
	I0620 18:10:52.730252  362254 logs.go:123] Gathering logs for kube-proxy [0b32a67571bb] ...
	I0620 18:10:52.730312  362254 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 0b32a67571bb"
	I0620 18:10:52.753952  362254 logs.go:123] Gathering logs for kube-proxy [c47b7591b320] ...
	I0620 18:10:52.753983  362254 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 c47b7591b320"
	I0620 18:10:52.788507  362254 logs.go:123] Gathering logs for kube-controller-manager [aab72b193950] ...
	I0620 18:10:52.788535  362254 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 aab72b193950"
	I0620 18:10:52.863485  362254 logs.go:123] Gathering logs for container status ...
	I0620 18:10:52.863526  362254 ssh_runner.go:195] Run: /bin/bash -c "sudo `which crictl || echo crictl` ps -a || sudo docker ps -a"
	I0620 18:10:52.948470  362254 logs.go:123] Gathering logs for describe nodes ...
	I0620 18:10:52.948501  362254 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.20.0/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig"
	I0620 18:10:53.151745  362254 logs.go:123] Gathering logs for kube-apiserver [075e697d07c8] ...
	I0620 18:10:53.151776  362254 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 075e697d07c8"
	I0620 18:10:53.215264  362254 logs.go:123] Gathering logs for etcd [81eaddbb4b45] ...
	I0620 18:10:53.215345  362254 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 81eaddbb4b45"
	I0620 18:10:53.278713  362254 logs.go:123] Gathering logs for coredns [5534d9d547fd] ...
	I0620 18:10:53.278786  362254 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 5534d9d547fd"
	I0620 18:10:53.303506  362254 logs.go:123] Gathering logs for storage-provisioner [74b467d165b4] ...
	I0620 18:10:53.303539  362254 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 74b467d165b4"
	I0620 18:10:53.330579  362254 logs.go:123] Gathering logs for kubelet ...
	I0620 18:10:53.330606  362254 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u kubelet -n 400"
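Editor's note: the diagnostic sweep above finds container IDs with `docker ps -a --filter=name=k8s_<component> --format={{.ID}}`, tails each with `docker logs --tail 400 <id>`, and reads the kubelet's systemd journal with `journalctl -u kubelet -n 400`. The Go sketch below performs the same sweep via os/exec; the commands come from the log, while the helper structure and component list ordering are illustrative assumptions.

	package main

	import (
		"fmt"
		"os/exec"
		"strings"
	)

	// dumpComponentLogs lists container IDs for a component by name filter,
	// then tails each container's logs, as in the gathering steps above.
	func dumpComponentLogs(component string) error {
		out, err := exec.Command("docker", "ps", "-a",
			"--filter", "name=k8s_"+component, "--format", "{{.ID}}").Output()
		if err != nil {
			return err
		}
		for _, id := range strings.Fields(string(out)) {
			logs, err := exec.Command("docker", "logs", "--tail", "400", id).CombinedOutput()
			if err != nil {
				return err
			}
			fmt.Printf("=== %s (%s) ===\n%s\n", component, id, logs)
		}
		return nil
	}

	func main() {
		for _, c := range []string{"kube-apiserver", "etcd", "coredns", "kube-scheduler",
			"kube-proxy", "kube-controller-manager", "kubernetes-dashboard", "storage-provisioner"} {
			if err := dumpComponentLogs(c); err != nil {
				fmt.Println("error gathering", c, "logs:", err)
			}
		}
		// The kubelet runs as a systemd unit, so its recent log comes from journalctl.
		kubelet, _ := exec.Command("sudo", "journalctl", "-u", "kubelet", "-n", "400").CombinedOutput()
		fmt.Printf("=== kubelet ===\n%s\n", kubelet)
	}
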
	W0620 18:10:53.394844  362254 logs.go:138] Found kubelet problem: Jun 20 18:05:23 old-k8s-version-577369 kubelet[1201]: E0620 18:05:23.695585    1201 reflector.go:138] object-"kube-system"/"metrics-server-token-xd7wc": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets "metrics-server-token-xd7wc" is forbidden: User "system:node:old-k8s-version-577369" cannot list resource "secrets" in API group "" in the namespace "kube-system": no relationship found between node 'old-k8s-version-577369' and this object
	W0620 18:10:53.395084  362254 logs.go:138] Found kubelet problem: Jun 20 18:05:23 old-k8s-version-577369 kubelet[1201]: E0620 18:05:23.698643    1201 reflector.go:138] object-"kube-system"/"kube-proxy": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps "kube-proxy" is forbidden: User "system:node:old-k8s-version-577369" cannot list resource "configmaps" in API group "" in the namespace "kube-system": no relationship found between node 'old-k8s-version-577369' and this object
	W0620 18:10:53.395315  362254 logs.go:138] Found kubelet problem: Jun 20 18:05:23 old-k8s-version-577369 kubelet[1201]: E0620 18:05:23.698704    1201 reflector.go:138] object-"kube-system"/"storage-provisioner-token-b9lfw": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets "storage-provisioner-token-b9lfw" is forbidden: User "system:node:old-k8s-version-577369" cannot list resource "secrets" in API group "" in the namespace "kube-system": no relationship found between node 'old-k8s-version-577369' and this object
	W0620 18:10:53.395517  362254 logs.go:138] Found kubelet problem: Jun 20 18:05:23 old-k8s-version-577369 kubelet[1201]: E0620 18:05:23.698918    1201 reflector.go:138] object-"kube-system"/"coredns": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps "coredns" is forbidden: User "system:node:old-k8s-version-577369" cannot list resource "configmaps" in API group "" in the namespace "kube-system": no relationship found between node 'old-k8s-version-577369' and this object
	W0620 18:10:53.395728  362254 logs.go:138] Found kubelet problem: Jun 20 18:05:23 old-k8s-version-577369 kubelet[1201]: E0620 18:05:23.699130    1201 reflector.go:138] object-"kube-system"/"coredns-token-br6s7": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets "coredns-token-br6s7" is forbidden: User "system:node:old-k8s-version-577369" cannot list resource "secrets" in API group "" in the namespace "kube-system": no relationship found between node 'old-k8s-version-577369' and this object
	W0620 18:10:53.395936  362254 logs.go:138] Found kubelet problem: Jun 20 18:05:23 old-k8s-version-577369 kubelet[1201]: E0620 18:05:23.699223    1201 reflector.go:138] object-"default"/"default-token-9m8v4": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets "default-token-9m8v4" is forbidden: User "system:node:old-k8s-version-577369" cannot list resource "secrets" in API group "" in the namespace "default": no relationship found between node 'old-k8s-version-577369' and this object
	W0620 18:10:53.396154  362254 logs.go:138] Found kubelet problem: Jun 20 18:05:23 old-k8s-version-577369 kubelet[1201]: E0620 18:05:23.699291    1201 reflector.go:138] object-"kube-system"/"kube-proxy-token-kwb2s": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets "kube-proxy-token-kwb2s" is forbidden: User "system:node:old-k8s-version-577369" cannot list resource "secrets" in API group "" in the namespace "kube-system": no relationship found between node 'old-k8s-version-577369' and this object
	W0620 18:10:53.402899  362254 logs.go:138] Found kubelet problem: Jun 20 18:05:28 old-k8s-version-577369 kubelet[1201]: E0620 18:05:27.999585    1201 pod_workers.go:191] Error syncing pod 18cfc59b-503a-463e-80f0-2a08c7711566 ("metrics-server-9975d5f86-nxk7p_kube-system(18cfc59b-503a-463e-80f0-2a08c7711566)"), skipping: failed to "StartContainer" for "metrics-server" with ErrImagePull: "rpc error: code = Unknown desc = Error response from daemon: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.85.1:53: no such host"
	W0620 18:10:53.403705  362254 logs.go:138] Found kubelet problem: Jun 20 18:05:28 old-k8s-version-577369 kubelet[1201]: E0620 18:05:28.453263    1201 pod_workers.go:191] Error syncing pod 18cfc59b-503a-463e-80f0-2a08c7711566 ("metrics-server-9975d5f86-nxk7p_kube-system(18cfc59b-503a-463e-80f0-2a08c7711566)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0620 18:10:53.406169  362254 logs.go:138] Found kubelet problem: Jun 20 18:05:41 old-k8s-version-577369 kubelet[1201]: E0620 18:05:41.280792    1201 pod_workers.go:191] Error syncing pod 18cfc59b-503a-463e-80f0-2a08c7711566 ("metrics-server-9975d5f86-nxk7p_kube-system(18cfc59b-503a-463e-80f0-2a08c7711566)"), skipping: failed to "StartContainer" for "metrics-server" with ErrImagePull: "rpc error: code = Unknown desc = Error response from daemon: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.85.1:53: no such host"
	W0620 18:10:53.410905  362254 logs.go:138] Found kubelet problem: Jun 20 18:05:46 old-k8s-version-577369 kubelet[1201]: E0620 18:05:46.379764    1201 pod_workers.go:191] Error syncing pod a0906cb1-30c6-4e85-9f0e-eb135fb00622 ("dashboard-metrics-scraper-8d5bb5db8-hhrcd_kubernetes-dashboard(a0906cb1-30c6-4e85-9f0e-eb135fb00622)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ErrImagePull: "rpc error: code = Unknown desc = [DEPRECATION NOTICE] Docker Image Format v1 and Docker Image manifest version 2, schema 1 support is disabled by default and will be removed in an upcoming release. Suggest the author of registry.k8s.io/echoserver:1.4 to upgrade the image to the OCI Format or Docker Image manifest v2, schema 2. More information at https://docs.docker.com/go/deprecated-image-specs/"
	W0620 18:10:53.411492  362254 logs.go:138] Found kubelet problem: Jun 20 18:05:46 old-k8s-version-577369 kubelet[1201]: E0620 18:05:46.795712    1201 pod_workers.go:191] Error syncing pod a0906cb1-30c6-4e85-9f0e-eb135fb00622 ("dashboard-metrics-scraper-8d5bb5db8-hhrcd_kubernetes-dashboard(a0906cb1-30c6-4e85-9f0e-eb135fb00622)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0620 18:10:53.411691  362254 logs.go:138] Found kubelet problem: Jun 20 18:05:47 old-k8s-version-577369 kubelet[1201]: E0620 18:05:47.792472    1201 pod_workers.go:191] Error syncing pod a0906cb1-30c6-4e85-9f0e-eb135fb00622 ("dashboard-metrics-scraper-8d5bb5db8-hhrcd_kubernetes-dashboard(a0906cb1-30c6-4e85-9f0e-eb135fb00622)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0620 18:10:53.412058  362254 logs.go:138] Found kubelet problem: Jun 20 18:05:53 old-k8s-version-577369 kubelet[1201]: E0620 18:05:53.229484    1201 pod_workers.go:191] Error syncing pod 18cfc59b-503a-463e-80f0-2a08c7711566 ("metrics-server-9975d5f86-nxk7p_kube-system(18cfc59b-503a-463e-80f0-2a08c7711566)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0620 18:10:53.412707  362254 logs.go:138] Found kubelet problem: Jun 20 18:05:58 old-k8s-version-577369 kubelet[1201]: E0620 18:05:58.937844    1201 pod_workers.go:191] Error syncing pod 47a50d23-d504-4f3b-a3a1-97513673c10e ("storage-provisioner_kube-system(47a50d23-d504-4f3b-a3a1-97513673c10e)"), skipping: failed to "StartContainer" for "storage-provisioner" with CrashLoopBackOff: "back-off 10s restarting failed container=storage-provisioner pod=storage-provisioner_kube-system(47a50d23-d504-4f3b-a3a1-97513673c10e)"
	W0620 18:10:53.415122  362254 logs.go:138] Found kubelet problem: Jun 20 18:06:00 old-k8s-version-577369 kubelet[1201]: E0620 18:06:00.973843    1201 pod_workers.go:191] Error syncing pod a0906cb1-30c6-4e85-9f0e-eb135fb00622 ("dashboard-metrics-scraper-8d5bb5db8-hhrcd_kubernetes-dashboard(a0906cb1-30c6-4e85-9f0e-eb135fb00622)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ErrImagePull: "rpc error: code = Unknown desc = [DEPRECATION NOTICE] Docker Image Format v1 and Docker Image manifest version 2, schema 1 support is disabled by default and will be removed in an upcoming release. Suggest the author of registry.k8s.io/echoserver:1.4 to upgrade the image to the OCI Format or Docker Image manifest v2, schema 2. More information at https://docs.docker.com/go/deprecated-image-specs/"
	W0620 18:10:53.417576  362254 logs.go:138] Found kubelet problem: Jun 20 18:06:06 old-k8s-version-577369 kubelet[1201]: E0620 18:06:06.253227    1201 pod_workers.go:191] Error syncing pod 18cfc59b-503a-463e-80f0-2a08c7711566 ("metrics-server-9975d5f86-nxk7p_kube-system(18cfc59b-503a-463e-80f0-2a08c7711566)"), skipping: failed to "StartContainer" for "metrics-server" with ErrImagePull: "rpc error: code = Unknown desc = Error response from daemon: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.85.1:53: no such host"
	W0620 18:10:53.417909  362254 logs.go:138] Found kubelet problem: Jun 20 18:06:12 old-k8s-version-577369 kubelet[1201]: E0620 18:06:12.213225    1201 pod_workers.go:191] Error syncing pod a0906cb1-30c6-4e85-9f0e-eb135fb00622 ("dashboard-metrics-scraper-8d5bb5db8-hhrcd_kubernetes-dashboard(a0906cb1-30c6-4e85-9f0e-eb135fb00622)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0620 18:10:53.418095  362254 logs.go:138] Found kubelet problem: Jun 20 18:06:20 old-k8s-version-577369 kubelet[1201]: E0620 18:06:20.220208    1201 pod_workers.go:191] Error syncing pod 18cfc59b-503a-463e-80f0-2a08c7711566 ("metrics-server-9975d5f86-nxk7p_kube-system(18cfc59b-503a-463e-80f0-2a08c7711566)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0620 18:10:53.420319  362254 logs.go:138] Found kubelet problem: Jun 20 18:06:25 old-k8s-version-577369 kubelet[1201]: E0620 18:06:25.880053    1201 pod_workers.go:191] Error syncing pod a0906cb1-30c6-4e85-9f0e-eb135fb00622 ("dashboard-metrics-scraper-8d5bb5db8-hhrcd_kubernetes-dashboard(a0906cb1-30c6-4e85-9f0e-eb135fb00622)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ErrImagePull: "rpc error: code = Unknown desc = [DEPRECATION NOTICE] Docker Image Format v1 and Docker Image manifest version 2, schema 1 support is disabled by default and will be removed in an upcoming release. Suggest the author of registry.k8s.io/echoserver:1.4 to upgrade the image to the OCI Format or Docker Image manifest v2, schema 2. More information at https://docs.docker.com/go/deprecated-image-specs/"
	W0620 18:10:53.420506  362254 logs.go:138] Found kubelet problem: Jun 20 18:06:34 old-k8s-version-577369 kubelet[1201]: E0620 18:06:34.213970    1201 pod_workers.go:191] Error syncing pod 18cfc59b-503a-463e-80f0-2a08c7711566 ("metrics-server-9975d5f86-nxk7p_kube-system(18cfc59b-503a-463e-80f0-2a08c7711566)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0620 18:10:53.420708  362254 logs.go:138] Found kubelet problem: Jun 20 18:06:39 old-k8s-version-577369 kubelet[1201]: E0620 18:06:39.231731    1201 pod_workers.go:191] Error syncing pod a0906cb1-30c6-4e85-9f0e-eb135fb00622 ("dashboard-metrics-scraper-8d5bb5db8-hhrcd_kubernetes-dashboard(a0906cb1-30c6-4e85-9f0e-eb135fb00622)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0620 18:10:53.422875  362254 logs.go:138] Found kubelet problem: Jun 20 18:06:49 old-k8s-version-577369 kubelet[1201]: E0620 18:06:49.256445    1201 pod_workers.go:191] Error syncing pod 18cfc59b-503a-463e-80f0-2a08c7711566 ("metrics-server-9975d5f86-nxk7p_kube-system(18cfc59b-503a-463e-80f0-2a08c7711566)"), skipping: failed to "StartContainer" for "metrics-server" with ErrImagePull: "rpc error: code = Unknown desc = Error response from daemon: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.85.1:53: no such host"
	W0620 18:10:53.423094  362254 logs.go:138] Found kubelet problem: Jun 20 18:06:53 old-k8s-version-577369 kubelet[1201]: E0620 18:06:53.229559    1201 pod_workers.go:191] Error syncing pod a0906cb1-30c6-4e85-9f0e-eb135fb00622 ("dashboard-metrics-scraper-8d5bb5db8-hhrcd_kubernetes-dashboard(a0906cb1-30c6-4e85-9f0e-eb135fb00622)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0620 18:10:53.423282  362254 logs.go:138] Found kubelet problem: Jun 20 18:07:02 old-k8s-version-577369 kubelet[1201]: E0620 18:07:02.213019    1201 pod_workers.go:191] Error syncing pod 18cfc59b-503a-463e-80f0-2a08c7711566 ("metrics-server-9975d5f86-nxk7p_kube-system(18cfc59b-503a-463e-80f0-2a08c7711566)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0620 18:10:53.425508  362254 logs.go:138] Found kubelet problem: Jun 20 18:07:07 old-k8s-version-577369 kubelet[1201]: E0620 18:07:07.909458    1201 pod_workers.go:191] Error syncing pod a0906cb1-30c6-4e85-9f0e-eb135fb00622 ("dashboard-metrics-scraper-8d5bb5db8-hhrcd_kubernetes-dashboard(a0906cb1-30c6-4e85-9f0e-eb135fb00622)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ErrImagePull: "rpc error: code = Unknown desc = [DEPRECATION NOTICE] Docker Image Format v1 and Docker Image manifest version 2, schema 1 support is disabled by default and will be removed in an upcoming release. Suggest the author of registry.k8s.io/echoserver:1.4 to upgrade the image to the OCI Format or Docker Image manifest v2, schema 2. More information at https://docs.docker.com/go/deprecated-image-specs/"
	W0620 18:10:53.425695  362254 logs.go:138] Found kubelet problem: Jun 20 18:07:17 old-k8s-version-577369 kubelet[1201]: E0620 18:07:17.213306    1201 pod_workers.go:191] Error syncing pod 18cfc59b-503a-463e-80f0-2a08c7711566 ("metrics-server-9975d5f86-nxk7p_kube-system(18cfc59b-503a-463e-80f0-2a08c7711566)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0620 18:10:53.425892  362254 logs.go:138] Found kubelet problem: Jun 20 18:07:20 old-k8s-version-577369 kubelet[1201]: E0620 18:07:20.213221    1201 pod_workers.go:191] Error syncing pod a0906cb1-30c6-4e85-9f0e-eb135fb00622 ("dashboard-metrics-scraper-8d5bb5db8-hhrcd_kubernetes-dashboard(a0906cb1-30c6-4e85-9f0e-eb135fb00622)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0620 18:10:53.426129  362254 logs.go:138] Found kubelet problem: Jun 20 18:07:32 old-k8s-version-577369 kubelet[1201]: E0620 18:07:32.213001    1201 pod_workers.go:191] Error syncing pod 18cfc59b-503a-463e-80f0-2a08c7711566 ("metrics-server-9975d5f86-nxk7p_kube-system(18cfc59b-503a-463e-80f0-2a08c7711566)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0620 18:10:53.426358  362254 logs.go:138] Found kubelet problem: Jun 20 18:07:35 old-k8s-version-577369 kubelet[1201]: E0620 18:07:35.214923    1201 pod_workers.go:191] Error syncing pod a0906cb1-30c6-4e85-9f0e-eb135fb00622 ("dashboard-metrics-scraper-8d5bb5db8-hhrcd_kubernetes-dashboard(a0906cb1-30c6-4e85-9f0e-eb135fb00622)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0620 18:10:53.426574  362254 logs.go:138] Found kubelet problem: Jun 20 18:07:47 old-k8s-version-577369 kubelet[1201]: E0620 18:07:47.215155    1201 pod_workers.go:191] Error syncing pod 18cfc59b-503a-463e-80f0-2a08c7711566 ("metrics-server-9975d5f86-nxk7p_kube-system(18cfc59b-503a-463e-80f0-2a08c7711566)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0620 18:10:53.426803  362254 logs.go:138] Found kubelet problem: Jun 20 18:07:50 old-k8s-version-577369 kubelet[1201]: E0620 18:07:50.213002    1201 pod_workers.go:191] Error syncing pod a0906cb1-30c6-4e85-9f0e-eb135fb00622 ("dashboard-metrics-scraper-8d5bb5db8-hhrcd_kubernetes-dashboard(a0906cb1-30c6-4e85-9f0e-eb135fb00622)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0620 18:10:53.427024  362254 logs.go:138] Found kubelet problem: Jun 20 18:08:02 old-k8s-version-577369 kubelet[1201]: E0620 18:08:02.213224    1201 pod_workers.go:191] Error syncing pod 18cfc59b-503a-463e-80f0-2a08c7711566 ("metrics-server-9975d5f86-nxk7p_kube-system(18cfc59b-503a-463e-80f0-2a08c7711566)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0620 18:10:53.427277  362254 logs.go:138] Found kubelet problem: Jun 20 18:08:04 old-k8s-version-577369 kubelet[1201]: E0620 18:08:04.224800    1201 pod_workers.go:191] Error syncing pod a0906cb1-30c6-4e85-9f0e-eb135fb00622 ("dashboard-metrics-scraper-8d5bb5db8-hhrcd_kubernetes-dashboard(a0906cb1-30c6-4e85-9f0e-eb135fb00622)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0620 18:10:53.429643  362254 logs.go:138] Found kubelet problem: Jun 20 18:08:13 old-k8s-version-577369 kubelet[1201]: E0620 18:08:13.248654    1201 pod_workers.go:191] Error syncing pod 18cfc59b-503a-463e-80f0-2a08c7711566 ("metrics-server-9975d5f86-nxk7p_kube-system(18cfc59b-503a-463e-80f0-2a08c7711566)"), skipping: failed to "StartContainer" for "metrics-server" with ErrImagePull: "rpc error: code = Unknown desc = Error response from daemon: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.85.1:53: no such host"
	W0620 18:10:53.429866  362254 logs.go:138] Found kubelet problem: Jun 20 18:08:18 old-k8s-version-577369 kubelet[1201]: E0620 18:08:18.213216    1201 pod_workers.go:191] Error syncing pod a0906cb1-30c6-4e85-9f0e-eb135fb00622 ("dashboard-metrics-scraper-8d5bb5db8-hhrcd_kubernetes-dashboard(a0906cb1-30c6-4e85-9f0e-eb135fb00622)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0620 18:10:53.430079  362254 logs.go:138] Found kubelet problem: Jun 20 18:08:24 old-k8s-version-577369 kubelet[1201]: E0620 18:08:24.213044    1201 pod_workers.go:191] Error syncing pod 18cfc59b-503a-463e-80f0-2a08c7711566 ("metrics-server-9975d5f86-nxk7p_kube-system(18cfc59b-503a-463e-80f0-2a08c7711566)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0620 18:10:53.432435  362254 logs.go:138] Found kubelet problem: Jun 20 18:08:31 old-k8s-version-577369 kubelet[1201]: E0620 18:08:31.863326    1201 pod_workers.go:191] Error syncing pod a0906cb1-30c6-4e85-9f0e-eb135fb00622 ("dashboard-metrics-scraper-8d5bb5db8-hhrcd_kubernetes-dashboard(a0906cb1-30c6-4e85-9f0e-eb135fb00622)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ErrImagePull: "rpc error: code = Unknown desc = [DEPRECATION NOTICE] Docker Image Format v1 and Docker Image manifest version 2, schema 1 support is disabled by default and will be removed in an upcoming release. Suggest the author of registry.k8s.io/echoserver:1.4 to upgrade the image to the OCI Format or Docker Image manifest v2, schema 2. More information at https://docs.docker.com/go/deprecated-image-specs/"
	W0620 18:10:53.432690  362254 logs.go:138] Found kubelet problem: Jun 20 18:08:35 old-k8s-version-577369 kubelet[1201]: E0620 18:08:35.226288    1201 pod_workers.go:191] Error syncing pod 18cfc59b-503a-463e-80f0-2a08c7711566 ("metrics-server-9975d5f86-nxk7p_kube-system(18cfc59b-503a-463e-80f0-2a08c7711566)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0620 18:10:53.432906  362254 logs.go:138] Found kubelet problem: Jun 20 18:08:44 old-k8s-version-577369 kubelet[1201]: E0620 18:08:44.214061    1201 pod_workers.go:191] Error syncing pod a0906cb1-30c6-4e85-9f0e-eb135fb00622 ("dashboard-metrics-scraper-8d5bb5db8-hhrcd_kubernetes-dashboard(a0906cb1-30c6-4e85-9f0e-eb135fb00622)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0620 18:10:53.433095  362254 logs.go:138] Found kubelet problem: Jun 20 18:08:46 old-k8s-version-577369 kubelet[1201]: E0620 18:08:46.213051    1201 pod_workers.go:191] Error syncing pod 18cfc59b-503a-463e-80f0-2a08c7711566 ("metrics-server-9975d5f86-nxk7p_kube-system(18cfc59b-503a-463e-80f0-2a08c7711566)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0620 18:10:53.433312  362254 logs.go:138] Found kubelet problem: Jun 20 18:08:57 old-k8s-version-577369 kubelet[1201]: E0620 18:08:57.213817    1201 pod_workers.go:191] Error syncing pod a0906cb1-30c6-4e85-9f0e-eb135fb00622 ("dashboard-metrics-scraper-8d5bb5db8-hhrcd_kubernetes-dashboard(a0906cb1-30c6-4e85-9f0e-eb135fb00622)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0620 18:10:53.434754  362254 logs.go:138] Found kubelet problem: Jun 20 18:08:58 old-k8s-version-577369 kubelet[1201]: E0620 18:08:58.213216    1201 pod_workers.go:191] Error syncing pod 18cfc59b-503a-463e-80f0-2a08c7711566 ("metrics-server-9975d5f86-nxk7p_kube-system(18cfc59b-503a-463e-80f0-2a08c7711566)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0620 18:10:53.434962  362254 logs.go:138] Found kubelet problem: Jun 20 18:09:11 old-k8s-version-577369 kubelet[1201]: E0620 18:09:11.219178    1201 pod_workers.go:191] Error syncing pod 18cfc59b-503a-463e-80f0-2a08c7711566 ("metrics-server-9975d5f86-nxk7p_kube-system(18cfc59b-503a-463e-80f0-2a08c7711566)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0620 18:10:53.435211  362254 logs.go:138] Found kubelet problem: Jun 20 18:09:11 old-k8s-version-577369 kubelet[1201]: E0620 18:09:11.223804    1201 pod_workers.go:191] Error syncing pod a0906cb1-30c6-4e85-9f0e-eb135fb00622 ("dashboard-metrics-scraper-8d5bb5db8-hhrcd_kubernetes-dashboard(a0906cb1-30c6-4e85-9f0e-eb135fb00622)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0620 18:10:53.435405  362254 logs.go:138] Found kubelet problem: Jun 20 18:09:25 old-k8s-version-577369 kubelet[1201]: E0620 18:09:25.213628    1201 pod_workers.go:191] Error syncing pod 18cfc59b-503a-463e-80f0-2a08c7711566 ("metrics-server-9975d5f86-nxk7p_kube-system(18cfc59b-503a-463e-80f0-2a08c7711566)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0620 18:10:53.435612  362254 logs.go:138] Found kubelet problem: Jun 20 18:09:25 old-k8s-version-577369 kubelet[1201]: E0620 18:09:25.220085    1201 pod_workers.go:191] Error syncing pod a0906cb1-30c6-4e85-9f0e-eb135fb00622 ("dashboard-metrics-scraper-8d5bb5db8-hhrcd_kubernetes-dashboard(a0906cb1-30c6-4e85-9f0e-eb135fb00622)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0620 18:10:53.435796  362254 logs.go:138] Found kubelet problem: Jun 20 18:09:37 old-k8s-version-577369 kubelet[1201]: E0620 18:09:37.218633    1201 pod_workers.go:191] Error syncing pod 18cfc59b-503a-463e-80f0-2a08c7711566 ("metrics-server-9975d5f86-nxk7p_kube-system(18cfc59b-503a-463e-80f0-2a08c7711566)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0620 18:10:53.435993  362254 logs.go:138] Found kubelet problem: Jun 20 18:09:39 old-k8s-version-577369 kubelet[1201]: E0620 18:09:39.213129    1201 pod_workers.go:191] Error syncing pod a0906cb1-30c6-4e85-9f0e-eb135fb00622 ("dashboard-metrics-scraper-8d5bb5db8-hhrcd_kubernetes-dashboard(a0906cb1-30c6-4e85-9f0e-eb135fb00622)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0620 18:10:53.436176  362254 logs.go:138] Found kubelet problem: Jun 20 18:09:49 old-k8s-version-577369 kubelet[1201]: E0620 18:09:49.212880    1201 pod_workers.go:191] Error syncing pod 18cfc59b-503a-463e-80f0-2a08c7711566 ("metrics-server-9975d5f86-nxk7p_kube-system(18cfc59b-503a-463e-80f0-2a08c7711566)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0620 18:10:53.436373  362254 logs.go:138] Found kubelet problem: Jun 20 18:09:52 old-k8s-version-577369 kubelet[1201]: E0620 18:09:52.213161    1201 pod_workers.go:191] Error syncing pod a0906cb1-30c6-4e85-9f0e-eb135fb00622 ("dashboard-metrics-scraper-8d5bb5db8-hhrcd_kubernetes-dashboard(a0906cb1-30c6-4e85-9f0e-eb135fb00622)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0620 18:10:53.436556  362254 logs.go:138] Found kubelet problem: Jun 20 18:10:01 old-k8s-version-577369 kubelet[1201]: E0620 18:10:01.220058    1201 pod_workers.go:191] Error syncing pod 18cfc59b-503a-463e-80f0-2a08c7711566 ("metrics-server-9975d5f86-nxk7p_kube-system(18cfc59b-503a-463e-80f0-2a08c7711566)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0620 18:10:53.436792  362254 logs.go:138] Found kubelet problem: Jun 20 18:10:07 old-k8s-version-577369 kubelet[1201]: E0620 18:10:07.213133    1201 pod_workers.go:191] Error syncing pod a0906cb1-30c6-4e85-9f0e-eb135fb00622 ("dashboard-metrics-scraper-8d5bb5db8-hhrcd_kubernetes-dashboard(a0906cb1-30c6-4e85-9f0e-eb135fb00622)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0620 18:10:53.436983  362254 logs.go:138] Found kubelet problem: Jun 20 18:10:16 old-k8s-version-577369 kubelet[1201]: E0620 18:10:16.212946    1201 pod_workers.go:191] Error syncing pod 18cfc59b-503a-463e-80f0-2a08c7711566 ("metrics-server-9975d5f86-nxk7p_kube-system(18cfc59b-503a-463e-80f0-2a08c7711566)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0620 18:10:53.437180  362254 logs.go:138] Found kubelet problem: Jun 20 18:10:18 old-k8s-version-577369 kubelet[1201]: E0620 18:10:18.220165    1201 pod_workers.go:191] Error syncing pod a0906cb1-30c6-4e85-9f0e-eb135fb00622 ("dashboard-metrics-scraper-8d5bb5db8-hhrcd_kubernetes-dashboard(a0906cb1-30c6-4e85-9f0e-eb135fb00622)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0620 18:10:53.437378  362254 logs.go:138] Found kubelet problem: Jun 20 18:10:31 old-k8s-version-577369 kubelet[1201]: E0620 18:10:31.213199    1201 pod_workers.go:191] Error syncing pod a0906cb1-30c6-4e85-9f0e-eb135fb00622 ("dashboard-metrics-scraper-8d5bb5db8-hhrcd_kubernetes-dashboard(a0906cb1-30c6-4e85-9f0e-eb135fb00622)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0620 18:10:53.437565  362254 logs.go:138] Found kubelet problem: Jun 20 18:10:31 old-k8s-version-577369 kubelet[1201]: E0620 18:10:31.215427    1201 pod_workers.go:191] Error syncing pod 18cfc59b-503a-463e-80f0-2a08c7711566 ("metrics-server-9975d5f86-nxk7p_kube-system(18cfc59b-503a-463e-80f0-2a08c7711566)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0620 18:10:53.443097  362254 logs.go:138] Found kubelet problem: Jun 20 18:10:42 old-k8s-version-577369 kubelet[1201]: E0620 18:10:42.213713    1201 pod_workers.go:191] Error syncing pod a0906cb1-30c6-4e85-9f0e-eb135fb00622 ("dashboard-metrics-scraper-8d5bb5db8-hhrcd_kubernetes-dashboard(a0906cb1-30c6-4e85-9f0e-eb135fb00622)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0620 18:10:53.443303  362254 logs.go:138] Found kubelet problem: Jun 20 18:10:43 old-k8s-version-577369 kubelet[1201]: E0620 18:10:43.221571    1201 pod_workers.go:191] Error syncing pod 18cfc59b-503a-463e-80f0-2a08c7711566 ("metrics-server-9975d5f86-nxk7p_kube-system(18cfc59b-503a-463e-80f0-2a08c7711566)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	I0620 18:10:53.443318  362254 logs.go:123] Gathering logs for etcd [25a953b6e46d] ...
	I0620 18:10:53.443331  362254 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 25a953b6e46d"
	I0620 18:10:53.484821  362254 logs.go:123] Gathering logs for storage-provisioner [16e28ef3ddc9] ...
	I0620 18:10:53.484852  362254 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 16e28ef3ddc9"
	I0620 18:10:53.512687  362254 logs.go:123] Gathering logs for Docker ...
	I0620 18:10:53.512715  362254 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u docker -u cri-docker -n 400"
	I0620 18:10:53.542148  362254 logs.go:123] Gathering logs for kubernetes-dashboard [49efdc60eccc] ...
	I0620 18:10:53.542183  362254 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 49efdc60eccc"
	I0620 18:10:53.570140  362254 logs.go:123] Gathering logs for dmesg ...
	I0620 18:10:53.570171  362254 ssh_runner.go:195] Run: /bin/bash -c "sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400"
	I0620 18:10:53.588771  362254 logs.go:123] Gathering logs for coredns [7e60d81fce7f] ...
	I0620 18:10:53.588800  362254 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 7e60d81fce7f"
	I0620 18:10:53.615089  362254 logs.go:123] Gathering logs for kube-scheduler [4ed5438feb8b] ...
	I0620 18:10:53.615127  362254 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 4ed5438feb8b"
	I0620 18:10:53.647925  362254 logs.go:123] Gathering logs for kube-scheduler [ab177be73cae] ...
	I0620 18:10:53.647953  362254 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 ab177be73cae"
	I0620 18:10:53.678028  362254 logs.go:123] Gathering logs for kube-controller-manager [a4eee0f3ea35] ...
	I0620 18:10:53.678058  362254 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 a4eee0f3ea35"
	I0620 18:10:53.741842  362254 out.go:304] Setting ErrFile to fd 2...
	I0620 18:10:53.741873  362254 out.go:338] TERM=,COLORTERM=, which probably does not support color
	W0620 18:10:53.741941  362254 out.go:239] X Problems detected in kubelet:
	W0620 18:10:53.741956  362254 out.go:239]   Jun 20 18:10:18 old-k8s-version-577369 kubelet[1201]: E0620 18:10:18.220165    1201 pod_workers.go:191] Error syncing pod a0906cb1-30c6-4e85-9f0e-eb135fb00622 ("dashboard-metrics-scraper-8d5bb5db8-hhrcd_kubernetes-dashboard(a0906cb1-30c6-4e85-9f0e-eb135fb00622)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0620 18:10:53.741969  362254 out.go:239]   Jun 20 18:10:31 old-k8s-version-577369 kubelet[1201]: E0620 18:10:31.213199    1201 pod_workers.go:191] Error syncing pod a0906cb1-30c6-4e85-9f0e-eb135fb00622 ("dashboard-metrics-scraper-8d5bb5db8-hhrcd_kubernetes-dashboard(a0906cb1-30c6-4e85-9f0e-eb135fb00622)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0620 18:10:53.741977  362254 out.go:239]   Jun 20 18:10:31 old-k8s-version-577369 kubelet[1201]: E0620 18:10:31.215427    1201 pod_workers.go:191] Error syncing pod 18cfc59b-503a-463e-80f0-2a08c7711566 ("metrics-server-9975d5f86-nxk7p_kube-system(18cfc59b-503a-463e-80f0-2a08c7711566)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0620 18:10:53.741984  362254 out.go:239]   Jun 20 18:10:42 old-k8s-version-577369 kubelet[1201]: E0620 18:10:42.213713    1201 pod_workers.go:191] Error syncing pod a0906cb1-30c6-4e85-9f0e-eb135fb00622 ("dashboard-metrics-scraper-8d5bb5db8-hhrcd_kubernetes-dashboard(a0906cb1-30c6-4e85-9f0e-eb135fb00622)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0620 18:10:53.742104  362254 out.go:239]   Jun 20 18:10:43 old-k8s-version-577369 kubelet[1201]: E0620 18:10:43.221571    1201 pod_workers.go:191] Error syncing pod 18cfc59b-503a-463e-80f0-2a08c7711566 ("metrics-server-9975d5f86-nxk7p_kube-system(18cfc59b-503a-463e-80f0-2a08c7711566)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	I0620 18:10:53.742120  362254 out.go:304] Setting ErrFile to fd 2...
	I0620 18:10:53.742126  362254 out.go:338] TERM=,COLORTERM=, which probably does not support color
	I0620 18:11:03.744154  362254 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
	I0620 18:11:03.759975  362254 api_server.go:72] duration metric: took 5m53.685709451s to wait for apiserver process to appear ...
	I0620 18:11:03.759998  362254 api_server.go:88] waiting for apiserver healthz status ...
	I0620 18:11:03.760078  362254 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_kube-apiserver --format={{.ID}}
	I0620 18:11:03.798135  362254 logs.go:276] 2 containers: [075e697d07c8 760f9a7d272c]
	I0620 18:11:03.798211  362254 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_etcd --format={{.ID}}
	I0620 18:11:03.831434  362254 logs.go:276] 2 containers: [25a953b6e46d 81eaddbb4b45]
	I0620 18:11:03.831511  362254 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_coredns --format={{.ID}}
	I0620 18:11:03.861101  362254 logs.go:276] 2 containers: [5534d9d547fd 7e60d81fce7f]
	I0620 18:11:03.861185  362254 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_kube-scheduler --format={{.ID}}
	I0620 18:11:03.905927  362254 logs.go:276] 2 containers: [4ed5438feb8b ab177be73cae]
	I0620 18:11:03.906011  362254 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_kube-proxy --format={{.ID}}
	I0620 18:11:03.944486  362254 logs.go:276] 2 containers: [0b32a67571bb c47b7591b320]
	I0620 18:11:03.944579  362254 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_kube-controller-manager --format={{.ID}}
	I0620 18:11:03.975451  362254 logs.go:276] 2 containers: [aab72b193950 a4eee0f3ea35]
	I0620 18:11:03.975533  362254 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_kindnet --format={{.ID}}
	I0620 18:11:03.998509  362254 logs.go:276] 0 containers: []
	W0620 18:11:03.998529  362254 logs.go:278] No container was found matching "kindnet"
	I0620 18:11:03.998586  362254 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_kubernetes-dashboard --format={{.ID}}
	I0620 18:11:04.031360  362254 logs.go:276] 1 containers: [49efdc60eccc]
	I0620 18:11:04.031444  362254 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_storage-provisioner --format={{.ID}}
	I0620 18:11:04.057548  362254 logs.go:276] 2 containers: [74b467d165b4 16e28ef3ddc9]
	I0620 18:11:04.057630  362254 logs.go:123] Gathering logs for kubelet ...
	I0620 18:11:04.057656  362254 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u kubelet -n 400"
	W0620 18:11:04.137517  362254 logs.go:138] Found kubelet problem: Jun 20 18:05:23 old-k8s-version-577369 kubelet[1201]: E0620 18:05:23.695585    1201 reflector.go:138] object-"kube-system"/"metrics-server-token-xd7wc": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets "metrics-server-token-xd7wc" is forbidden: User "system:node:old-k8s-version-577369" cannot list resource "secrets" in API group "" in the namespace "kube-system": no relationship found between node 'old-k8s-version-577369' and this object
	W0620 18:11:04.137802  362254 logs.go:138] Found kubelet problem: Jun 20 18:05:23 old-k8s-version-577369 kubelet[1201]: E0620 18:05:23.698643    1201 reflector.go:138] object-"kube-system"/"kube-proxy": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps "kube-proxy" is forbidden: User "system:node:old-k8s-version-577369" cannot list resource "configmaps" in API group "" in the namespace "kube-system": no relationship found between node 'old-k8s-version-577369' and this object
	W0620 18:11:04.138082  362254 logs.go:138] Found kubelet problem: Jun 20 18:05:23 old-k8s-version-577369 kubelet[1201]: E0620 18:05:23.698704    1201 reflector.go:138] object-"kube-system"/"storage-provisioner-token-b9lfw": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets "storage-provisioner-token-b9lfw" is forbidden: User "system:node:old-k8s-version-577369" cannot list resource "secrets" in API group "" in the namespace "kube-system": no relationship found between node 'old-k8s-version-577369' and this object
	W0620 18:11:04.138316  362254 logs.go:138] Found kubelet problem: Jun 20 18:05:23 old-k8s-version-577369 kubelet[1201]: E0620 18:05:23.698918    1201 reflector.go:138] object-"kube-system"/"coredns": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps "coredns" is forbidden: User "system:node:old-k8s-version-577369" cannot list resource "configmaps" in API group "" in the namespace "kube-system": no relationship found between node 'old-k8s-version-577369' and this object
	W0620 18:11:04.138554  362254 logs.go:138] Found kubelet problem: Jun 20 18:05:23 old-k8s-version-577369 kubelet[1201]: E0620 18:05:23.699130    1201 reflector.go:138] object-"kube-system"/"coredns-token-br6s7": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets "coredns-token-br6s7" is forbidden: User "system:node:old-k8s-version-577369" cannot list resource "secrets" in API group "" in the namespace "kube-system": no relationship found between node 'old-k8s-version-577369' and this object
	W0620 18:11:04.138788  362254 logs.go:138] Found kubelet problem: Jun 20 18:05:23 old-k8s-version-577369 kubelet[1201]: E0620 18:05:23.699223    1201 reflector.go:138] object-"default"/"default-token-9m8v4": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets "default-token-9m8v4" is forbidden: User "system:node:old-k8s-version-577369" cannot list resource "secrets" in API group "" in the namespace "default": no relationship found between node 'old-k8s-version-577369' and this object
	W0620 18:11:04.139088  362254 logs.go:138] Found kubelet problem: Jun 20 18:05:23 old-k8s-version-577369 kubelet[1201]: E0620 18:05:23.699291    1201 reflector.go:138] object-"kube-system"/"kube-proxy-token-kwb2s": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets "kube-proxy-token-kwb2s" is forbidden: User "system:node:old-k8s-version-577369" cannot list resource "secrets" in API group "" in the namespace "kube-system": no relationship found between node 'old-k8s-version-577369' and this object
	W0620 18:11:04.145789  362254 logs.go:138] Found kubelet problem: Jun 20 18:05:28 old-k8s-version-577369 kubelet[1201]: E0620 18:05:27.999585    1201 pod_workers.go:191] Error syncing pod 18cfc59b-503a-463e-80f0-2a08c7711566 ("metrics-server-9975d5f86-nxk7p_kube-system(18cfc59b-503a-463e-80f0-2a08c7711566)"), skipping: failed to "StartContainer" for "metrics-server" with ErrImagePull: "rpc error: code = Unknown desc = Error response from daemon: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.85.1:53: no such host"
	W0620 18:11:04.146521  362254 logs.go:138] Found kubelet problem: Jun 20 18:05:28 old-k8s-version-577369 kubelet[1201]: E0620 18:05:28.453263    1201 pod_workers.go:191] Error syncing pod 18cfc59b-503a-463e-80f0-2a08c7711566 ("metrics-server-9975d5f86-nxk7p_kube-system(18cfc59b-503a-463e-80f0-2a08c7711566)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0620 18:11:04.149146  362254 logs.go:138] Found kubelet problem: Jun 20 18:05:41 old-k8s-version-577369 kubelet[1201]: E0620 18:05:41.280792    1201 pod_workers.go:191] Error syncing pod 18cfc59b-503a-463e-80f0-2a08c7711566 ("metrics-server-9975d5f86-nxk7p_kube-system(18cfc59b-503a-463e-80f0-2a08c7711566)"), skipping: failed to "StartContainer" for "metrics-server" with ErrImagePull: "rpc error: code = Unknown desc = Error response from daemon: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.85.1:53: no such host"
	W0620 18:11:04.155757  362254 logs.go:138] Found kubelet problem: Jun 20 18:05:46 old-k8s-version-577369 kubelet[1201]: E0620 18:05:46.379764    1201 pod_workers.go:191] Error syncing pod a0906cb1-30c6-4e85-9f0e-eb135fb00622 ("dashboard-metrics-scraper-8d5bb5db8-hhrcd_kubernetes-dashboard(a0906cb1-30c6-4e85-9f0e-eb135fb00622)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ErrImagePull: "rpc error: code = Unknown desc = [DEPRECATION NOTICE] Docker Image Format v1 and Docker Image manifest version 2, schema 1 support is disabled by default and will be removed in an upcoming release. Suggest the author of registry.k8s.io/echoserver:1.4 to upgrade the image to the OCI Format or Docker Image manifest v2, schema 2. More information at https://docs.docker.com/go/deprecated-image-specs/"
	W0620 18:11:04.156394  362254 logs.go:138] Found kubelet problem: Jun 20 18:05:46 old-k8s-version-577369 kubelet[1201]: E0620 18:05:46.795712    1201 pod_workers.go:191] Error syncing pod a0906cb1-30c6-4e85-9f0e-eb135fb00622 ("dashboard-metrics-scraper-8d5bb5db8-hhrcd_kubernetes-dashboard(a0906cb1-30c6-4e85-9f0e-eb135fb00622)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0620 18:11:04.156632  362254 logs.go:138] Found kubelet problem: Jun 20 18:05:47 old-k8s-version-577369 kubelet[1201]: E0620 18:05:47.792472    1201 pod_workers.go:191] Error syncing pod a0906cb1-30c6-4e85-9f0e-eb135fb00622 ("dashboard-metrics-scraper-8d5bb5db8-hhrcd_kubernetes-dashboard(a0906cb1-30c6-4e85-9f0e-eb135fb00622)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0620 18:11:04.157039  362254 logs.go:138] Found kubelet problem: Jun 20 18:05:53 old-k8s-version-577369 kubelet[1201]: E0620 18:05:53.229484    1201 pod_workers.go:191] Error syncing pod 18cfc59b-503a-463e-80f0-2a08c7711566 ("metrics-server-9975d5f86-nxk7p_kube-system(18cfc59b-503a-463e-80f0-2a08c7711566)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0620 18:11:04.157839  362254 logs.go:138] Found kubelet problem: Jun 20 18:05:58 old-k8s-version-577369 kubelet[1201]: E0620 18:05:58.937844    1201 pod_workers.go:191] Error syncing pod 47a50d23-d504-4f3b-a3a1-97513673c10e ("storage-provisioner_kube-system(47a50d23-d504-4f3b-a3a1-97513673c10e)"), skipping: failed to "StartContainer" for "storage-provisioner" with CrashLoopBackOff: "back-off 10s restarting failed container=storage-provisioner pod=storage-provisioner_kube-system(47a50d23-d504-4f3b-a3a1-97513673c10e)"
	W0620 18:11:04.160409  362254 logs.go:138] Found kubelet problem: Jun 20 18:06:00 old-k8s-version-577369 kubelet[1201]: E0620 18:06:00.973843    1201 pod_workers.go:191] Error syncing pod a0906cb1-30c6-4e85-9f0e-eb135fb00622 ("dashboard-metrics-scraper-8d5bb5db8-hhrcd_kubernetes-dashboard(a0906cb1-30c6-4e85-9f0e-eb135fb00622)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ErrImagePull: "rpc error: code = Unknown desc = [DEPRECATION NOTICE] Docker Image Format v1 and Docker Image manifest version 2, schema 1 support is disabled by default and will be removed in an upcoming release. Suggest the author of registry.k8s.io/echoserver:1.4 to upgrade the image to the OCI Format or Docker Image manifest v2, schema 2. More information at https://docs.docker.com/go/deprecated-image-specs/"
	W0620 18:11:04.162846  362254 logs.go:138] Found kubelet problem: Jun 20 18:06:06 old-k8s-version-577369 kubelet[1201]: E0620 18:06:06.253227    1201 pod_workers.go:191] Error syncing pod 18cfc59b-503a-463e-80f0-2a08c7711566 ("metrics-server-9975d5f86-nxk7p_kube-system(18cfc59b-503a-463e-80f0-2a08c7711566)"), skipping: failed to "StartContainer" for "metrics-server" with ErrImagePull: "rpc error: code = Unknown desc = Error response from daemon: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.85.1:53: no such host"
	W0620 18:11:04.163226  362254 logs.go:138] Found kubelet problem: Jun 20 18:06:12 old-k8s-version-577369 kubelet[1201]: E0620 18:06:12.213225    1201 pod_workers.go:191] Error syncing pod a0906cb1-30c6-4e85-9f0e-eb135fb00622 ("dashboard-metrics-scraper-8d5bb5db8-hhrcd_kubernetes-dashboard(a0906cb1-30c6-4e85-9f0e-eb135fb00622)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0620 18:11:04.163438  362254 logs.go:138] Found kubelet problem: Jun 20 18:06:20 old-k8s-version-577369 kubelet[1201]: E0620 18:06:20.220208    1201 pod_workers.go:191] Error syncing pod 18cfc59b-503a-463e-80f0-2a08c7711566 ("metrics-server-9975d5f86-nxk7p_kube-system(18cfc59b-503a-463e-80f0-2a08c7711566)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0620 18:11:04.165695  362254 logs.go:138] Found kubelet problem: Jun 20 18:06:25 old-k8s-version-577369 kubelet[1201]: E0620 18:06:25.880053    1201 pod_workers.go:191] Error syncing pod a0906cb1-30c6-4e85-9f0e-eb135fb00622 ("dashboard-metrics-scraper-8d5bb5db8-hhrcd_kubernetes-dashboard(a0906cb1-30c6-4e85-9f0e-eb135fb00622)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ErrImagePull: "rpc error: code = Unknown desc = [DEPRECATION NOTICE] Docker Image Format v1 and Docker Image manifest version 2, schema 1 support is disabled by default and will be removed in an upcoming release. Suggest the author of registry.k8s.io/echoserver:1.4 to upgrade the image to the OCI Format or Docker Image manifest v2, schema 2. More information at https://docs.docker.com/go/deprecated-image-specs/"
	W0620 18:11:04.165909  362254 logs.go:138] Found kubelet problem: Jun 20 18:06:34 old-k8s-version-577369 kubelet[1201]: E0620 18:06:34.213970    1201 pod_workers.go:191] Error syncing pod 18cfc59b-503a-463e-80f0-2a08c7711566 ("metrics-server-9975d5f86-nxk7p_kube-system(18cfc59b-503a-463e-80f0-2a08c7711566)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0620 18:11:04.166131  362254 logs.go:138] Found kubelet problem: Jun 20 18:06:39 old-k8s-version-577369 kubelet[1201]: E0620 18:06:39.231731    1201 pod_workers.go:191] Error syncing pod a0906cb1-30c6-4e85-9f0e-eb135fb00622 ("dashboard-metrics-scraper-8d5bb5db8-hhrcd_kubernetes-dashboard(a0906cb1-30c6-4e85-9f0e-eb135fb00622)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0620 18:11:04.168276  362254 logs.go:138] Found kubelet problem: Jun 20 18:06:49 old-k8s-version-577369 kubelet[1201]: E0620 18:06:49.256445    1201 pod_workers.go:191] Error syncing pod 18cfc59b-503a-463e-80f0-2a08c7711566 ("metrics-server-9975d5f86-nxk7p_kube-system(18cfc59b-503a-463e-80f0-2a08c7711566)"), skipping: failed to "StartContainer" for "metrics-server" with ErrImagePull: "rpc error: code = Unknown desc = Error response from daemon: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.85.1:53: no such host"
	W0620 18:11:04.168505  362254 logs.go:138] Found kubelet problem: Jun 20 18:06:53 old-k8s-version-577369 kubelet[1201]: E0620 18:06:53.229559    1201 pod_workers.go:191] Error syncing pod a0906cb1-30c6-4e85-9f0e-eb135fb00622 ("dashboard-metrics-scraper-8d5bb5db8-hhrcd_kubernetes-dashboard(a0906cb1-30c6-4e85-9f0e-eb135fb00622)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0620 18:11:04.168720  362254 logs.go:138] Found kubelet problem: Jun 20 18:07:02 old-k8s-version-577369 kubelet[1201]: E0620 18:07:02.213019    1201 pod_workers.go:191] Error syncing pod 18cfc59b-503a-463e-80f0-2a08c7711566 ("metrics-server-9975d5f86-nxk7p_kube-system(18cfc59b-503a-463e-80f0-2a08c7711566)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0620 18:11:04.170964  362254 logs.go:138] Found kubelet problem: Jun 20 18:07:07 old-k8s-version-577369 kubelet[1201]: E0620 18:07:07.909458    1201 pod_workers.go:191] Error syncing pod a0906cb1-30c6-4e85-9f0e-eb135fb00622 ("dashboard-metrics-scraper-8d5bb5db8-hhrcd_kubernetes-dashboard(a0906cb1-30c6-4e85-9f0e-eb135fb00622)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ErrImagePull: "rpc error: code = Unknown desc = [DEPRECATION NOTICE] Docker Image Format v1 and Docker Image manifest version 2, schema 1 support is disabled by default and will be removed in an upcoming release. Suggest the author of registry.k8s.io/echoserver:1.4 to upgrade the image to the OCI Format or Docker Image manifest v2, schema 2. More information at https://docs.docker.com/go/deprecated-image-specs/"
	W0620 18:11:04.176940  362254 logs.go:138] Found kubelet problem: Jun 20 18:07:17 old-k8s-version-577369 kubelet[1201]: E0620 18:07:17.213306    1201 pod_workers.go:191] Error syncing pod 18cfc59b-503a-463e-80f0-2a08c7711566 ("metrics-server-9975d5f86-nxk7p_kube-system(18cfc59b-503a-463e-80f0-2a08c7711566)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0620 18:11:04.177172  362254 logs.go:138] Found kubelet problem: Jun 20 18:07:20 old-k8s-version-577369 kubelet[1201]: E0620 18:07:20.213221    1201 pod_workers.go:191] Error syncing pod a0906cb1-30c6-4e85-9f0e-eb135fb00622 ("dashboard-metrics-scraper-8d5bb5db8-hhrcd_kubernetes-dashboard(a0906cb1-30c6-4e85-9f0e-eb135fb00622)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0620 18:11:04.177380  362254 logs.go:138] Found kubelet problem: Jun 20 18:07:32 old-k8s-version-577369 kubelet[1201]: E0620 18:07:32.213001    1201 pod_workers.go:191] Error syncing pod 18cfc59b-503a-463e-80f0-2a08c7711566 ("metrics-server-9975d5f86-nxk7p_kube-system(18cfc59b-503a-463e-80f0-2a08c7711566)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0620 18:11:04.177601  362254 logs.go:138] Found kubelet problem: Jun 20 18:07:35 old-k8s-version-577369 kubelet[1201]: E0620 18:07:35.214923    1201 pod_workers.go:191] Error syncing pod a0906cb1-30c6-4e85-9f0e-eb135fb00622 ("dashboard-metrics-scraper-8d5bb5db8-hhrcd_kubernetes-dashboard(a0906cb1-30c6-4e85-9f0e-eb135fb00622)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0620 18:11:04.177809  362254 logs.go:138] Found kubelet problem: Jun 20 18:07:47 old-k8s-version-577369 kubelet[1201]: E0620 18:07:47.215155    1201 pod_workers.go:191] Error syncing pod 18cfc59b-503a-463e-80f0-2a08c7711566 ("metrics-server-9975d5f86-nxk7p_kube-system(18cfc59b-503a-463e-80f0-2a08c7711566)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0620 18:11:04.178032  362254 logs.go:138] Found kubelet problem: Jun 20 18:07:50 old-k8s-version-577369 kubelet[1201]: E0620 18:07:50.213002    1201 pod_workers.go:191] Error syncing pod a0906cb1-30c6-4e85-9f0e-eb135fb00622 ("dashboard-metrics-scraper-8d5bb5db8-hhrcd_kubernetes-dashboard(a0906cb1-30c6-4e85-9f0e-eb135fb00622)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0620 18:11:04.178241  362254 logs.go:138] Found kubelet problem: Jun 20 18:08:02 old-k8s-version-577369 kubelet[1201]: E0620 18:08:02.213224    1201 pod_workers.go:191] Error syncing pod 18cfc59b-503a-463e-80f0-2a08c7711566 ("metrics-server-9975d5f86-nxk7p_kube-system(18cfc59b-503a-463e-80f0-2a08c7711566)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0620 18:11:04.178468  362254 logs.go:138] Found kubelet problem: Jun 20 18:08:04 old-k8s-version-577369 kubelet[1201]: E0620 18:08:04.224800    1201 pod_workers.go:191] Error syncing pod a0906cb1-30c6-4e85-9f0e-eb135fb00622 ("dashboard-metrics-scraper-8d5bb5db8-hhrcd_kubernetes-dashboard(a0906cb1-30c6-4e85-9f0e-eb135fb00622)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0620 18:11:04.180581  362254 logs.go:138] Found kubelet problem: Jun 20 18:08:13 old-k8s-version-577369 kubelet[1201]: E0620 18:08:13.248654    1201 pod_workers.go:191] Error syncing pod 18cfc59b-503a-463e-80f0-2a08c7711566 ("metrics-server-9975d5f86-nxk7p_kube-system(18cfc59b-503a-463e-80f0-2a08c7711566)"), skipping: failed to "StartContainer" for "metrics-server" with ErrImagePull: "rpc error: code = Unknown desc = Error response from daemon: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.85.1:53: no such host"
	W0620 18:11:04.180807  362254 logs.go:138] Found kubelet problem: Jun 20 18:08:18 old-k8s-version-577369 kubelet[1201]: E0620 18:08:18.213216    1201 pod_workers.go:191] Error syncing pod a0906cb1-30c6-4e85-9f0e-eb135fb00622 ("dashboard-metrics-scraper-8d5bb5db8-hhrcd_kubernetes-dashboard(a0906cb1-30c6-4e85-9f0e-eb135fb00622)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0620 18:11:04.181015  362254 logs.go:138] Found kubelet problem: Jun 20 18:08:24 old-k8s-version-577369 kubelet[1201]: E0620 18:08:24.213044    1201 pod_workers.go:191] Error syncing pod 18cfc59b-503a-463e-80f0-2a08c7711566 ("metrics-server-9975d5f86-nxk7p_kube-system(18cfc59b-503a-463e-80f0-2a08c7711566)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0620 18:11:04.186304  362254 logs.go:138] Found kubelet problem: Jun 20 18:08:31 old-k8s-version-577369 kubelet[1201]: E0620 18:08:31.863326    1201 pod_workers.go:191] Error syncing pod a0906cb1-30c6-4e85-9f0e-eb135fb00622 ("dashboard-metrics-scraper-8d5bb5db8-hhrcd_kubernetes-dashboard(a0906cb1-30c6-4e85-9f0e-eb135fb00622)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ErrImagePull: "rpc error: code = Unknown desc = [DEPRECATION NOTICE] Docker Image Format v1 and Docker Image manifest version 2, schema 1 support is disabled by default and will be removed in an upcoming release. Suggest the author of registry.k8s.io/echoserver:1.4 to upgrade the image to the OCI Format or Docker Image manifest v2, schema 2. More information at https://docs.docker.com/go/deprecated-image-specs/"
	W0620 18:11:04.186572  362254 logs.go:138] Found kubelet problem: Jun 20 18:08:35 old-k8s-version-577369 kubelet[1201]: E0620 18:08:35.226288    1201 pod_workers.go:191] Error syncing pod 18cfc59b-503a-463e-80f0-2a08c7711566 ("metrics-server-9975d5f86-nxk7p_kube-system(18cfc59b-503a-463e-80f0-2a08c7711566)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0620 18:11:04.186801  362254 logs.go:138] Found kubelet problem: Jun 20 18:08:44 old-k8s-version-577369 kubelet[1201]: E0620 18:08:44.214061    1201 pod_workers.go:191] Error syncing pod a0906cb1-30c6-4e85-9f0e-eb135fb00622 ("dashboard-metrics-scraper-8d5bb5db8-hhrcd_kubernetes-dashboard(a0906cb1-30c6-4e85-9f0e-eb135fb00622)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0620 18:11:04.187082  362254 logs.go:138] Found kubelet problem: Jun 20 18:08:46 old-k8s-version-577369 kubelet[1201]: E0620 18:08:46.213051    1201 pod_workers.go:191] Error syncing pod 18cfc59b-503a-463e-80f0-2a08c7711566 ("metrics-server-9975d5f86-nxk7p_kube-system(18cfc59b-503a-463e-80f0-2a08c7711566)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0620 18:11:04.187306  362254 logs.go:138] Found kubelet problem: Jun 20 18:08:57 old-k8s-version-577369 kubelet[1201]: E0620 18:08:57.213817    1201 pod_workers.go:191] Error syncing pod a0906cb1-30c6-4e85-9f0e-eb135fb00622 ("dashboard-metrics-scraper-8d5bb5db8-hhrcd_kubernetes-dashboard(a0906cb1-30c6-4e85-9f0e-eb135fb00622)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0620 18:11:04.187514  362254 logs.go:138] Found kubelet problem: Jun 20 18:08:58 old-k8s-version-577369 kubelet[1201]: E0620 18:08:58.213216    1201 pod_workers.go:191] Error syncing pod 18cfc59b-503a-463e-80f0-2a08c7711566 ("metrics-server-9975d5f86-nxk7p_kube-system(18cfc59b-503a-463e-80f0-2a08c7711566)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0620 18:11:04.187732  362254 logs.go:138] Found kubelet problem: Jun 20 18:09:11 old-k8s-version-577369 kubelet[1201]: E0620 18:09:11.219178    1201 pod_workers.go:191] Error syncing pod 18cfc59b-503a-463e-80f0-2a08c7711566 ("metrics-server-9975d5f86-nxk7p_kube-system(18cfc59b-503a-463e-80f0-2a08c7711566)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0620 18:11:04.187954  362254 logs.go:138] Found kubelet problem: Jun 20 18:09:11 old-k8s-version-577369 kubelet[1201]: E0620 18:09:11.223804    1201 pod_workers.go:191] Error syncing pod a0906cb1-30c6-4e85-9f0e-eb135fb00622 ("dashboard-metrics-scraper-8d5bb5db8-hhrcd_kubernetes-dashboard(a0906cb1-30c6-4e85-9f0e-eb135fb00622)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0620 18:11:04.188162  362254 logs.go:138] Found kubelet problem: Jun 20 18:09:25 old-k8s-version-577369 kubelet[1201]: E0620 18:09:25.213628    1201 pod_workers.go:191] Error syncing pod 18cfc59b-503a-463e-80f0-2a08c7711566 ("metrics-server-9975d5f86-nxk7p_kube-system(18cfc59b-503a-463e-80f0-2a08c7711566)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0620 18:11:04.188387  362254 logs.go:138] Found kubelet problem: Jun 20 18:09:25 old-k8s-version-577369 kubelet[1201]: E0620 18:09:25.220085    1201 pod_workers.go:191] Error syncing pod a0906cb1-30c6-4e85-9f0e-eb135fb00622 ("dashboard-metrics-scraper-8d5bb5db8-hhrcd_kubernetes-dashboard(a0906cb1-30c6-4e85-9f0e-eb135fb00622)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0620 18:11:04.188608  362254 logs.go:138] Found kubelet problem: Jun 20 18:09:37 old-k8s-version-577369 kubelet[1201]: E0620 18:09:37.218633    1201 pod_workers.go:191] Error syncing pod 18cfc59b-503a-463e-80f0-2a08c7711566 ("metrics-server-9975d5f86-nxk7p_kube-system(18cfc59b-503a-463e-80f0-2a08c7711566)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0620 18:11:04.188830  362254 logs.go:138] Found kubelet problem: Jun 20 18:09:39 old-k8s-version-577369 kubelet[1201]: E0620 18:09:39.213129    1201 pod_workers.go:191] Error syncing pod a0906cb1-30c6-4e85-9f0e-eb135fb00622 ("dashboard-metrics-scraper-8d5bb5db8-hhrcd_kubernetes-dashboard(a0906cb1-30c6-4e85-9f0e-eb135fb00622)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0620 18:11:04.189038  362254 logs.go:138] Found kubelet problem: Jun 20 18:09:49 old-k8s-version-577369 kubelet[1201]: E0620 18:09:49.212880    1201 pod_workers.go:191] Error syncing pod 18cfc59b-503a-463e-80f0-2a08c7711566 ("metrics-server-9975d5f86-nxk7p_kube-system(18cfc59b-503a-463e-80f0-2a08c7711566)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0620 18:11:04.189258  362254 logs.go:138] Found kubelet problem: Jun 20 18:09:52 old-k8s-version-577369 kubelet[1201]: E0620 18:09:52.213161    1201 pod_workers.go:191] Error syncing pod a0906cb1-30c6-4e85-9f0e-eb135fb00622 ("dashboard-metrics-scraper-8d5bb5db8-hhrcd_kubernetes-dashboard(a0906cb1-30c6-4e85-9f0e-eb135fb00622)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0620 18:11:04.189468  362254 logs.go:138] Found kubelet problem: Jun 20 18:10:01 old-k8s-version-577369 kubelet[1201]: E0620 18:10:01.220058    1201 pod_workers.go:191] Error syncing pod 18cfc59b-503a-463e-80f0-2a08c7711566 ("metrics-server-9975d5f86-nxk7p_kube-system(18cfc59b-503a-463e-80f0-2a08c7711566)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0620 18:11:04.189694  362254 logs.go:138] Found kubelet problem: Jun 20 18:10:07 old-k8s-version-577369 kubelet[1201]: E0620 18:10:07.213133    1201 pod_workers.go:191] Error syncing pod a0906cb1-30c6-4e85-9f0e-eb135fb00622 ("dashboard-metrics-scraper-8d5bb5db8-hhrcd_kubernetes-dashboard(a0906cb1-30c6-4e85-9f0e-eb135fb00622)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0620 18:11:04.189902  362254 logs.go:138] Found kubelet problem: Jun 20 18:10:16 old-k8s-version-577369 kubelet[1201]: E0620 18:10:16.212946    1201 pod_workers.go:191] Error syncing pod 18cfc59b-503a-463e-80f0-2a08c7711566 ("metrics-server-9975d5f86-nxk7p_kube-system(18cfc59b-503a-463e-80f0-2a08c7711566)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0620 18:11:04.190121  362254 logs.go:138] Found kubelet problem: Jun 20 18:10:18 old-k8s-version-577369 kubelet[1201]: E0620 18:10:18.220165    1201 pod_workers.go:191] Error syncing pod a0906cb1-30c6-4e85-9f0e-eb135fb00622 ("dashboard-metrics-scraper-8d5bb5db8-hhrcd_kubernetes-dashboard(a0906cb1-30c6-4e85-9f0e-eb135fb00622)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0620 18:11:04.190341  362254 logs.go:138] Found kubelet problem: Jun 20 18:10:31 old-k8s-version-577369 kubelet[1201]: E0620 18:10:31.213199    1201 pod_workers.go:191] Error syncing pod a0906cb1-30c6-4e85-9f0e-eb135fb00622 ("dashboard-metrics-scraper-8d5bb5db8-hhrcd_kubernetes-dashboard(a0906cb1-30c6-4e85-9f0e-eb135fb00622)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0620 18:11:04.190550  362254 logs.go:138] Found kubelet problem: Jun 20 18:10:31 old-k8s-version-577369 kubelet[1201]: E0620 18:10:31.215427    1201 pod_workers.go:191] Error syncing pod 18cfc59b-503a-463e-80f0-2a08c7711566 ("metrics-server-9975d5f86-nxk7p_kube-system(18cfc59b-503a-463e-80f0-2a08c7711566)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0620 18:11:04.190773  362254 logs.go:138] Found kubelet problem: Jun 20 18:10:42 old-k8s-version-577369 kubelet[1201]: E0620 18:10:42.213713    1201 pod_workers.go:191] Error syncing pod a0906cb1-30c6-4e85-9f0e-eb135fb00622 ("dashboard-metrics-scraper-8d5bb5db8-hhrcd_kubernetes-dashboard(a0906cb1-30c6-4e85-9f0e-eb135fb00622)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0620 18:11:04.193468  362254 logs.go:138] Found kubelet problem: Jun 20 18:10:43 old-k8s-version-577369 kubelet[1201]: E0620 18:10:43.221571    1201 pod_workers.go:191] Error syncing pod 18cfc59b-503a-463e-80f0-2a08c7711566 ("metrics-server-9975d5f86-nxk7p_kube-system(18cfc59b-503a-463e-80f0-2a08c7711566)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0620 18:11:04.193769  362254 logs.go:138] Found kubelet problem: Jun 20 18:10:55 old-k8s-version-577369 kubelet[1201]: E0620 18:10:55.213780    1201 pod_workers.go:191] Error syncing pod a0906cb1-30c6-4e85-9f0e-eb135fb00622 ("dashboard-metrics-scraper-8d5bb5db8-hhrcd_kubernetes-dashboard(a0906cb1-30c6-4e85-9f0e-eb135fb00622)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0620 18:11:04.195893  362254 logs.go:138] Found kubelet problem: Jun 20 18:10:56 old-k8s-version-577369 kubelet[1201]: E0620 18:10:56.254103    1201 pod_workers.go:191] Error syncing pod 18cfc59b-503a-463e-80f0-2a08c7711566 ("metrics-server-9975d5f86-nxk7p_kube-system(18cfc59b-503a-463e-80f0-2a08c7711566)"), skipping: failed to "StartContainer" for "metrics-server" with ErrImagePull: "rpc error: code = Unknown desc = Error response from daemon: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.85.1:53: no such host"
	I0620 18:11:04.195923  362254 logs.go:123] Gathering logs for describe nodes ...
	I0620 18:11:04.195949  362254 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.20.0/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig"
	I0620 18:11:04.426160  362254 logs.go:123] Gathering logs for kube-apiserver [760f9a7d272c] ...
	I0620 18:11:04.426342  362254 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 760f9a7d272c"
	I0620 18:11:04.528343  362254 logs.go:123] Gathering logs for kube-scheduler [ab177be73cae] ...
	I0620 18:11:04.528424  362254 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 ab177be73cae"
	I0620 18:11:04.573441  362254 logs.go:123] Gathering logs for storage-provisioner [16e28ef3ddc9] ...
	I0620 18:11:04.573520  362254 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 16e28ef3ddc9"
	I0620 18:11:04.632739  362254 logs.go:123] Gathering logs for Docker ...
	I0620 18:11:04.632808  362254 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u docker -u cri-docker -n 400"
	I0620 18:11:04.676322  362254 logs.go:123] Gathering logs for kube-apiserver [075e697d07c8] ...
	I0620 18:11:04.676396  362254 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 075e697d07c8"
	I0620 18:11:04.751250  362254 logs.go:123] Gathering logs for etcd [25a953b6e46d] ...
	I0620 18:11:04.751327  362254 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 25a953b6e46d"
	I0620 18:11:04.825564  362254 logs.go:123] Gathering logs for kube-controller-manager [a4eee0f3ea35] ...
	I0620 18:11:04.825644  362254 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 a4eee0f3ea35"
	I0620 18:11:04.916404  362254 logs.go:123] Gathering logs for dmesg ...
	I0620 18:11:04.916479  362254 ssh_runner.go:195] Run: /bin/bash -c "sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400"
	I0620 18:11:04.944002  362254 logs.go:123] Gathering logs for etcd [81eaddbb4b45] ...
	I0620 18:11:04.944073  362254 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 81eaddbb4b45"
	I0620 18:11:04.995804  362254 logs.go:123] Gathering logs for coredns [5534d9d547fd] ...
	I0620 18:11:04.995893  362254 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 5534d9d547fd"
	I0620 18:11:05.044664  362254 logs.go:123] Gathering logs for kube-scheduler [4ed5438feb8b] ...
	I0620 18:11:05.044747  362254 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 4ed5438feb8b"
	I0620 18:11:05.082366  362254 logs.go:123] Gathering logs for kube-proxy [0b32a67571bb] ...
	I0620 18:11:05.082463  362254 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 0b32a67571bb"
	I0620 18:11:05.141372  362254 logs.go:123] Gathering logs for kubernetes-dashboard [49efdc60eccc] ...
	I0620 18:11:05.141442  362254 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 49efdc60eccc"
	I0620 18:11:05.173015  362254 logs.go:123] Gathering logs for storage-provisioner [74b467d165b4] ...
	I0620 18:11:05.173101  362254 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 74b467d165b4"
	I0620 18:11:05.211503  362254 logs.go:123] Gathering logs for coredns [7e60d81fce7f] ...
	I0620 18:11:05.211569  362254 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 7e60d81fce7f"
	I0620 18:11:05.253118  362254 logs.go:123] Gathering logs for kube-proxy [c47b7591b320] ...
	I0620 18:11:05.253195  362254 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 c47b7591b320"
	I0620 18:11:05.285055  362254 logs.go:123] Gathering logs for kube-controller-manager [aab72b193950] ...
	I0620 18:11:05.285121  362254 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 aab72b193950"
	I0620 18:11:05.348716  362254 logs.go:123] Gathering logs for container status ...
	I0620 18:11:05.348790  362254 ssh_runner.go:195] Run: /bin/bash -c "sudo `which crictl || echo crictl` ps -a || sudo docker ps -a"
	I0620 18:11:05.447039  362254 out.go:304] Setting ErrFile to fd 2...
	I0620 18:11:05.447134  362254 out.go:338] TERM=,COLORTERM=, which probably does not support color
	W0620 18:11:05.447272  362254 out.go:239] X Problems detected in kubelet:
	X Problems detected in kubelet:
	W0620 18:11:05.447326  362254 out.go:239]   Jun 20 18:10:31 old-k8s-version-577369 kubelet[1201]: E0620 18:10:31.215427    1201 pod_workers.go:191] Error syncing pod 18cfc59b-503a-463e-80f0-2a08c7711566 ("metrics-server-9975d5f86-nxk7p_kube-system(18cfc59b-503a-463e-80f0-2a08c7711566)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	  Jun 20 18:10:31 old-k8s-version-577369 kubelet[1201]: E0620 18:10:31.215427    1201 pod_workers.go:191] Error syncing pod 18cfc59b-503a-463e-80f0-2a08c7711566 ("metrics-server-9975d5f86-nxk7p_kube-system(18cfc59b-503a-463e-80f0-2a08c7711566)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0620 18:11:05.447358  362254 out.go:239]   Jun 20 18:10:42 old-k8s-version-577369 kubelet[1201]: E0620 18:10:42.213713    1201 pod_workers.go:191] Error syncing pod a0906cb1-30c6-4e85-9f0e-eb135fb00622 ("dashboard-metrics-scraper-8d5bb5db8-hhrcd_kubernetes-dashboard(a0906cb1-30c6-4e85-9f0e-eb135fb00622)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	  Jun 20 18:10:42 old-k8s-version-577369 kubelet[1201]: E0620 18:10:42.213713    1201 pod_workers.go:191] Error syncing pod a0906cb1-30c6-4e85-9f0e-eb135fb00622 ("dashboard-metrics-scraper-8d5bb5db8-hhrcd_kubernetes-dashboard(a0906cb1-30c6-4e85-9f0e-eb135fb00622)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0620 18:11:05.447432  362254 out.go:239]   Jun 20 18:10:43 old-k8s-version-577369 kubelet[1201]: E0620 18:10:43.221571    1201 pod_workers.go:191] Error syncing pod 18cfc59b-503a-463e-80f0-2a08c7711566 ("metrics-server-9975d5f86-nxk7p_kube-system(18cfc59b-503a-463e-80f0-2a08c7711566)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	  Jun 20 18:10:43 old-k8s-version-577369 kubelet[1201]: E0620 18:10:43.221571    1201 pod_workers.go:191] Error syncing pod 18cfc59b-503a-463e-80f0-2a08c7711566 ("metrics-server-9975d5f86-nxk7p_kube-system(18cfc59b-503a-463e-80f0-2a08c7711566)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0620 18:11:05.447521  362254 out.go:239]   Jun 20 18:10:55 old-k8s-version-577369 kubelet[1201]: E0620 18:10:55.213780    1201 pod_workers.go:191] Error syncing pod a0906cb1-30c6-4e85-9f0e-eb135fb00622 ("dashboard-metrics-scraper-8d5bb5db8-hhrcd_kubernetes-dashboard(a0906cb1-30c6-4e85-9f0e-eb135fb00622)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	  Jun 20 18:10:55 old-k8s-version-577369 kubelet[1201]: E0620 18:10:55.213780    1201 pod_workers.go:191] Error syncing pod a0906cb1-30c6-4e85-9f0e-eb135fb00622 ("dashboard-metrics-scraper-8d5bb5db8-hhrcd_kubernetes-dashboard(a0906cb1-30c6-4e85-9f0e-eb135fb00622)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0620 18:11:05.447602  362254 out.go:239]   Jun 20 18:10:56 old-k8s-version-577369 kubelet[1201]: E0620 18:10:56.254103    1201 pod_workers.go:191] Error syncing pod 18cfc59b-503a-463e-80f0-2a08c7711566 ("metrics-server-9975d5f86-nxk7p_kube-system(18cfc59b-503a-463e-80f0-2a08c7711566)"), skipping: failed to "StartContainer" for "metrics-server" with ErrImagePull: "rpc error: code = Unknown desc = Error response from daemon: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.85.1:53: no such host"
	  Jun 20 18:10:56 old-k8s-version-577369 kubelet[1201]: E0620 18:10:56.254103    1201 pod_workers.go:191] Error syncing pod 18cfc59b-503a-463e-80f0-2a08c7711566 ("metrics-server-9975d5f86-nxk7p_kube-system(18cfc59b-503a-463e-80f0-2a08c7711566)"), skipping: failed to "StartContainer" for "metrics-server" with ErrImagePull: "rpc error: code = Unknown desc = Error response from daemon: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.85.1:53: no such host"
	I0620 18:11:05.447662  362254 out.go:304] Setting ErrFile to fd 2...
	I0620 18:11:05.447707  362254 out.go:338] TERM=,COLORTERM=, which probably does not support color
	I0620 18:11:15.449472  362254 api_server.go:253] Checking apiserver healthz at https://192.168.85.2:8443/healthz ...
	I0620 18:11:15.463258  362254 api_server.go:279] https://192.168.85.2:8443/healthz returned 200:
	ok
	I0620 18:11:15.465704  362254 out.go:177] 
	W0620 18:11:15.467872  362254 out.go:239] X Exiting due to K8S_UNHEALTHY_CONTROL_PLANE: wait 6m0s for node: wait for healthy API server: controlPlane never updated to v1.20.0
	X Exiting due to K8S_UNHEALTHY_CONTROL_PLANE: wait 6m0s for node: wait for healthy API server: controlPlane never updated to v1.20.0
	W0620 18:11:15.467924  362254 out.go:239] * Suggestion: Control Plane could not update, try minikube delete --all --purge
	* Suggestion: Control Plane could not update, try minikube delete --all --purge
	W0620 18:11:15.467944  362254 out.go:239] * Related issue: https://github.com/kubernetes/minikube/issues/11417
	* Related issue: https://github.com/kubernetes/minikube/issues/11417
	W0620 18:11:15.467950  362254 out.go:239] * 
	* 
	W0620 18:11:15.468829  362254 out.go:239] ╭─────────────────────────────────────────────────────────────────────────────────────────────╮
	│                                                                                             │
	│    * If the above advice does not help, please let us know:                                 │
	│      https://github.com/kubernetes/minikube/issues/new/choose                               │
	│                                                                                             │
	│    * Please run `minikube logs --file=logs.txt` and attach logs.txt to the GitHub issue.    │
	│                                                                                             │
	╰─────────────────────────────────────────────────────────────────────────────────────────────╯
	╭─────────────────────────────────────────────────────────────────────────────────────────────╮
	│                                                                                             │
	│    * If the above advice does not help, please let us know:                                 │
	│      https://github.com/kubernetes/minikube/issues/new/choose                               │
	│                                                                                             │
	│    * Please run `minikube logs --file=logs.txt` and attach logs.txt to the GitHub issue.    │
	│                                                                                             │
	╰─────────────────────────────────────────────────────────────────────────────────────────────╯
	I0620 18:11:15.471344  362254 out.go:177] 

** /stderr **
start_stop_delete_test.go:259: failed to start minikube post-stop. args "out/minikube-linux-arm64 start -p old-k8s-version-577369 --memory=2200 --alsologtostderr --wait=true --kvm-network=default --kvm-qemu-uri=qemu:///system --disable-driver-mounts --keep-context=false --driver=docker  --container-runtime=docker --kubernetes-version=v1.20.0": exit status 102
helpers_test.go:222: -----------------------post-mortem--------------------------------
helpers_test.go:230: ======>  post-mortem[TestStartStop/group/old-k8s-version/serial/SecondStart]: docker inspect <======
helpers_test.go:231: (dbg) Run:  docker inspect old-k8s-version-577369
helpers_test.go:235: (dbg) docker inspect old-k8s-version-577369:

-- stdout --
	[
	    {
	        "Id": "c4d3d73364cad899054ce908aa131722f0ea31d5c2d95544fcf715bd51754671",
	        "Created": "2024-06-20T18:02:15.594669718Z",
	        "Path": "/usr/local/bin/entrypoint",
	        "Args": [
	            "/sbin/init"
	        ],
	        "State": {
	            "Status": "running",
	            "Running": true,
	            "Paused": false,
	            "Restarting": false,
	            "OOMKilled": false,
	            "Dead": false,
	            "Pid": 362440,
	            "ExitCode": 0,
	            "Error": "",
	            "StartedAt": "2024-06-20T18:05:02.794317634Z",
	            "FinishedAt": "2024-06-20T18:05:01.413666697Z"
	        },
	        "Image": "sha256:d01e921d87b5c98766e198911bba95096a87baa7b20caabee6d66ddda3a30e16",
	        "ResolvConfPath": "/var/lib/docker/containers/c4d3d73364cad899054ce908aa131722f0ea31d5c2d95544fcf715bd51754671/resolv.conf",
	        "HostnamePath": "/var/lib/docker/containers/c4d3d73364cad899054ce908aa131722f0ea31d5c2d95544fcf715bd51754671/hostname",
	        "HostsPath": "/var/lib/docker/containers/c4d3d73364cad899054ce908aa131722f0ea31d5c2d95544fcf715bd51754671/hosts",
	        "LogPath": "/var/lib/docker/containers/c4d3d73364cad899054ce908aa131722f0ea31d5c2d95544fcf715bd51754671/c4d3d73364cad899054ce908aa131722f0ea31d5c2d95544fcf715bd51754671-json.log",
	        "Name": "/old-k8s-version-577369",
	        "RestartCount": 0,
	        "Driver": "overlay2",
	        "Platform": "linux",
	        "MountLabel": "",
	        "ProcessLabel": "",
	        "AppArmorProfile": "unconfined",
	        "ExecIDs": null,
	        "HostConfig": {
	            "Binds": [
	                "/lib/modules:/lib/modules:ro",
	                "old-k8s-version-577369:/var"
	            ],
	            "ContainerIDFile": "",
	            "LogConfig": {
	                "Type": "json-file",
	                "Config": {}
	            },
	            "NetworkMode": "old-k8s-version-577369",
	            "PortBindings": {
	                "22/tcp": [
	                    {
	                        "HostIp": "127.0.0.1",
	                        "HostPort": ""
	                    }
	                ],
	                "2376/tcp": [
	                    {
	                        "HostIp": "127.0.0.1",
	                        "HostPort": ""
	                    }
	                ],
	                "32443/tcp": [
	                    {
	                        "HostIp": "127.0.0.1",
	                        "HostPort": ""
	                    }
	                ],
	                "5000/tcp": [
	                    {
	                        "HostIp": "127.0.0.1",
	                        "HostPort": ""
	                    }
	                ],
	                "8443/tcp": [
	                    {
	                        "HostIp": "127.0.0.1",
	                        "HostPort": ""
	                    }
	                ]
	            },
	            "RestartPolicy": {
	                "Name": "no",
	                "MaximumRetryCount": 0
	            },
	            "AutoRemove": false,
	            "VolumeDriver": "",
	            "VolumesFrom": null,
	            "ConsoleSize": [
	                0,
	                0
	            ],
	            "CapAdd": null,
	            "CapDrop": null,
	            "CgroupnsMode": "host",
	            "Dns": [],
	            "DnsOptions": [],
	            "DnsSearch": [],
	            "ExtraHosts": null,
	            "GroupAdd": null,
	            "IpcMode": "private",
	            "Cgroup": "",
	            "Links": null,
	            "OomScoreAdj": 0,
	            "PidMode": "",
	            "Privileged": true,
	            "PublishAllPorts": false,
	            "ReadonlyRootfs": false,
	            "SecurityOpt": [
	                "seccomp=unconfined",
	                "apparmor=unconfined",
	                "label=disable"
	            ],
	            "Tmpfs": {
	                "/run": "",
	                "/tmp": ""
	            },
	            "UTSMode": "",
	            "UsernsMode": "",
	            "ShmSize": 67108864,
	            "Runtime": "runc",
	            "Isolation": "",
	            "CpuShares": 0,
	            "Memory": 2306867200,
	            "NanoCpus": 2000000000,
	            "CgroupParent": "",
	            "BlkioWeight": 0,
	            "BlkioWeightDevice": [],
	            "BlkioDeviceReadBps": [],
	            "BlkioDeviceWriteBps": [],
	            "BlkioDeviceReadIOps": [],
	            "BlkioDeviceWriteIOps": [],
	            "CpuPeriod": 0,
	            "CpuQuota": 0,
	            "CpuRealtimePeriod": 0,
	            "CpuRealtimeRuntime": 0,
	            "CpusetCpus": "",
	            "CpusetMems": "",
	            "Devices": [],
	            "DeviceCgroupRules": null,
	            "DeviceRequests": null,
	            "MemoryReservation": 0,
	            "MemorySwap": 4613734400,
	            "MemorySwappiness": null,
	            "OomKillDisable": false,
	            "PidsLimit": null,
	            "Ulimits": [],
	            "CpuCount": 0,
	            "CpuPercent": 0,
	            "IOMaximumIOps": 0,
	            "IOMaximumBandwidth": 0,
	            "MaskedPaths": null,
	            "ReadonlyPaths": null
	        },
	        "GraphDriver": {
	            "Data": {
	                "LowerDir": "/var/lib/docker/overlay2/12a39ce1488abdfe7a427108b1f33bc367db04f2ce3023e8fbc4fa76863f37e3-init/diff:/var/lib/docker/overlay2/3f60bca961993969053ba4629c08bd2e3000c79c5940aaa31be086f923fe76a2/diff",
	                "MergedDir": "/var/lib/docker/overlay2/12a39ce1488abdfe7a427108b1f33bc367db04f2ce3023e8fbc4fa76863f37e3/merged",
	                "UpperDir": "/var/lib/docker/overlay2/12a39ce1488abdfe7a427108b1f33bc367db04f2ce3023e8fbc4fa76863f37e3/diff",
	                "WorkDir": "/var/lib/docker/overlay2/12a39ce1488abdfe7a427108b1f33bc367db04f2ce3023e8fbc4fa76863f37e3/work"
	            },
	            "Name": "overlay2"
	        },
	        "Mounts": [
	            {
	                "Type": "bind",
	                "Source": "/lib/modules",
	                "Destination": "/lib/modules",
	                "Mode": "ro",
	                "RW": false,
	                "Propagation": "rprivate"
	            },
	            {
	                "Type": "volume",
	                "Name": "old-k8s-version-577369",
	                "Source": "/var/lib/docker/volumes/old-k8s-version-577369/_data",
	                "Destination": "/var",
	                "Driver": "local",
	                "Mode": "z",
	                "RW": true,
	                "Propagation": ""
	            }
	        ],
	        "Config": {
	            "Hostname": "old-k8s-version-577369",
	            "Domainname": "",
	            "User": "",
	            "AttachStdin": false,
	            "AttachStdout": false,
	            "AttachStderr": false,
	            "ExposedPorts": {
	                "22/tcp": {},
	                "2376/tcp": {},
	                "32443/tcp": {},
	                "5000/tcp": {},
	                "8443/tcp": {}
	            },
	            "Tty": true,
	            "OpenStdin": false,
	            "StdinOnce": false,
	            "Env": [
	                "container=docker",
	                "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
	            ],
	            "Cmd": null,
	            "Image": "gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1718753665-19106@sha256:735aacbd61d487240dc39ba6e4d70dd6ae1ad6181ca2ba092d372605e48ee636",
	            "Volumes": null,
	            "WorkingDir": "/",
	            "Entrypoint": [
	                "/usr/local/bin/entrypoint",
	                "/sbin/init"
	            ],
	            "OnBuild": null,
	            "Labels": {
	                "created_by.minikube.sigs.k8s.io": "true",
	                "mode.minikube.sigs.k8s.io": "old-k8s-version-577369",
	                "name.minikube.sigs.k8s.io": "old-k8s-version-577369",
	                "role.minikube.sigs.k8s.io": ""
	            },
	            "StopSignal": "SIGRTMIN+3"
	        },
	        "NetworkSettings": {
	            "Bridge": "",
	            "SandboxID": "3931d835949d51cdaba053e30d371365c5c397b1d4566768bd331c8958a78b13",
	            "SandboxKey": "/var/run/docker/netns/3931d835949d",
	            "Ports": {
	                "22/tcp": [
	                    {
	                        "HostIp": "127.0.0.1",
	                        "HostPort": "33128"
	                    }
	                ],
	                "2376/tcp": [
	                    {
	                        "HostIp": "127.0.0.1",
	                        "HostPort": "33127"
	                    }
	                ],
	                "32443/tcp": [
	                    {
	                        "HostIp": "127.0.0.1",
	                        "HostPort": "33124"
	                    }
	                ],
	                "5000/tcp": [
	                    {
	                        "HostIp": "127.0.0.1",
	                        "HostPort": "33126"
	                    }
	                ],
	                "8443/tcp": [
	                    {
	                        "HostIp": "127.0.0.1",
	                        "HostPort": "33125"
	                    }
	                ]
	            },
	            "HairpinMode": false,
	            "LinkLocalIPv6Address": "",
	            "LinkLocalIPv6PrefixLen": 0,
	            "SecondaryIPAddresses": null,
	            "SecondaryIPv6Addresses": null,
	            "EndpointID": "",
	            "Gateway": "",
	            "GlobalIPv6Address": "",
	            "GlobalIPv6PrefixLen": 0,
	            "IPAddress": "",
	            "IPPrefixLen": 0,
	            "IPv6Gateway": "",
	            "MacAddress": "",
	            "Networks": {
	                "old-k8s-version-577369": {
	                    "IPAMConfig": {
	                        "IPv4Address": "192.168.85.2"
	                    },
	                    "Links": null,
	                    "Aliases": null,
	                    "MacAddress": "02:42:c0:a8:55:02",
	                    "NetworkID": "c61783ab8de1f95a445bcccb1c20832a63d43cf0050441f45622bd62211b83ae",
	                    "EndpointID": "5730e8426e7592f02fe86bb8198377828cfc0d258c3f72d90eae3d4273a8df33",
	                    "Gateway": "192.168.85.1",
	                    "IPAddress": "192.168.85.2",
	                    "IPPrefixLen": 24,
	                    "IPv6Gateway": "",
	                    "GlobalIPv6Address": "",
	                    "GlobalIPv6PrefixLen": 0,
	                    "DriverOpts": null,
	                    "DNSNames": [
	                        "old-k8s-version-577369",
	                        "c4d3d73364ca"
	                    ]
	                }
	            }
	        }
	    }
	]

-- /stdout --
helpers_test.go:239: (dbg) Run:  out/minikube-linux-arm64 status --format={{.Host}} -p old-k8s-version-577369 -n old-k8s-version-577369
helpers_test.go:244: <<< TestStartStop/group/old-k8s-version/serial/SecondStart FAILED: start of post-mortem logs <<<
helpers_test.go:245: ======>  post-mortem[TestStartStop/group/old-k8s-version/serial/SecondStart]: minikube logs <======
helpers_test.go:247: (dbg) Run:  out/minikube-linux-arm64 -p old-k8s-version-577369 logs -n 25
helpers_test.go:247: (dbg) Done: out/minikube-linux-arm64 -p old-k8s-version-577369 logs -n 25: (1.545539736s)
helpers_test.go:252: TestStartStop/group/old-k8s-version/serial/SecondStart logs: 
-- stdout --
	
	==> Audit <==
	|---------|--------------------------------------------------------|------------------------------|---------|---------|---------------------|---------------------|
	| Command |                          Args                          |           Profile            |  User   | Version |     Start Time      |      End Time       |
	|---------|--------------------------------------------------------|------------------------------|---------|---------|---------------------|---------------------|
	| ssh     | -p kubenet-978988 sudo                                 | kubenet-978988               | jenkins | v1.33.1 | 20 Jun 24 18:03 UTC | 20 Jun 24 18:03 UTC |
	|         | systemctl cat containerd                               |                              |         |         |                     |                     |
	|         | --no-pager                                             |                              |         |         |                     |                     |
	| ssh     | -p kubenet-978988 sudo cat                             | kubenet-978988               | jenkins | v1.33.1 | 20 Jun 24 18:03 UTC | 20 Jun 24 18:03 UTC |
	|         | /lib/systemd/system/containerd.service                 |                              |         |         |                     |                     |
	| ssh     | -p kubenet-978988 sudo cat                             | kubenet-978988               | jenkins | v1.33.1 | 20 Jun 24 18:03 UTC | 20 Jun 24 18:03 UTC |
	|         | /etc/containerd/config.toml                            |                              |         |         |                     |                     |
	| ssh     | -p kubenet-978988 sudo                                 | kubenet-978988               | jenkins | v1.33.1 | 20 Jun 24 18:03 UTC | 20 Jun 24 18:03 UTC |
	|         | containerd config dump                                 |                              |         |         |                     |                     |
	| ssh     | -p kubenet-978988 sudo                                 | kubenet-978988               | jenkins | v1.33.1 | 20 Jun 24 18:03 UTC |                     |
	|         | systemctl status crio --all                            |                              |         |         |                     |                     |
	|         | --full --no-pager                                      |                              |         |         |                     |                     |
	| ssh     | -p kubenet-978988 sudo                                 | kubenet-978988               | jenkins | v1.33.1 | 20 Jun 24 18:03 UTC | 20 Jun 24 18:03 UTC |
	|         | systemctl cat crio --no-pager                          |                              |         |         |                     |                     |
	| ssh     | -p kubenet-978988 sudo find                            | kubenet-978988               | jenkins | v1.33.1 | 20 Jun 24 18:03 UTC | 20 Jun 24 18:03 UTC |
	|         | /etc/crio -type f -exec sh -c                          |                              |         |         |                     |                     |
	|         | 'echo {}; cat {}' \;                                   |                              |         |         |                     |                     |
	| ssh     | -p kubenet-978988 sudo crio                            | kubenet-978988               | jenkins | v1.33.1 | 20 Jun 24 18:03 UTC | 20 Jun 24 18:03 UTC |
	|         | config                                                 |                              |         |         |                     |                     |
	| delete  | -p kubenet-978988                                      | kubenet-978988               | jenkins | v1.33.1 | 20 Jun 24 18:04 UTC | 20 Jun 24 18:04 UTC |
	| start   | -p embed-certs-221669                                  | embed-certs-221669           | jenkins | v1.33.1 | 20 Jun 24 18:04 UTC | 20 Jun 24 18:04 UTC |
	|         | --memory=2200                                          |                              |         |         |                     |                     |
	|         | --alsologtostderr --wait=true                          |                              |         |         |                     |                     |
	|         | --embed-certs --driver=docker                          |                              |         |         |                     |                     |
	|         |  --container-runtime=docker                            |                              |         |         |                     |                     |
	|         | --kubernetes-version=v1.30.2                           |                              |         |         |                     |                     |
	| addons  | enable metrics-server -p old-k8s-version-577369        | old-k8s-version-577369       | jenkins | v1.33.1 | 20 Jun 24 18:04 UTC | 20 Jun 24 18:04 UTC |
	|         | --images=MetricsServer=registry.k8s.io/echoserver:1.4  |                              |         |         |                     |                     |
	|         | --registries=MetricsServer=fake.domain                 |                              |         |         |                     |                     |
	| stop    | -p old-k8s-version-577369                              | old-k8s-version-577369       | jenkins | v1.33.1 | 20 Jun 24 18:04 UTC | 20 Jun 24 18:05 UTC |
	|         | --alsologtostderr -v=3                                 |                              |         |         |                     |                     |
	| addons  | enable metrics-server -p embed-certs-221669            | embed-certs-221669           | jenkins | v1.33.1 | 20 Jun 24 18:05 UTC | 20 Jun 24 18:05 UTC |
	|         | --images=MetricsServer=registry.k8s.io/echoserver:1.4  |                              |         |         |                     |                     |
	|         | --registries=MetricsServer=fake.domain                 |                              |         |         |                     |                     |
	| addons  | enable dashboard -p old-k8s-version-577369             | old-k8s-version-577369       | jenkins | v1.33.1 | 20 Jun 24 18:05 UTC | 20 Jun 24 18:05 UTC |
	|         | --images=MetricsScraper=registry.k8s.io/echoserver:1.4 |                              |         |         |                     |                     |
	| start   | -p old-k8s-version-577369                              | old-k8s-version-577369       | jenkins | v1.33.1 | 20 Jun 24 18:05 UTC |                     |
	|         | --memory=2200                                          |                              |         |         |                     |                     |
	|         | --alsologtostderr --wait=true                          |                              |         |         |                     |                     |
	|         | --kvm-network=default                                  |                              |         |         |                     |                     |
	|         | --kvm-qemu-uri=qemu:///system                          |                              |         |         |                     |                     |
	|         | --disable-driver-mounts                                |                              |         |         |                     |                     |
	|         | --keep-context=false                                   |                              |         |         |                     |                     |
	|         | --driver=docker                                        |                              |         |         |                     |                     |
	|         | --container-runtime=docker                             |                              |         |         |                     |                     |
	|         | --kubernetes-version=v1.20.0                           |                              |         |         |                     |                     |
	| stop    | -p embed-certs-221669                                  | embed-certs-221669           | jenkins | v1.33.1 | 20 Jun 24 18:05 UTC | 20 Jun 24 18:05 UTC |
	|         | --alsologtostderr -v=3                                 |                              |         |         |                     |                     |
	| addons  | enable dashboard -p embed-certs-221669                 | embed-certs-221669           | jenkins | v1.33.1 | 20 Jun 24 18:05 UTC | 20 Jun 24 18:05 UTC |
	|         | --images=MetricsScraper=registry.k8s.io/echoserver:1.4 |                              |         |         |                     |                     |
	| start   | -p embed-certs-221669                                  | embed-certs-221669           | jenkins | v1.33.1 | 20 Jun 24 18:05 UTC | 20 Jun 24 18:10 UTC |
	|         | --memory=2200                                          |                              |         |         |                     |                     |
	|         | --alsologtostderr --wait=true                          |                              |         |         |                     |                     |
	|         | --embed-certs --driver=docker                          |                              |         |         |                     |                     |
	|         |  --container-runtime=docker                            |                              |         |         |                     |                     |
	|         | --kubernetes-version=v1.30.2                           |                              |         |         |                     |                     |
	| image   | embed-certs-221669 image list                          | embed-certs-221669           | jenkins | v1.33.1 | 20 Jun 24 18:10 UTC | 20 Jun 24 18:10 UTC |
	|         | --format=json                                          |                              |         |         |                     |                     |
	| pause   | -p embed-certs-221669                                  | embed-certs-221669           | jenkins | v1.33.1 | 20 Jun 24 18:10 UTC | 20 Jun 24 18:10 UTC |
	|         | --alsologtostderr -v=1                                 |                              |         |         |                     |                     |
	| unpause | -p embed-certs-221669                                  | embed-certs-221669           | jenkins | v1.33.1 | 20 Jun 24 18:10 UTC | 20 Jun 24 18:10 UTC |
	|         | --alsologtostderr -v=1                                 |                              |         |         |                     |                     |
	| delete  | -p embed-certs-221669                                  | embed-certs-221669           | jenkins | v1.33.1 | 20 Jun 24 18:10 UTC | 20 Jun 24 18:10 UTC |
	| delete  | -p embed-certs-221669                                  | embed-certs-221669           | jenkins | v1.33.1 | 20 Jun 24 18:10 UTC | 20 Jun 24 18:10 UTC |
	| delete  | -p                                                     | disable-driver-mounts-710184 | jenkins | v1.33.1 | 20 Jun 24 18:10 UTC | 20 Jun 24 18:10 UTC |
	|         | disable-driver-mounts-710184                           |                              |         |         |                     |                     |
	| start   | -p no-preload-581163                                   | no-preload-581163            | jenkins | v1.33.1 | 20 Jun 24 18:10 UTC |                     |
	|         | --memory=2200                                          |                              |         |         |                     |                     |
	|         | --alsologtostderr                                      |                              |         |         |                     |                     |
	|         | --wait=true --preload=false                            |                              |         |         |                     |                     |
	|         | --driver=docker                                        |                              |         |         |                     |                     |
	|         | --container-runtime=docker                             |                              |         |         |                     |                     |
	|         | --kubernetes-version=v1.30.2                           |                              |         |         |                     |                     |
	|---------|--------------------------------------------------------|------------------------------|---------|---------|---------------------|---------------------|
	
	
	==> Last Start <==
	Log file created at: 2024/06/20 18:10:25
	Running on machine: ip-172-31-30-239
	Binary: Built with gc go1.22.4 for linux/arm64
	Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg
	I0620 18:10:25.140286  374244 out.go:291] Setting OutFile to fd 1 ...
	I0620 18:10:25.140419  374244 out.go:338] TERM=,COLORTERM=, which probably does not support color
	I0620 18:10:25.140429  374244 out.go:304] Setting ErrFile to fd 2...
	I0620 18:10:25.140434  374244 out.go:338] TERM=,COLORTERM=, which probably does not support color
	I0620 18:10:25.140703  374244 root.go:338] Updating PATH: /home/jenkins/minikube-integration/19106-2452/.minikube/bin
	I0620 18:10:25.141134  374244 out.go:298] Setting JSON to false
	I0620 18:10:25.142344  374244 start.go:129] hostinfo: {"hostname":"ip-172-31-30-239","uptime":6776,"bootTime":1718900249,"procs":226,"os":"linux","platform":"ubuntu","platformFamily":"debian","platformVersion":"20.04","kernelVersion":"5.15.0-1063-aws","kernelArch":"aarch64","virtualizationSystem":"","virtualizationRole":"","hostId":"92f46a7d-c249-4c12-924a-77f64874c910"}
	I0620 18:10:25.142471  374244 start.go:139] virtualization:  
	I0620 18:10:25.145030  374244 out.go:177] * [no-preload-581163] minikube v1.33.1 on Ubuntu 20.04 (arm64)
	I0620 18:10:25.148076  374244 out.go:177]   - MINIKUBE_LOCATION=19106
	I0620 18:10:25.148194  374244 notify.go:220] Checking for updates...
	I0620 18:10:25.152794  374244 out.go:177]   - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
	I0620 18:10:25.154851  374244 out.go:177]   - KUBECONFIG=/home/jenkins/minikube-integration/19106-2452/kubeconfig
	I0620 18:10:25.157021  374244 out.go:177]   - MINIKUBE_HOME=/home/jenkins/minikube-integration/19106-2452/.minikube
	I0620 18:10:25.159087  374244 out.go:177]   - MINIKUBE_BIN=out/minikube-linux-arm64
	I0620 18:10:25.161364  374244 out.go:177]   - MINIKUBE_FORCE_SYSTEMD=
	I0620 18:10:25.164043  374244 config.go:182] Loaded profile config "old-k8s-version-577369": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.20.0
	I0620 18:10:25.164151  374244 driver.go:392] Setting default libvirt URI to qemu:///system
	I0620 18:10:25.195171  374244 docker.go:122] docker version: linux-26.1.4:Docker Engine - Community
	I0620 18:10:25.195376  374244 cli_runner.go:164] Run: docker system info --format "{{json .}}"
	I0620 18:10:25.272049  374244 info.go:266] docker info: {ID:6ZPO:QZND:VNGE:LUKL:4Y3K:XELL:AAX4:2GTK:E6LM:MPRN:3ZXR:TTMR Containers:1 ContainersRunning:1 ContainersPaused:0 ContainersStopped:0 Images:4 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:true BridgeNfIP6Tables:true Debug:false NFd:33 OomKillDisable:true NGoroutines:52 SystemTime:2024-06-20 18:10:25.262577932 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1063-aws OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Architecture:aar
ch64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:2 MemTotal:8214900736 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ip-172-31-30-239 Labels:[] ExperimentalBuild:false ServerVersion:26.1.4 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:d2d58213f83a351ca8f528a95fbd145f5654e957 Expected:d2d58213f83a351ca8f528a95fbd145f5654e957} RuncCommit:{ID:v1.1.12-0-g51d5e94 Expected:v1.1.12-0-g51d5e94} InitCommit:{ID:de40ad0 Expected:de40ad0} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:<nil> ServerErro
rs:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.14.1] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.27.1]] Warnings:<nil>}}
	I0620 18:10:25.272319  374244 docker.go:295] overlay module found
	I0620 18:10:25.274882  374244 out.go:177] * Using the docker driver based on user configuration
	I0620 18:10:25.276978  374244 start.go:297] selected driver: docker
	I0620 18:10:25.276998  374244 start.go:901] validating driver "docker" against <nil>
	I0620 18:10:25.277028  374244 start.go:912] status for docker: {Installed:true Healthy:true Running:false NeedsImprovement:false Error:<nil> Reason: Fix: Doc: Version:}
	I0620 18:10:25.277694  374244 cli_runner.go:164] Run: docker system info --format "{{json .}}"
	I0620 18:10:25.329313  374244 info.go:266] docker info: {ID:6ZPO:QZND:VNGE:LUKL:4Y3K:XELL:AAX4:2GTK:E6LM:MPRN:3ZXR:TTMR Containers:1 ContainersRunning:1 ContainersPaused:0 ContainersStopped:0 Images:4 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:true BridgeNfIP6Tables:true Debug:false NFd:33 OomKillDisable:true NGoroutines:52 SystemTime:2024-06-20 18:10:25.320019962 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1063-aws OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Architecture:aar
ch64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:2 MemTotal:8214900736 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ip-172-31-30-239 Labels:[] ExperimentalBuild:false ServerVersion:26.1.4 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:d2d58213f83a351ca8f528a95fbd145f5654e957 Expected:d2d58213f83a351ca8f528a95fbd145f5654e957} RuncCommit:{ID:v1.1.12-0-g51d5e94 Expected:v1.1.12-0-g51d5e94} InitCommit:{ID:de40ad0 Expected:de40ad0} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:<nil> ServerErro
rs:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.14.1] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.27.1]] Warnings:<nil>}}
	I0620 18:10:25.329481  374244 start_flags.go:310] no existing cluster config was found, will generate one from the flags 
	I0620 18:10:25.329766  374244 start_flags.go:947] Waiting for all components: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
	I0620 18:10:25.331793  374244 out.go:177] * Using Docker driver with root privileges
	I0620 18:10:25.333591  374244 cni.go:84] Creating CNI manager for ""
	I0620 18:10:25.333619  374244 cni.go:158] "docker" driver + "docker" container runtime found on kubernetes v1.24+, recommending bridge
	I0620 18:10:25.333631  374244 start_flags.go:319] Found "bridge CNI" CNI - setting NetworkPlugin=cni
	I0620 18:10:25.333713  374244 start.go:340] cluster config:
	{Name:no-preload-581163 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1718753665-19106@sha256:735aacbd61d487240dc39ba6e4d70dd6ae1ad6181ca2ba092d372605e48ee636 Memory:2200 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.30.2 ClusterName:no-preload-581163 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local Containe
rRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.30.2 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHA
uthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
	I0620 18:10:25.337658  374244 out.go:177] * Starting "no-preload-581163" primary control-plane node in "no-preload-581163" cluster
	I0620 18:10:25.339490  374244 cache.go:121] Beginning downloading kic base image for docker with docker
	I0620 18:10:25.341478  374244 out.go:177] * Pulling base image v0.0.44-1718753665-19106 ...
	I0620 18:10:25.343276  374244 preload.go:132] Checking if preload exists for k8s version v1.30.2 and runtime docker
	I0620 18:10:25.343417  374244 profile.go:143] Saving config to /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/no-preload-581163/config.json ...
	I0620 18:10:25.343453  374244 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/no-preload-581163/config.json: {Name:mk4f7553f0df17b50adb52af5140e59aedb3b299 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
	I0620 18:10:25.343641  374244 image.go:79] Checking for gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1718753665-19106@sha256:735aacbd61d487240dc39ba6e4d70dd6ae1ad6181ca2ba092d372605e48ee636 in local docker daemon
	I0620 18:10:25.343755  374244 cache.go:107] acquiring lock: {Name:mkedbce372c8b021b75e838a4e792c036a761200 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
	I0620 18:10:25.343835  374244 cache.go:115] /home/jenkins/minikube-integration/19106-2452/.minikube/cache/images/arm64/gcr.io/k8s-minikube/storage-provisioner_v5 exists
	I0620 18:10:25.343845  374244 cache.go:96] cache image "gcr.io/k8s-minikube/storage-provisioner:v5" -> "/home/jenkins/minikube-integration/19106-2452/.minikube/cache/images/arm64/gcr.io/k8s-minikube/storage-provisioner_v5" took 96.343µs
	I0620 18:10:25.343950  374244 cache.go:80] save to tar file gcr.io/k8s-minikube/storage-provisioner:v5 -> /home/jenkins/minikube-integration/19106-2452/.minikube/cache/images/arm64/gcr.io/k8s-minikube/storage-provisioner_v5 succeeded
	I0620 18:10:25.343963  374244 cache.go:107] acquiring lock: {Name:mkd10451646dfb916de1a6eb2fe0396f4af72a3b Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
	I0620 18:10:25.344053  374244 image.go:134] retrieving image: registry.k8s.io/kube-apiserver:v1.30.2
	I0620 18:10:25.344232  374244 cache.go:107] acquiring lock: {Name:mkee8d060f0fb886e89d8e5c583392b7f43c7bdd Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
	I0620 18:10:25.344331  374244 cache.go:107] acquiring lock: {Name:mk04ededaedba903b8dff8c7ec32d396b026e506 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
	I0620 18:10:25.344420  374244 image.go:134] retrieving image: registry.k8s.io/kube-proxy:v1.30.2
	I0620 18:10:25.344516  374244 cache.go:107] acquiring lock: {Name:mkbebf560fd4699e07827812d3acd0c318918852 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
	I0620 18:10:25.344645  374244 cache.go:107] acquiring lock: {Name:mk06f8b9e9d5335d07d14799382f12deab117d6b Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
	I0620 18:10:25.344722  374244 image.go:134] retrieving image: registry.k8s.io/kube-controller-manager:v1.30.2
	I0620 18:10:25.344849  374244 cache.go:107] acquiring lock: {Name:mk17407c60e3cd8dcfba4c3820b6dc24090765d7 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
	I0620 18:10:25.344778  374244 cache.go:107] acquiring lock: {Name:mkc3933946500b54b23529b39e969a689c10d776 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
	I0620 18:10:25.345092  374244 image.go:134] retrieving image: registry.k8s.io/etcd:3.5.12-0
	I0620 18:10:25.345777  374244 image.go:134] retrieving image: registry.k8s.io/pause:3.9
	I0620 18:10:25.346051  374244 image.go:134] retrieving image: registry.k8s.io/kube-scheduler:v1.30.2
	I0620 18:10:25.346969  374244 image.go:134] retrieving image: registry.k8s.io/coredns/coredns:v1.11.1
	I0620 18:10:25.350550  374244 image.go:177] daemon lookup for registry.k8s.io/etcd:3.5.12-0: Error response from daemon: No such image: registry.k8s.io/etcd:3.5.12-0
	I0620 18:10:25.351027  374244 image.go:177] daemon lookup for registry.k8s.io/kube-apiserver:v1.30.2: Error response from daemon: No such image: registry.k8s.io/kube-apiserver:v1.30.2
	I0620 18:10:25.351232  374244 image.go:177] daemon lookup for registry.k8s.io/pause:3.9: Error response from daemon: No such image: registry.k8s.io/pause:3.9
	I0620 18:10:25.351428  374244 image.go:177] daemon lookup for registry.k8s.io/kube-scheduler:v1.30.2: Error response from daemon: No such image: registry.k8s.io/kube-scheduler:v1.30.2
	I0620 18:10:25.351618  374244 image.go:177] daemon lookup for registry.k8s.io/coredns/coredns:v1.11.1: Error response from daemon: No such image: registry.k8s.io/coredns/coredns:v1.11.1
	I0620 18:10:25.352693  374244 image.go:177] daemon lookup for registry.k8s.io/kube-proxy:v1.30.2: Error response from daemon: No such image: registry.k8s.io/kube-proxy:v1.30.2
	I0620 18:10:25.353918  374244 image.go:177] daemon lookup for registry.k8s.io/kube-controller-manager:v1.30.2: Error response from daemon: No such image: registry.k8s.io/kube-controller-manager:v1.30.2
	I0620 18:10:25.364362  374244 image.go:83] Found gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1718753665-19106@sha256:735aacbd61d487240dc39ba6e4d70dd6ae1ad6181ca2ba092d372605e48ee636 in local docker daemon, skipping pull
	I0620 18:10:25.364385  374244 cache.go:144] gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1718753665-19106@sha256:735aacbd61d487240dc39ba6e4d70dd6ae1ad6181ca2ba092d372605e48ee636 exists in daemon, skipping load
	I0620 18:10:25.364405  374244 cache.go:194] Successfully downloaded all kic artifacts
	I0620 18:10:25.364443  374244 start.go:360] acquireMachinesLock for no-preload-581163: {Name:mk527853ec2d83cc217bc2bea3950d445008ca15 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
	I0620 18:10:25.364559  374244 start.go:364] duration metric: took 93.856µs to acquireMachinesLock for "no-preload-581163"
	I0620 18:10:25.364604  374244 start.go:93] Provisioning new machine with config: &{Name:no-preload-581163 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1718753665-19106@sha256:735aacbd61d487240dc39ba6e4d70dd6ae1ad6181ca2ba092d372605e48ee636 Memory:2200 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.30.2 ClusterName:no-preload-581163 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.30.2 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} &{Name: IP: Port:8443 KubernetesVersion:v1.30.2 ContainerRuntime:docker ControlPlane:true Worker:true}
	I0620 18:10:25.364679  374244 start.go:125] createHost starting for "" (driver="docker")
	I0620 18:10:23.434756  362254 pod_ready.go:102] pod "metrics-server-9975d5f86-nxk7p" in "kube-system" namespace has status "Ready":"False"
	I0620 18:10:25.932711  362254 pod_ready.go:102] pod "metrics-server-9975d5f86-nxk7p" in "kube-system" namespace has status "Ready":"False"
	I0620 18:10:25.369007  374244 out.go:204] * Creating docker container (CPUs=2, Memory=2200MB) ...
	I0620 18:10:25.369246  374244 start.go:159] libmachine.API.Create for "no-preload-581163" (driver="docker")
	I0620 18:10:25.369277  374244 client.go:168] LocalClient.Create starting
	I0620 18:10:25.369346  374244 main.go:141] libmachine: Reading certificate data from /home/jenkins/minikube-integration/19106-2452/.minikube/certs/ca.pem
	I0620 18:10:25.369381  374244 main.go:141] libmachine: Decoding PEM data...
	I0620 18:10:25.369394  374244 main.go:141] libmachine: Parsing certificate...
	I0620 18:10:25.369453  374244 main.go:141] libmachine: Reading certificate data from /home/jenkins/minikube-integration/19106-2452/.minikube/certs/cert.pem
	I0620 18:10:25.369479  374244 main.go:141] libmachine: Decoding PEM data...
	I0620 18:10:25.369494  374244 main.go:141] libmachine: Parsing certificate...
	I0620 18:10:25.369871  374244 cli_runner.go:164] Run: docker network inspect no-preload-581163 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
	W0620 18:10:25.392403  374244 cli_runner.go:211] docker network inspect no-preload-581163 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}" returned with exit code 1
	I0620 18:10:25.392565  374244 network_create.go:284] running [docker network inspect no-preload-581163] to gather additional debugging logs...
	I0620 18:10:25.392607  374244 cli_runner.go:164] Run: docker network inspect no-preload-581163
	W0620 18:10:25.413318  374244 cli_runner.go:211] docker network inspect no-preload-581163 returned with exit code 1
	I0620 18:10:25.413347  374244 network_create.go:287] error running [docker network inspect no-preload-581163]: docker network inspect no-preload-581163: exit status 1
	stdout:
	[]
	
	stderr:
	Error response from daemon: network no-preload-581163 not found
	I0620 18:10:25.413359  374244 network_create.go:289] output of [docker network inspect no-preload-581163]: -- stdout --
	[]
	
	-- /stdout --
	** stderr ** 
	Error response from daemon: network no-preload-581163 not found
	
	** /stderr **
	I0620 18:10:25.413458  374244 cli_runner.go:164] Run: docker network inspect bridge --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
	I0620 18:10:25.436030  374244 network.go:211] skipping subnet 192.168.49.0/24 that is taken: &{IP:192.168.49.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.49.0/24 Gateway:192.168.49.1 ClientMin:192.168.49.2 ClientMax:192.168.49.254 Broadcast:192.168.49.255 IsPrivate:true Interface:{IfaceName:br-9fc608b50896 IfaceIPv4:192.168.49.1 IfaceMTU:1500 IfaceMAC:02:42:1f:52:bf:e7} reservation:<nil>}
	I0620 18:10:25.436434  374244 network.go:211] skipping subnet 192.168.58.0/24 that is taken: &{IP:192.168.58.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.58.0/24 Gateway:192.168.58.1 ClientMin:192.168.58.2 ClientMax:192.168.58.254 Broadcast:192.168.58.255 IsPrivate:true Interface:{IfaceName:br-b5401af0733d IfaceIPv4:192.168.58.1 IfaceMTU:1500 IfaceMAC:02:42:5f:fb:31:97} reservation:<nil>}
	I0620 18:10:25.436964  374244 network.go:211] skipping subnet 192.168.67.0/24 that is taken: &{IP:192.168.67.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.67.0/24 Gateway:192.168.67.1 ClientMin:192.168.67.2 ClientMax:192.168.67.254 Broadcast:192.168.67.255 IsPrivate:true Interface:{IfaceName:br-914a5cb18dec IfaceIPv4:192.168.67.1 IfaceMTU:1500 IfaceMAC:02:42:8c:e7:e6:fd} reservation:<nil>}
	I0620 18:10:25.437544  374244 network.go:206] using free private subnet 192.168.76.0/24: &{IP:192.168.76.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.76.0/24 Gateway:192.168.76.1 ClientMin:192.168.76.2 ClientMax:192.168.76.254 Broadcast:192.168.76.255 IsPrivate:true Interface:{IfaceName: IfaceIPv4: IfaceMTU:0 IfaceMAC:} reservation:0x4001b483c0}
	I0620 18:10:25.437584  374244 network_create.go:124] attempt to create docker network no-preload-581163 192.168.76.0/24 with gateway 192.168.76.1 and MTU of 1500 ...
	I0620 18:10:25.437662  374244 cli_runner.go:164] Run: docker network create --driver=bridge --subnet=192.168.76.0/24 --gateway=192.168.76.1 -o --ip-masq -o --icc -o com.docker.network.driver.mtu=1500 --label=created_by.minikube.sigs.k8s.io=true --label=name.minikube.sigs.k8s.io=no-preload-581163 no-preload-581163
	I0620 18:10:25.518393  374244 network_create.go:108] docker network no-preload-581163 192.168.76.0/24 created
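Editor's note: the subnet scan above steps through the /24s already claimed by other minikube networks (192.168.49.0/24, 192.168.58.0/24, 192.168.67.0/24) before settling on the free 192.168.76.0/24, which is then created with the `docker network create` call shown in the log. A minimal sketch of that same invocation, assuming only that the docker CLI is on PATH (name, subnet, gateway and labels copied from the log line above):

    package main

    import (
        "fmt"
        "os/exec"
    )

    func main() {
        // Same invocation as in the log; a failure here usually means the
        // requested subnet overlaps an existing bridge network.
        cmd := exec.Command("docker", "network", "create",
            "--driver=bridge",
            "--subnet=192.168.76.0/24", "--gateway=192.168.76.1",
            "-o", "--ip-masq", "-o", "--icc",
            "-o", "com.docker.network.driver.mtu=1500",
            "--label=created_by.minikube.sigs.k8s.io=true",
            "--label=name.minikube.sigs.k8s.io=no-preload-581163",
            "no-preload-581163")
        out, err := cmd.CombinedOutput()
        fmt.Printf("%s", out)
        if err != nil {
            fmt.Println("network create failed:", err)
        }
    }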
	I0620 18:10:25.518425  374244 kic.go:121] calculated static IP "192.168.76.2" for the "no-preload-581163" container
	I0620 18:10:25.518497  374244 cli_runner.go:164] Run: docker ps -a --format {{.Names}}
	I0620 18:10:25.534658  374244 cli_runner.go:164] Run: docker volume create no-preload-581163 --label name.minikube.sigs.k8s.io=no-preload-581163 --label created_by.minikube.sigs.k8s.io=true
	I0620 18:10:25.551117  374244 oci.go:103] Successfully created a docker volume no-preload-581163
	I0620 18:10:25.551213  374244 cli_runner.go:164] Run: docker run --rm --name no-preload-581163-preload-sidecar --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=no-preload-581163 --entrypoint /usr/bin/test -v no-preload-581163:/var gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1718753665-19106@sha256:735aacbd61d487240dc39ba6e4d70dd6ae1ad6181ca2ba092d372605e48ee636 -d /var/lib
	I0620 18:10:25.887022  374244 cache.go:162] opening:  /home/jenkins/minikube-integration/19106-2452/.minikube/cache/images/arm64/registry.k8s.io/kube-controller-manager_v1.30.2
	I0620 18:10:25.906506  374244 cache.go:162] opening:  /home/jenkins/minikube-integration/19106-2452/.minikube/cache/images/arm64/registry.k8s.io/pause_3.9
	I0620 18:10:25.910363  374244 cache.go:162] opening:  /home/jenkins/minikube-integration/19106-2452/.minikube/cache/images/arm64/registry.k8s.io/etcd_3.5.12-0
	I0620 18:10:25.916621  374244 cache.go:162] opening:  /home/jenkins/minikube-integration/19106-2452/.minikube/cache/images/arm64/registry.k8s.io/coredns/coredns_v1.11.1
	I0620 18:10:25.936924  374244 cache.go:162] opening:  /home/jenkins/minikube-integration/19106-2452/.minikube/cache/images/arm64/registry.k8s.io/kube-proxy_v1.30.2
	I0620 18:10:25.962239  374244 cache.go:162] opening:  /home/jenkins/minikube-integration/19106-2452/.minikube/cache/images/arm64/registry.k8s.io/kube-apiserver_v1.30.2
	I0620 18:10:26.007523  374244 cache.go:162] opening:  /home/jenkins/minikube-integration/19106-2452/.minikube/cache/images/arm64/registry.k8s.io/kube-scheduler_v1.30.2
	I0620 18:10:26.014658  374244 cache.go:157] /home/jenkins/minikube-integration/19106-2452/.minikube/cache/images/arm64/registry.k8s.io/pause_3.9 exists
	I0620 18:10:26.014690  374244 cache.go:96] cache image "registry.k8s.io/pause:3.9" -> "/home/jenkins/minikube-integration/19106-2452/.minikube/cache/images/arm64/registry.k8s.io/pause_3.9" took 670.360734ms
	I0620 18:10:26.014704  374244 cache.go:80] save to tar file registry.k8s.io/pause:3.9 -> /home/jenkins/minikube-integration/19106-2452/.minikube/cache/images/arm64/registry.k8s.io/pause_3.9 succeeded
	I0620 18:10:26.262000  374244 oci.go:107] Successfully prepared a docker volume no-preload-581163
	I0620 18:10:26.262032  374244 preload.go:132] Checking if preload exists for k8s version v1.30.2 and runtime docker
	W0620 18:10:26.262159  374244 cgroups_linux.go:77] Your kernel does not support swap limit capabilities or the cgroup is not mounted.
	I0620 18:10:26.262277  374244 cli_runner.go:164] Run: docker info --format "'{{json .SecurityOptions}}'"
	I0620 18:10:26.371195  374244 cli_runner.go:164] Run: docker run -d -t --privileged --security-opt seccomp=unconfined --tmpfs /tmp --tmpfs /run -v /lib/modules:/lib/modules:ro --hostname no-preload-581163 --name no-preload-581163 --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=no-preload-581163 --label role.minikube.sigs.k8s.io= --label mode.minikube.sigs.k8s.io=no-preload-581163 --network no-preload-581163 --ip 192.168.76.2 --volume no-preload-581163:/var --security-opt apparmor=unconfined --memory=2200mb --cpus=2 -e container=docker --expose 8443 --publish=127.0.0.1::8443 --publish=127.0.0.1::22 --publish=127.0.0.1::2376 --publish=127.0.0.1::5000 --publish=127.0.0.1::32443 gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1718753665-19106@sha256:735aacbd61d487240dc39ba6e4d70dd6ae1ad6181ca2ba092d372605e48ee636
	I0620 18:10:26.711511  374244 cache.go:157] /home/jenkins/minikube-integration/19106-2452/.minikube/cache/images/arm64/registry.k8s.io/kube-proxy_v1.30.2 exists
	I0620 18:10:26.711615  374244 cache.go:96] cache image "registry.k8s.io/kube-proxy:v1.30.2" -> "/home/jenkins/minikube-integration/19106-2452/.minikube/cache/images/arm64/registry.k8s.io/kube-proxy_v1.30.2" took 1.367405795s
	I0620 18:10:26.711644  374244 cache.go:80] save to tar file registry.k8s.io/kube-proxy:v1.30.2 -> /home/jenkins/minikube-integration/19106-2452/.minikube/cache/images/arm64/registry.k8s.io/kube-proxy_v1.30.2 succeeded
	I0620 18:10:26.797761  374244 cli_runner.go:164] Run: docker container inspect no-preload-581163 --format={{.State.Running}}
	I0620 18:10:26.842087  374244 cli_runner.go:164] Run: docker container inspect no-preload-581163 --format={{.State.Status}}
	I0620 18:10:26.876008  374244 cli_runner.go:164] Run: docker exec no-preload-581163 stat /var/lib/dpkg/alternatives/iptables
	I0620 18:10:26.979680  374244 oci.go:144] the created container "no-preload-581163" has a running status.
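Editor's note: the two `docker container inspect` calls above check `.State.Running` and `.State.Status` right after the `docker run`. A hedged sketch of that readiness poll: keep asking the daemon for `.State.Running` until it reports true (container name from the log; the 30-second timeout is an arbitrary choice for illustration):

    package main

    import (
        "fmt"
        "os/exec"
        "strings"
        "time"
    )

    func main() {
        name := "no-preload-581163"
        deadline := time.Now().Add(30 * time.Second) // illustrative timeout
        for time.Now().Before(deadline) {
            out, err := exec.Command("docker", "container", "inspect",
                name, "--format", "{{.State.Running}}").CombinedOutput()
            if err == nil && strings.TrimSpace(string(out)) == "true" {
                fmt.Println("container", name, "is running")
                return
            }
            time.Sleep(500 * time.Millisecond)
        }
        fmt.Println("timed out waiting for", name)
    }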
	I0620 18:10:26.979705  374244 kic.go:225] Creating ssh key for kic: /home/jenkins/minikube-integration/19106-2452/.minikube/machines/no-preload-581163/id_rsa...
	I0620 18:10:27.128500  374244 cache.go:157] /home/jenkins/minikube-integration/19106-2452/.minikube/cache/images/arm64/registry.k8s.io/coredns/coredns_v1.11.1 exists
	I0620 18:10:27.128545  374244 cache.go:96] cache image "registry.k8s.io/coredns/coredns:v1.11.1" -> "/home/jenkins/minikube-integration/19106-2452/.minikube/cache/images/arm64/registry.k8s.io/coredns/coredns_v1.11.1" took 1.78376557s
	I0620 18:10:27.128560  374244 cache.go:80] save to tar file registry.k8s.io/coredns/coredns:v1.11.1 -> /home/jenkins/minikube-integration/19106-2452/.minikube/cache/images/arm64/registry.k8s.io/coredns/coredns_v1.11.1 succeeded
	I0620 18:10:27.141429  374244 cache.go:157] /home/jenkins/minikube-integration/19106-2452/.minikube/cache/images/arm64/registry.k8s.io/kube-scheduler_v1.30.2 exists
	I0620 18:10:27.141457  374244 cache.go:96] cache image "registry.k8s.io/kube-scheduler:v1.30.2" -> "/home/jenkins/minikube-integration/19106-2452/.minikube/cache/images/arm64/registry.k8s.io/kube-scheduler_v1.30.2" took 1.796608371s
	I0620 18:10:27.141470  374244 cache.go:80] save to tar file registry.k8s.io/kube-scheduler:v1.30.2 -> /home/jenkins/minikube-integration/19106-2452/.minikube/cache/images/arm64/registry.k8s.io/kube-scheduler_v1.30.2 succeeded
	I0620 18:10:27.284953  374244 kic_runner.go:191] docker (temp): /home/jenkins/minikube-integration/19106-2452/.minikube/machines/no-preload-581163/id_rsa.pub --> /home/docker/.ssh/authorized_keys (381 bytes)
	I0620 18:10:27.310013  374244 cli_runner.go:164] Run: docker container inspect no-preload-581163 --format={{.State.Status}}
	I0620 18:10:27.335828  374244 kic_runner.go:93] Run: chown docker:docker /home/docker/.ssh/authorized_keys
	I0620 18:10:27.335846  374244 kic_runner.go:114] Args: [docker exec --privileged no-preload-581163 chown docker:docker /home/docker/.ssh/authorized_keys]
	I0620 18:10:27.470194  374244 cli_runner.go:164] Run: docker container inspect no-preload-581163 --format={{.State.Status}}
	I0620 18:10:27.508500  374244 machine.go:94] provisionDockerMachine start ...
	I0620 18:10:27.508603  374244 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" no-preload-581163
	I0620 18:10:27.563227  374244 main.go:141] libmachine: Using SSH client type: native
	I0620 18:10:27.563490  374244 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x3e2bc0] 0x3e5420 <nil>  [] 0s} 127.0.0.1 33138 <nil> <nil>}
	I0620 18:10:27.563499  374244 main.go:141] libmachine: About to run SSH command:
	hostname
	I0620 18:10:27.564194  374244 main.go:141] libmachine: Error dialing TCP: ssh: handshake failed: read tcp 127.0.0.1:52676->127.0.0.1:33138: read: connection reset by peer
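Editor's note: the handshake error above is transient: sshd inside the freshly started container is not accepting connections yet, and provisioning simply retries until the forwarded port answers (the `hostname` command succeeds a few seconds later at 18:10:30 in this log). A minimal wait-loop sketch, assuming the forwarded address from the log (127.0.0.1:33138) and an arbitrary 60-second budget:

    package main

    import (
        "fmt"
        "net"
        "time"
    )

    func main() {
        addr := "127.0.0.1:33138" // container port 22 forwarded to the host, per the log
        deadline := time.Now().Add(60 * time.Second) // illustrative timeout
        for time.Now().Before(deadline) {
            conn, err := net.DialTimeout("tcp", addr, 2*time.Second)
            if err == nil {
                conn.Close()
                fmt.Println("ssh port is accepting connections")
                return
            }
            time.Sleep(time.Second)
        }
        fmt.Println("gave up waiting for", addr)
    }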
	I0620 18:10:27.587781  374244 cache.go:157] /home/jenkins/minikube-integration/19106-2452/.minikube/cache/images/arm64/registry.k8s.io/kube-controller-manager_v1.30.2 exists
	I0620 18:10:27.587814  374244 cache.go:96] cache image "registry.k8s.io/kube-controller-manager:v1.30.2" -> "/home/jenkins/minikube-integration/19106-2452/.minikube/cache/images/arm64/registry.k8s.io/kube-controller-manager_v1.30.2" took 2.243172109s
	I0620 18:10:27.587827  374244 cache.go:80] save to tar file registry.k8s.io/kube-controller-manager:v1.30.2 -> /home/jenkins/minikube-integration/19106-2452/.minikube/cache/images/arm64/registry.k8s.io/kube-controller-manager_v1.30.2 succeeded
	I0620 18:10:27.691185  374244 cache.go:157] /home/jenkins/minikube-integration/19106-2452/.minikube/cache/images/arm64/registry.k8s.io/kube-apiserver_v1.30.2 exists
	I0620 18:10:27.691440  374244 cache.go:96] cache image "registry.k8s.io/kube-apiserver:v1.30.2" -> "/home/jenkins/minikube-integration/19106-2452/.minikube/cache/images/arm64/registry.k8s.io/kube-apiserver_v1.30.2" took 2.347473981s
	I0620 18:10:27.691485  374244 cache.go:80] save to tar file registry.k8s.io/kube-apiserver:v1.30.2 -> /home/jenkins/minikube-integration/19106-2452/.minikube/cache/images/arm64/registry.k8s.io/kube-apiserver_v1.30.2 succeeded
	I0620 18:10:28.909295  374244 cache.go:157] /home/jenkins/minikube-integration/19106-2452/.minikube/cache/images/arm64/registry.k8s.io/etcd_3.5.12-0 exists
	I0620 18:10:28.909366  374244 cache.go:96] cache image "registry.k8s.io/etcd:3.5.12-0" -> "/home/jenkins/minikube-integration/19106-2452/.minikube/cache/images/arm64/registry.k8s.io/etcd_3.5.12-0" took 3.564854402s
	I0620 18:10:28.909386  374244 cache.go:80] save to tar file registry.k8s.io/etcd:3.5.12-0 -> /home/jenkins/minikube-integration/19106-2452/.minikube/cache/images/arm64/registry.k8s.io/etcd_3.5.12-0 succeeded
	I0620 18:10:28.909403  374244 cache.go:87] Successfully saved all images to host disk.
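Editor's note: each `cache.go:157 ... exists` line above corresponds to one image tarball under the host-side cache directory. A quick sketch that checks the same files are present on disk (base directory and file names taken verbatim from the log):

    package main

    import (
        "fmt"
        "os"
        "path/filepath"
    )

    func main() {
        base := "/home/jenkins/minikube-integration/19106-2452/.minikube/cache/images/arm64"
        images := []string{
            "registry.k8s.io/pause_3.9",
            "registry.k8s.io/kube-proxy_v1.30.2",
            "registry.k8s.io/coredns/coredns_v1.11.1",
            "registry.k8s.io/kube-scheduler_v1.30.2",
            "registry.k8s.io/kube-controller-manager_v1.30.2",
            "registry.k8s.io/kube-apiserver_v1.30.2",
            "registry.k8s.io/etcd_3.5.12-0",
        }
        for _, img := range images {
            p := filepath.Join(base, img)
            if _, err := os.Stat(p); err != nil {
                fmt.Println("missing:", p)
                continue
            }
            fmt.Println("cached:", p)
        }
    }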
	I0620 18:10:27.933287  362254 pod_ready.go:102] pod "metrics-server-9975d5f86-nxk7p" in "kube-system" namespace has status "Ready":"False"
	I0620 18:10:30.433760  362254 pod_ready.go:102] pod "metrics-server-9975d5f86-nxk7p" in "kube-system" namespace has status "Ready":"False"
	I0620 18:10:30.706785  374244 main.go:141] libmachine: SSH cmd err, output: <nil>: no-preload-581163
	
	I0620 18:10:30.706819  374244 ubuntu.go:169] provisioning hostname "no-preload-581163"
	I0620 18:10:30.706903  374244 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" no-preload-581163
	I0620 18:10:30.725058  374244 main.go:141] libmachine: Using SSH client type: native
	I0620 18:10:30.725355  374244 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x3e2bc0] 0x3e5420 <nil>  [] 0s} 127.0.0.1 33138 <nil> <nil>}
	I0620 18:10:30.725374  374244 main.go:141] libmachine: About to run SSH command:
	sudo hostname no-preload-581163 && echo "no-preload-581163" | sudo tee /etc/hostname
	I0620 18:10:30.872291  374244 main.go:141] libmachine: SSH cmd err, output: <nil>: no-preload-581163
	
	I0620 18:10:30.872378  374244 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" no-preload-581163
	I0620 18:10:30.891250  374244 main.go:141] libmachine: Using SSH client type: native
	I0620 18:10:30.891726  374244 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x3e2bc0] 0x3e5420 <nil>  [] 0s} 127.0.0.1 33138 <nil> <nil>}
	I0620 18:10:30.891751  374244 main.go:141] libmachine: About to run SSH command:
	
			if ! grep -xq '.*\sno-preload-581163' /etc/hosts; then
				if grep -xq '127.0.1.1\s.*' /etc/hosts; then
					sudo sed -i 's/^127.0.1.1\s.*/127.0.1.1 no-preload-581163/g' /etc/hosts;
				else 
					echo '127.0.1.1 no-preload-581163' | sudo tee -a /etc/hosts; 
				fi
			fi
	I0620 18:10:31.023292  374244 main.go:141] libmachine: SSH cmd err, output: <nil>: 
	I0620 18:10:31.023372  374244 ubuntu.go:175] set auth options {CertDir:/home/jenkins/minikube-integration/19106-2452/.minikube CaCertPath:/home/jenkins/minikube-integration/19106-2452/.minikube/certs/ca.pem CaPrivateKeyPath:/home/jenkins/minikube-integration/19106-2452/.minikube/certs/ca-key.pem CaCertRemotePath:/etc/docker/ca.pem ServerCertPath:/home/jenkins/minikube-integration/19106-2452/.minikube/machines/server.pem ServerKeyPath:/home/jenkins/minikube-integration/19106-2452/.minikube/machines/server-key.pem ClientKeyPath:/home/jenkins/minikube-integration/19106-2452/.minikube/certs/key.pem ServerCertRemotePath:/etc/docker/server.pem ServerKeyRemotePath:/etc/docker/server-key.pem ClientCertPath:/home/jenkins/minikube-integration/19106-2452/.minikube/certs/cert.pem ServerCertSANs:[] StorePath:/home/jenkins/minikube-integration/19106-2452/.minikube}
	I0620 18:10:31.023419  374244 ubuntu.go:177] setting up certificates
	I0620 18:10:31.023437  374244 provision.go:84] configureAuth start
	I0620 18:10:31.023498  374244 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" no-preload-581163
	I0620 18:10:31.041564  374244 provision.go:143] copyHostCerts
	I0620 18:10:31.041639  374244 exec_runner.go:144] found /home/jenkins/minikube-integration/19106-2452/.minikube/ca.pem, removing ...
	I0620 18:10:31.041655  374244 exec_runner.go:203] rm: /home/jenkins/minikube-integration/19106-2452/.minikube/ca.pem
	I0620 18:10:31.041735  374244 exec_runner.go:151] cp: /home/jenkins/minikube-integration/19106-2452/.minikube/certs/ca.pem --> /home/jenkins/minikube-integration/19106-2452/.minikube/ca.pem (1078 bytes)
	I0620 18:10:31.041834  374244 exec_runner.go:144] found /home/jenkins/minikube-integration/19106-2452/.minikube/cert.pem, removing ...
	I0620 18:10:31.041842  374244 exec_runner.go:203] rm: /home/jenkins/minikube-integration/19106-2452/.minikube/cert.pem
	I0620 18:10:31.041870  374244 exec_runner.go:151] cp: /home/jenkins/minikube-integration/19106-2452/.minikube/certs/cert.pem --> /home/jenkins/minikube-integration/19106-2452/.minikube/cert.pem (1123 bytes)
	I0620 18:10:31.041926  374244 exec_runner.go:144] found /home/jenkins/minikube-integration/19106-2452/.minikube/key.pem, removing ...
	I0620 18:10:31.041934  374244 exec_runner.go:203] rm: /home/jenkins/minikube-integration/19106-2452/.minikube/key.pem
	I0620 18:10:31.041959  374244 exec_runner.go:151] cp: /home/jenkins/minikube-integration/19106-2452/.minikube/certs/key.pem --> /home/jenkins/minikube-integration/19106-2452/.minikube/key.pem (1675 bytes)
	I0620 18:10:31.042010  374244 provision.go:117] generating server cert: /home/jenkins/minikube-integration/19106-2452/.minikube/machines/server.pem ca-key=/home/jenkins/minikube-integration/19106-2452/.minikube/certs/ca.pem private-key=/home/jenkins/minikube-integration/19106-2452/.minikube/certs/ca-key.pem org=jenkins.no-preload-581163 san=[127.0.0.1 192.168.76.2 localhost minikube no-preload-581163]
	I0620 18:10:31.307877  374244 provision.go:177] copyRemoteCerts
	I0620 18:10:31.307948  374244 ssh_runner.go:195] Run: sudo mkdir -p /etc/docker /etc/docker /etc/docker
	I0620 18:10:31.308005  374244 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" no-preload-581163
	I0620 18:10:31.325058  374244 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33138 SSHKeyPath:/home/jenkins/minikube-integration/19106-2452/.minikube/machines/no-preload-581163/id_rsa Username:docker}
	I0620 18:10:31.420461  374244 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19106-2452/.minikube/certs/ca.pem --> /etc/docker/ca.pem (1078 bytes)
	I0620 18:10:31.450321  374244 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19106-2452/.minikube/machines/server.pem --> /etc/docker/server.pem (1220 bytes)
	I0620 18:10:31.475252  374244 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19106-2452/.minikube/machines/server-key.pem --> /etc/docker/server-key.pem (1679 bytes)
	I0620 18:10:31.504446  374244 provision.go:87] duration metric: took 480.995555ms to configureAuth
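Editor's note: configureAuth above copies the CA material into the profile, generates a server certificate with the SANs listed at provision.go:117 (127.0.0.1, 192.168.76.2, localhost, minikube, no-preload-581163), and scp's ca.pem, server.pem and server-key.pem into /etc/docker on the node. A small sanity-check sketch that re-reads the host-side server.pem and prints its SANs (the path is the one shown in the log; this is an illustrative check, not part of minikube):

    package main

    import (
        "crypto/x509"
        "encoding/pem"
        "fmt"
        "os"
    )

    func main() {
        path := "/home/jenkins/minikube-integration/19106-2452/.minikube/machines/server.pem"
        data, err := os.ReadFile(path)
        if err != nil {
            fmt.Println("read:", err)
            return
        }
        block, _ := pem.Decode(data)
        if block == nil {
            fmt.Println("no PEM block found")
            return
        }
        cert, err := x509.ParseCertificate(block.Bytes)
        if err != nil {
            fmt.Println("parse:", err)
            return
        }
        fmt.Println("DNS SANs:", cert.DNSNames)
        fmt.Println("IP SANs:", cert.IPAddresses)
    }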
	I0620 18:10:31.504480  374244 ubuntu.go:193] setting minikube options for container-runtime
	I0620 18:10:31.504678  374244 config.go:182] Loaded profile config "no-preload-581163": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.30.2
	I0620 18:10:31.504737  374244 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" no-preload-581163
	I0620 18:10:31.523191  374244 main.go:141] libmachine: Using SSH client type: native
	I0620 18:10:31.523434  374244 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x3e2bc0] 0x3e5420 <nil>  [] 0s} 127.0.0.1 33138 <nil> <nil>}
	I0620 18:10:31.523450  374244 main.go:141] libmachine: About to run SSH command:
	df --output=fstype / | tail -n 1
	I0620 18:10:31.660375  374244 main.go:141] libmachine: SSH cmd err, output: <nil>: overlay
	
	I0620 18:10:31.660402  374244 ubuntu.go:71] root file system type: overlay
	I0620 18:10:31.660545  374244 provision.go:314] Updating docker unit: /lib/systemd/system/docker.service ...
	I0620 18:10:31.660650  374244 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" no-preload-581163
	I0620 18:10:31.678609  374244 main.go:141] libmachine: Using SSH client type: native
	I0620 18:10:31.678863  374244 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x3e2bc0] 0x3e5420 <nil>  [] 0s} 127.0.0.1 33138 <nil> <nil>}
	I0620 18:10:31.678952  374244 main.go:141] libmachine: About to run SSH command:
	sudo mkdir -p /lib/systemd/system && printf %!s(MISSING) "[Unit]
	Description=Docker Application Container Engine
	Documentation=https://docs.docker.com
	BindsTo=containerd.service
	After=network-online.target firewalld.service containerd.service
	Wants=network-online.target
	Requires=docker.socket
	StartLimitBurst=3
	StartLimitIntervalSec=60
	
	[Service]
	Type=notify
	Restart=on-failure
	
	
	
	# This file is a systemd drop-in unit that inherits from the base dockerd configuration.
	# The base configuration already specifies an 'ExecStart=...' command. The first directive
	# here is to clear out that command inherited from the base configuration. Without this,
	# the command from the base configuration and the command specified here are treated as
	# a sequence of commands, which is not the desired behavior, nor is it valid -- systemd
	# will catch this invalid input and refuse to start the service with an error like:
	#  Service has more than one ExecStart= setting, which is only allowed for Type=oneshot services.
	
	# NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other
	# container runtimes. If left unlimited, it may result in OOM issues with MySQL.
	ExecStart=
	ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=docker --insecure-registry 10.96.0.0/12 
	ExecReload=/bin/kill -s HUP \$MAINPID
	
	# Having non-zero Limit*s causes performance problems due to accounting overhead
	# in the kernel. We recommend using cgroups to do container-local accounting.
	LimitNOFILE=infinity
	LimitNPROC=infinity
	LimitCORE=infinity
	
	# Uncomment TasksMax if your systemd version supports it.
	# Only systemd 226 and above support this version.
	TasksMax=infinity
	TimeoutStartSec=0
	
	# set delegate yes so that systemd does not reset the cgroups of docker containers
	Delegate=yes
	
	# kill only the docker process, not all processes in the cgroup
	KillMode=process
	
	[Install]
	WantedBy=multi-user.target
	" | sudo tee /lib/systemd/system/docker.service.new
	I0620 18:10:31.823983  374244 main.go:141] libmachine: SSH cmd err, output: <nil>: [Unit]
	Description=Docker Application Container Engine
	Documentation=https://docs.docker.com
	BindsTo=containerd.service
	After=network-online.target firewalld.service containerd.service
	Wants=network-online.target
	Requires=docker.socket
	StartLimitBurst=3
	StartLimitIntervalSec=60
	
	[Service]
	Type=notify
	Restart=on-failure
	
	
	
	# This file is a systemd drop-in unit that inherits from the base dockerd configuration.
	# The base configuration already specifies an 'ExecStart=...' command. The first directive
	# here is to clear out that command inherited from the base configuration. Without this,
	# the command from the base configuration and the command specified here are treated as
	# a sequence of commands, which is not the desired behavior, nor is it valid -- systemd
	# will catch this invalid input and refuse to start the service with an error like:
	#  Service has more than one ExecStart= setting, which is only allowed for Type=oneshot services.
	
	# NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other
	# container runtimes. If left unlimited, it may result in OOM issues with MySQL.
	ExecStart=
	ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=docker --insecure-registry 10.96.0.0/12 
	ExecReload=/bin/kill -s HUP $MAINPID
	
	# Having non-zero Limit*s causes performance problems due to accounting overhead
	# in the kernel. We recommend using cgroups to do container-local accounting.
	LimitNOFILE=infinity
	LimitNPROC=infinity
	LimitCORE=infinity
	
	# Uncomment TasksMax if your systemd version supports it.
	# Only systemd 226 and above support this version.
	TasksMax=infinity
	TimeoutStartSec=0
	
	# set delegate yes so that systemd does not reset the cgroups of docker containers
	Delegate=yes
	
	# kill only the docker process, not all processes in the cgroup
	KillMode=process
	
	[Install]
	WantedBy=multi-user.target
	
	I0620 18:10:31.824089  374244 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" no-preload-581163
	I0620 18:10:31.842053  374244 main.go:141] libmachine: Using SSH client type: native
	I0620 18:10:31.842319  374244 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x3e2bc0] 0x3e5420 <nil>  [] 0s} 127.0.0.1 33138 <nil> <nil>}
	I0620 18:10:31.842342  374244 main.go:141] libmachine: About to run SSH command:
	sudo diff -u /lib/systemd/system/docker.service /lib/systemd/system/docker.service.new || { sudo mv /lib/systemd/system/docker.service.new /lib/systemd/system/docker.service; sudo systemctl -f daemon-reload && sudo systemctl -f enable docker && sudo systemctl -f restart docker; }
	I0620 18:10:32.701285  374244 main.go:141] libmachine: SSH cmd err, output: <nil>: --- /lib/systemd/system/docker.service	2024-06-05 11:27:22.000000000 +0000
	+++ /lib/systemd/system/docker.service.new	2024-06-20 18:10:31.815399824 +0000
	@@ -1,46 +1,49 @@
	 [Unit]
	 Description=Docker Application Container Engine
	 Documentation=https://docs.docker.com
	-After=network-online.target docker.socket firewalld.service containerd.service time-set.target
	-Wants=network-online.target containerd.service
	+BindsTo=containerd.service
	+After=network-online.target firewalld.service containerd.service
	+Wants=network-online.target
	 Requires=docker.socket
	+StartLimitBurst=3
	+StartLimitIntervalSec=60
	 
	 [Service]
	 Type=notify
	-# the default is not to use systemd for cgroups because the delegate issues still
	-# exists and systemd currently does not support the cgroup feature set required
	-# for containers run by docker
	-ExecStart=/usr/bin/dockerd -H fd:// --containerd=/run/containerd/containerd.sock
	-ExecReload=/bin/kill -s HUP $MAINPID
	-TimeoutStartSec=0
	-RestartSec=2
	-Restart=always
	+Restart=on-failure
	 
	-# Note that StartLimit* options were moved from "Service" to "Unit" in systemd 229.
	-# Both the old, and new location are accepted by systemd 229 and up, so using the old location
	-# to make them work for either version of systemd.
	-StartLimitBurst=3
	 
	-# Note that StartLimitInterval was renamed to StartLimitIntervalSec in systemd 230.
	-# Both the old, and new name are accepted by systemd 230 and up, so using the old name to make
	-# this option work for either version of systemd.
	-StartLimitInterval=60s
	+
	+# This file is a systemd drop-in unit that inherits from the base dockerd configuration.
	+# The base configuration already specifies an 'ExecStart=...' command. The first directive
	+# here is to clear out that command inherited from the base configuration. Without this,
	+# the command from the base configuration and the command specified here are treated as
	+# a sequence of commands, which is not the desired behavior, nor is it valid -- systemd
	+# will catch this invalid input and refuse to start the service with an error like:
	+#  Service has more than one ExecStart= setting, which is only allowed for Type=oneshot services.
	+
	+# NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other
	+# container runtimes. If left unlimited, it may result in OOM issues with MySQL.
	+ExecStart=
	+ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=docker --insecure-registry 10.96.0.0/12 
	+ExecReload=/bin/kill -s HUP $MAINPID
	 
	 # Having non-zero Limit*s causes performance problems due to accounting overhead
	 # in the kernel. We recommend using cgroups to do container-local accounting.
	+LimitNOFILE=infinity
	 LimitNPROC=infinity
	 LimitCORE=infinity
	 
	-# Comment TasksMax if your systemd version does not support it.
	-# Only systemd 226 and above support this option.
	+# Uncomment TasksMax if your systemd version supports it.
	+# Only systemd 226 and above support this version.
	 TasksMax=infinity
	+TimeoutStartSec=0
	 
	 # set delegate yes so that systemd does not reset the cgroups of docker containers
	 Delegate=yes
	 
	 # kill only the docker process, not all processes in the cgroup
	 KillMode=process
	-OOMScoreAdjust=-500
	 
	 [Install]
	 WantedBy=multi-user.target
	Synchronizing state of docker.service with SysV service script with /lib/systemd/systemd-sysv-install.
	Executing: /lib/systemd/systemd-sysv-install enable docker
	
	I0620 18:10:32.701378  374244 machine.go:97] duration metric: took 5.192857729s to provisionDockerMachine
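Editor's note: the unit update just above follows a write-if-changed pattern: render docker.service.new, diff it against the installed unit, and only when they differ move it into place and daemon-reload/enable/restart docker (the `sudo diff -u ... || { mv; systemctl ...; }` one-liner). A simplified local sketch of the same idea; the desired unit content is deliberately taken from a file argument rather than reproduced here, and doing this for real requires root:

    package main

    import (
        "bytes"
        "fmt"
        "os"
        "os/exec"
    )

    // updateUnitIfChanged mirrors the shell step in the log: only when the
    // on-disk unit differs from the desired content is it replaced and the
    // service reloaded, enabled and restarted.
    func updateUnitIfChanged(path string, desired []byte) error {
        current, err := os.ReadFile(path)
        if err == nil && bytes.Equal(current, desired) {
            return nil // unchanged, nothing to do
        }
        if err := os.WriteFile(path, desired, 0o644); err != nil {
            return err
        }
        for _, args := range [][]string{
            {"-f", "daemon-reload"},
            {"-f", "enable", "docker"},
            {"-f", "restart", "docker"},
        } {
            if out, err := exec.Command("systemctl", args...).CombinedOutput(); err != nil {
                return fmt.Errorf("systemctl %v: %v: %s", args, err, out)
            }
        }
        return nil
    }

    func main() {
        // Illustrative usage: updateunit /lib/systemd/system/docker.service desired.service
        if len(os.Args) != 3 {
            fmt.Println("usage: updateunit <unit-path> <file-with-desired-content>")
            return
        }
        desired, err := os.ReadFile(os.Args[2])
        if err != nil {
            fmt.Println(err)
            return
        }
        if err := updateUnitIfChanged(os.Args[1], desired); err != nil {
            fmt.Println(err)
        }
    }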
	I0620 18:10:32.701415  374244 client.go:171] duration metric: took 7.332128835s to LocalClient.Create
	I0620 18:10:32.701445  374244 start.go:167] duration metric: took 7.332198972s to libmachine.API.Create "no-preload-581163"
	I0620 18:10:32.701459  374244 start.go:293] postStartSetup for "no-preload-581163" (driver="docker")
	I0620 18:10:32.701470  374244 start.go:322] creating required directories: [/etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs]
	I0620 18:10:32.701562  374244 ssh_runner.go:195] Run: sudo mkdir -p /etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs
	I0620 18:10:32.701610  374244 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" no-preload-581163
	I0620 18:10:32.718870  374244 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33138 SSHKeyPath:/home/jenkins/minikube-integration/19106-2452/.minikube/machines/no-preload-581163/id_rsa Username:docker}
	I0620 18:10:32.816530  374244 ssh_runner.go:195] Run: cat /etc/os-release
	I0620 18:10:32.820296  374244 main.go:141] libmachine: Couldn't set key VERSION_CODENAME, no corresponding struct field found
	I0620 18:10:32.820330  374244 main.go:141] libmachine: Couldn't set key PRIVACY_POLICY_URL, no corresponding struct field found
	I0620 18:10:32.820341  374244 main.go:141] libmachine: Couldn't set key UBUNTU_CODENAME, no corresponding struct field found
	I0620 18:10:32.820348  374244 info.go:137] Remote host: Ubuntu 22.04.4 LTS
	I0620 18:10:32.820357  374244 filesync.go:126] Scanning /home/jenkins/minikube-integration/19106-2452/.minikube/addons for local assets ...
	I0620 18:10:32.820423  374244 filesync.go:126] Scanning /home/jenkins/minikube-integration/19106-2452/.minikube/files for local assets ...
	I0620 18:10:32.820565  374244 filesync.go:149] local asset: /home/jenkins/minikube-integration/19106-2452/.minikube/files/etc/ssl/certs/77842.pem -> 77842.pem in /etc/ssl/certs
	I0620 18:10:32.820681  374244 ssh_runner.go:195] Run: sudo mkdir -p /etc/ssl/certs
	I0620 18:10:32.830092  374244 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19106-2452/.minikube/files/etc/ssl/certs/77842.pem --> /etc/ssl/certs/77842.pem (1708 bytes)
	I0620 18:10:32.856387  374244 start.go:296] duration metric: took 154.913455ms for postStartSetup
	I0620 18:10:32.856754  374244 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" no-preload-581163
	I0620 18:10:32.873440  374244 profile.go:143] Saving config to /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/no-preload-581163/config.json ...
	I0620 18:10:32.873724  374244 ssh_runner.go:195] Run: sh -c "df -h /var | awk 'NR==2{print $5}'"
	I0620 18:10:32.873778  374244 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" no-preload-581163
	I0620 18:10:32.890811  374244 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33138 SSHKeyPath:/home/jenkins/minikube-integration/19106-2452/.minikube/machines/no-preload-581163/id_rsa Username:docker}
	I0620 18:10:32.984061  374244 ssh_runner.go:195] Run: sh -c "df -BG /var | awk 'NR==2{print $4}'"
	I0620 18:10:32.988718  374244 start.go:128] duration metric: took 7.624024242s to createHost
	I0620 18:10:32.988745  374244 start.go:83] releasing machines lock for "no-preload-581163", held for 7.62417468s
	I0620 18:10:32.988816  374244 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" no-preload-581163
	I0620 18:10:33.011565  374244 ssh_runner.go:195] Run: cat /version.json
	I0620 18:10:33.011630  374244 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" no-preload-581163
	I0620 18:10:33.011642  374244 ssh_runner.go:195] Run: curl -sS -m 2 https://registry.k8s.io/
	I0620 18:10:33.011706  374244 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" no-preload-581163
	I0620 18:10:33.038299  374244 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33138 SSHKeyPath:/home/jenkins/minikube-integration/19106-2452/.minikube/machines/no-preload-581163/id_rsa Username:docker}
	I0620 18:10:33.039561  374244 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33138 SSHKeyPath:/home/jenkins/minikube-integration/19106-2452/.minikube/machines/no-preload-581163/id_rsa Username:docker}
	I0620 18:10:33.130884  374244 ssh_runner.go:195] Run: systemctl --version
	I0620 18:10:33.286901  374244 ssh_runner.go:195] Run: sh -c "stat /etc/cni/net.d/*loopback.conf*"
	I0620 18:10:33.291422  374244 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f -name *loopback.conf* -not -name *.mk_disabled -exec sh -c "grep -q loopback {} && ( grep -q name {} || sudo sed -i '/"type": "loopback"/i \ \ \ \ "name": "loopback",' {} ) && sudo sed -i 's|"cniVersion": ".*"|"cniVersion": "1.0.0"|g' {}" ;
	I0620 18:10:33.318595  374244 cni.go:230] loopback cni configuration patched: "/etc/cni/net.d/*loopback.conf*" found
	I0620 18:10:33.318678  374244 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f ( ( -name *bridge* -or -name *podman* ) -and -not -name *.mk_disabled ) -printf "%!p(MISSING), " -exec sh -c "sudo mv {} {}.mk_disabled" ;
	I0620 18:10:33.350123  374244 cni.go:262] disabled [/etc/cni/net.d/87-podman-bridge.conflist, /etc/cni/net.d/100-crio-bridge.conf] bridge cni config(s)
	I0620 18:10:33.350165  374244 start.go:494] detecting cgroup driver to use...
	I0620 18:10:33.350204  374244 detect.go:196] detected "cgroupfs" cgroup driver on host os
	I0620 18:10:33.350301  374244 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %!s(MISSING) "runtime-endpoint: unix:///run/containerd/containerd.sock
	" | sudo tee /etc/crictl.yaml"
	I0620 18:10:33.368279  374244 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)sandbox_image = .*$|\1sandbox_image = "registry.k8s.io/pause:3.9"|' /etc/containerd/config.toml"
	I0620 18:10:33.379332  374244 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)restrict_oom_score_adj = .*$|\1restrict_oom_score_adj = false|' /etc/containerd/config.toml"
	I0620 18:10:33.389691  374244 containerd.go:146] configuring containerd to use "cgroupfs" as cgroup driver...
	I0620 18:10:33.389763  374244 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)SystemdCgroup = .*$|\1SystemdCgroup = false|g' /etc/containerd/config.toml"
	I0620 18:10:33.400258  374244 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runtime.v1.linux"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
	I0620 18:10:33.410649  374244 ssh_runner.go:195] Run: sh -c "sudo sed -i '/systemd_cgroup/d' /etc/containerd/config.toml"
	I0620 18:10:33.420781  374244 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runc.v1"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
	I0620 18:10:33.433471  374244 ssh_runner.go:195] Run: sh -c "sudo rm -rf /etc/cni/net.mk"
	I0620 18:10:33.443920  374244 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)conf_dir = .*$|\1conf_dir = "/etc/cni/net.d"|g' /etc/containerd/config.toml"
	I0620 18:10:33.456471  374244 ssh_runner.go:195] Run: sh -c "sudo sed -i '/^ *enable_unprivileged_ports = .*/d' /etc/containerd/config.toml"
	I0620 18:10:33.467847  374244 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)\[plugins."io.containerd.grpc.v1.cri"\]|&\n\1  enable_unprivileged_ports = true|' /etc/containerd/config.toml"
	I0620 18:10:33.479133  374244 ssh_runner.go:195] Run: sudo sysctl net.bridge.bridge-nf-call-iptables
	I0620 18:10:33.488228  374244 ssh_runner.go:195] Run: sudo sh -c "echo 1 > /proc/sys/net/ipv4/ip_forward"
	I0620 18:10:33.497118  374244 ssh_runner.go:195] Run: sudo systemctl daemon-reload
	I0620 18:10:33.596813  374244 ssh_runner.go:195] Run: sudo systemctl restart containerd
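Editor's note: the block of sed edits above rewrites /etc/containerd/config.toml so that containerd matches the detected cgroupfs driver (SystemdCgroup = false), switches any v1/legacy runtime entries to io.containerd.runc.v2, pins the pause image to registry.k8s.io/pause:3.9, and points conf_dir at /etc/cni/net.d, before the daemon-reload and restart. A rough local sketch of just the SystemdCgroup toggle, using the same pattern as the sed at 18:10:33.389763 (needs root to write the real file):

    package main

    import (
        "fmt"
        "os"
        "regexp"
    )

    func main() {
        path := "/etc/containerd/config.toml"
        data, err := os.ReadFile(path)
        if err != nil {
            fmt.Println("read:", err)
            return
        }
        // Equivalent of: sed -i -r 's|^( *)SystemdCgroup = .*$|\1SystemdCgroup = false|g'
        re := regexp.MustCompile(`(?m)^( *)SystemdCgroup = .*$`)
        patched := re.ReplaceAll(data, []byte("${1}SystemdCgroup = false"))
        if err := os.WriteFile(path, patched, 0o644); err != nil {
            fmt.Println("write:", err)
        }
    }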
	I0620 18:10:33.706015  374244 start.go:494] detecting cgroup driver to use...
	I0620 18:10:33.706107  374244 detect.go:196] detected "cgroupfs" cgroup driver on host os
	I0620 18:10:33.706185  374244 ssh_runner.go:195] Run: sudo systemctl cat docker.service
	I0620 18:10:33.722198  374244 cruntime.go:279] skipping containerd shutdown because we are bound to it
	I0620 18:10:33.722315  374244 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service crio
	I0620 18:10:33.736876  374244 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %!s(MISSING) "runtime-endpoint: unix:///var/run/cri-dockerd.sock
	" | sudo tee /etc/crictl.yaml"
	I0620 18:10:33.756994  374244 ssh_runner.go:195] Run: which cri-dockerd
	I0620 18:10:33.761088  374244 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/cri-docker.service.d
	I0620 18:10:33.775238  374244 ssh_runner.go:362] scp memory --> /etc/systemd/system/cri-docker.service.d/10-cni.conf (189 bytes)
	I0620 18:10:33.796146  374244 ssh_runner.go:195] Run: sudo systemctl unmask docker.service
	I0620 18:10:33.905504  374244 ssh_runner.go:195] Run: sudo systemctl enable docker.socket
	I0620 18:10:34.022698  374244 docker.go:574] configuring docker to use "cgroupfs" as cgroup driver...
	I0620 18:10:34.022856  374244 ssh_runner.go:362] scp memory --> /etc/docker/daemon.json (130 bytes)
	I0620 18:10:34.048007  374244 ssh_runner.go:195] Run: sudo systemctl daemon-reload
	I0620 18:10:34.155194  374244 ssh_runner.go:195] Run: sudo systemctl restart docker
	I0620 18:10:34.425354  374244 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.socket
	I0620 18:10:34.440313  374244 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.service
	I0620 18:10:34.453182  374244 ssh_runner.go:195] Run: sudo systemctl unmask cri-docker.socket
	I0620 18:10:34.561639  374244 ssh_runner.go:195] Run: sudo systemctl enable cri-docker.socket
	I0620 18:10:34.666404  374244 ssh_runner.go:195] Run: sudo systemctl daemon-reload
	I0620 18:10:34.759211  374244 ssh_runner.go:195] Run: sudo systemctl restart cri-docker.socket
	I0620 18:10:34.774003  374244 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.service
	I0620 18:10:34.787740  374244 ssh_runner.go:195] Run: sudo systemctl daemon-reload
	I0620 18:10:34.881296  374244 ssh_runner.go:195] Run: sudo systemctl restart cri-docker.service
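Editor's note: the unmask/enable/daemon-reload/restart sequence above is what brings up both the docker engine and the cri-dockerd shim, after which minikube waits for /var/run/cri-dockerd.sock. A compressed sketch of that command sequence, with every systemctl invocation copied from the log (must run as root on the node):

    package main

    import (
        "fmt"
        "os/exec"
    )

    func main() {
        steps := [][]string{
            {"systemctl", "unmask", "docker.service"},
            {"systemctl", "enable", "docker.socket"},
            {"systemctl", "daemon-reload"},
            {"systemctl", "restart", "docker"},
            {"systemctl", "unmask", "cri-docker.socket"},
            {"systemctl", "enable", "cri-docker.socket"},
            {"systemctl", "daemon-reload"},
            {"systemctl", "restart", "cri-docker.socket"},
            {"systemctl", "daemon-reload"},
            {"systemctl", "restart", "cri-docker.service"},
        }
        for _, s := range steps {
            if out, err := exec.Command(s[0], s[1:]...).CombinedOutput(); err != nil {
                fmt.Printf("%v failed: %v\n%s", s, err, out)
                return
            }
        }
        fmt.Println("docker and cri-docker restarted")
    }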
	I0620 18:10:34.978599  374244 start.go:541] Will wait 60s for socket path /var/run/cri-dockerd.sock
	I0620 18:10:34.978671  374244 ssh_runner.go:195] Run: stat /var/run/cri-dockerd.sock
	I0620 18:10:34.983969  374244 start.go:562] Will wait 60s for crictl version
	I0620 18:10:34.984045  374244 ssh_runner.go:195] Run: which crictl
	I0620 18:10:34.987753  374244 ssh_runner.go:195] Run: sudo /usr/bin/crictl version
	I0620 18:10:35.041174  374244 start.go:578] Version:  0.1.0
	RuntimeName:  docker
	RuntimeVersion:  26.1.4
	RuntimeApiVersion:  v1
	I0620 18:10:35.041251  374244 ssh_runner.go:195] Run: docker version --format {{.Server.Version}}
	I0620 18:10:35.064568  374244 ssh_runner.go:195] Run: docker version --format {{.Server.Version}}
	I0620 18:10:35.089453  374244 out.go:204] * Preparing Kubernetes v1.30.2 on Docker 26.1.4 ...
	I0620 18:10:35.089577  374244 cli_runner.go:164] Run: docker network inspect no-preload-581163 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
	I0620 18:10:35.111853  374244 ssh_runner.go:195] Run: grep 192.168.76.1	host.minikube.internal$ /etc/hosts
	I0620 18:10:35.115839  374244 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\thost.minikube.internal$' "/etc/hosts"; echo "192.168.76.1	host.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
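Editor's note: the bash one-liner above makes host.minikube.internal resolve to the network gateway (192.168.76.1) inside the node by rewriting /etc/hosts: drop any stale mapping, append the fresh one, and copy the result back over the original. A Go rendering of the same transformation (gateway and hostname taken from the log; writing /etc/hosts needs root):

    package main

    import (
        "fmt"
        "os"
        "strings"
    )

    func main() {
        const entry = "192.168.76.1\thost.minikube.internal"
        data, err := os.ReadFile("/etc/hosts")
        if err != nil {
            fmt.Println("read:", err)
            return
        }
        var kept []string
        for _, line := range strings.Split(strings.TrimRight(string(data), "\n"), "\n") {
            // Drop any existing host.minikube.internal mapping, like the grep -v in the log.
            if strings.HasSuffix(line, "\thost.minikube.internal") {
                continue
            }
            kept = append(kept, line)
        }
        kept = append(kept, entry)
        if err := os.WriteFile("/etc/hosts", []byte(strings.Join(kept, "\n")+"\n"), 0o644); err != nil {
            fmt.Println("write:", err)
        }
    }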
	I0620 18:10:35.127737  374244 kubeadm.go:877] updating cluster {Name:no-preload-581163 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1718753665-19106@sha256:735aacbd61d487240dc39ba6e4d70dd6ae1ad6181ca2ba092d372605e48ee636 Memory:2200 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.30.2 ClusterName:no-preload-581163 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.76.2 Port:8443 KubernetesVersion:v1.30.2 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} ...
	I0620 18:10:35.127857  374244 preload.go:132] Checking if preload exists for k8s version v1.30.2 and runtime docker
	I0620 18:10:35.127909  374244 ssh_runner.go:195] Run: docker images --format {{.Repository}}:{{.Tag}}
	I0620 18:10:32.932786  362254 pod_ready.go:102] pod "metrics-server-9975d5f86-nxk7p" in "kube-system" namespace has status "Ready":"False"
	I0620 18:10:34.932888  362254 pod_ready.go:102] pod "metrics-server-9975d5f86-nxk7p" in "kube-system" namespace has status "Ready":"False"
	I0620 18:10:36.933040  362254 pod_ready.go:102] pod "metrics-server-9975d5f86-nxk7p" in "kube-system" namespace has status "Ready":"False"
	I0620 18:10:35.144645  374244 docker.go:685] Got preloaded images: 
	I0620 18:10:35.144668  374244 docker.go:691] registry.k8s.io/kube-apiserver:v1.30.2 wasn't preloaded
	I0620 18:10:35.144675  374244 cache_images.go:88] LoadCachedImages start: [registry.k8s.io/kube-apiserver:v1.30.2 registry.k8s.io/kube-controller-manager:v1.30.2 registry.k8s.io/kube-scheduler:v1.30.2 registry.k8s.io/kube-proxy:v1.30.2 registry.k8s.io/pause:3.9 registry.k8s.io/etcd:3.5.12-0 registry.k8s.io/coredns/coredns:v1.11.1 gcr.io/k8s-minikube/storage-provisioner:v5]
	I0620 18:10:35.147307  374244 image.go:134] retrieving image: registry.k8s.io/etcd:3.5.12-0
	I0620 18:10:35.147597  374244 image.go:134] retrieving image: registry.k8s.io/kube-apiserver:v1.30.2
	I0620 18:10:35.147913  374244 image.go:134] retrieving image: registry.k8s.io/coredns/coredns:v1.11.1
	I0620 18:10:35.148056  374244 image.go:134] retrieving image: registry.k8s.io/kube-controller-manager:v1.30.2
	I0620 18:10:35.148356  374244 image.go:134] retrieving image: gcr.io/k8s-minikube/storage-provisioner:v5
	I0620 18:10:35.148444  374244 image.go:134] retrieving image: registry.k8s.io/pause:3.9
	I0620 18:10:35.148473  374244 image.go:134] retrieving image: registry.k8s.io/kube-proxy:v1.30.2
	I0620 18:10:35.148653  374244 image.go:134] retrieving image: registry.k8s.io/kube-scheduler:v1.30.2
	I0620 18:10:35.149763  374244 image.go:177] daemon lookup for registry.k8s.io/etcd:3.5.12-0: Error response from daemon: No such image: registry.k8s.io/etcd:3.5.12-0
	I0620 18:10:35.150057  374244 image.go:177] daemon lookup for registry.k8s.io/kube-apiserver:v1.30.2: Error response from daemon: No such image: registry.k8s.io/kube-apiserver:v1.30.2
	I0620 18:10:35.151011  374244 image.go:177] daemon lookup for registry.k8s.io/kube-proxy:v1.30.2: Error response from daemon: No such image: registry.k8s.io/kube-proxy:v1.30.2
	I0620 18:10:35.151070  374244 image.go:177] daemon lookup for registry.k8s.io/pause:3.9: Error response from daemon: No such image: registry.k8s.io/pause:3.9
	I0620 18:10:35.151178  374244 image.go:177] daemon lookup for registry.k8s.io/coredns/coredns:v1.11.1: Error response from daemon: No such image: registry.k8s.io/coredns/coredns:v1.11.1
	I0620 18:10:35.151252  374244 image.go:177] daemon lookup for gcr.io/k8s-minikube/storage-provisioner:v5: Error response from daemon: No such image: gcr.io/k8s-minikube/storage-provisioner:v5
	I0620 18:10:35.151377  374244 image.go:177] daemon lookup for registry.k8s.io/kube-controller-manager:v1.30.2: Error response from daemon: No such image: registry.k8s.io/kube-controller-manager:v1.30.2
	I0620 18:10:35.151617  374244 image.go:177] daemon lookup for registry.k8s.io/kube-scheduler:v1.30.2: Error response from daemon: No such image: registry.k8s.io/kube-scheduler:v1.30.2
	I0620 18:10:35.441081  374244 ssh_runner.go:195] Run: docker image inspect --format {{.Id}} registry.k8s.io/coredns/coredns:v1.11.1
	I0620 18:10:35.468314  374244 cache_images.go:116] "registry.k8s.io/coredns/coredns:v1.11.1" needs transfer: "registry.k8s.io/coredns/coredns:v1.11.1" does not exist at hash "2437cf762177702dec2dfe99a09c37427a15af6d9a57c456b65352667c223d93" in container runtime
	I0620 18:10:35.468384  374244 docker.go:337] Removing image: registry.k8s.io/coredns/coredns:v1.11.1
	I0620 18:10:35.468457  374244 ssh_runner.go:195] Run: docker rmi registry.k8s.io/coredns/coredns:v1.11.1
	I0620 18:10:35.489963  374244 cache_images.go:286] Loading image from: /home/jenkins/minikube-integration/19106-2452/.minikube/cache/images/arm64/registry.k8s.io/coredns/coredns_v1.11.1
	I0620 18:10:35.490066  374244 ssh_runner.go:195] Run: stat -c "%!s(MISSING) %!y(MISSING)" /var/lib/minikube/images/coredns_v1.11.1
	I0620 18:10:35.493586  374244 ssh_runner.go:352] existence check for /var/lib/minikube/images/coredns_v1.11.1: stat -c "%!s(MISSING) %!y(MISSING)" /var/lib/minikube/images/coredns_v1.11.1: Process exited with status 1
	stdout:
	
	stderr:
	stat: cannot statx '/var/lib/minikube/images/coredns_v1.11.1': No such file or directory
	I0620 18:10:35.493623  374244 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19106-2452/.minikube/cache/images/arm64/registry.k8s.io/coredns/coredns_v1.11.1 --> /var/lib/minikube/images/coredns_v1.11.1 (16488960 bytes)
	I0620 18:10:35.528921  374244 ssh_runner.go:195] Run: docker image inspect --format {{.Id}} registry.k8s.io/kube-apiserver:v1.30.2
	I0620 18:10:35.578264  374244 cache_images.go:116] "registry.k8s.io/kube-apiserver:v1.30.2" needs transfer: "registry.k8s.io/kube-apiserver:v1.30.2" does not exist at hash "84c601f3f72c87776cdcf77a73329d1f45297e43a92508b0f289fa2fcf8872a0" in container runtime
	I0620 18:10:35.578323  374244 docker.go:337] Removing image: registry.k8s.io/kube-apiserver:v1.30.2
	I0620 18:10:35.578384  374244 ssh_runner.go:195] Run: docker rmi registry.k8s.io/kube-apiserver:v1.30.2
	I0620 18:10:35.609392  374244 cache_images.go:286] Loading image from: /home/jenkins/minikube-integration/19106-2452/.minikube/cache/images/arm64/registry.k8s.io/kube-apiserver_v1.30.2
	I0620 18:10:35.609499  374244 ssh_runner.go:195] Run: stat -c "%!s(MISSING) %!y(MISSING)" /var/lib/minikube/images/kube-apiserver_v1.30.2
	I0620 18:10:35.616661  374244 ssh_runner.go:195] Run: docker image inspect --format {{.Id}} registry.k8s.io/etcd:3.5.12-0
	I0620 18:10:35.631289  374244 ssh_runner.go:195] Run: docker image inspect --format {{.Id}} registry.k8s.io/kube-proxy:v1.30.2
	I0620 18:10:35.631360  374244 ssh_runner.go:195] Run: docker image inspect --format {{.Id}} registry.k8s.io/kube-controller-manager:v1.30.2
	I0620 18:10:35.631657  374244 ssh_runner.go:195] Run: docker image inspect --format {{.Id}} registry.k8s.io/pause:3.9
	I0620 18:10:35.639795  374244 ssh_runner.go:195] Run: docker image inspect --format {{.Id}} registry.k8s.io/kube-scheduler:v1.30.2
	W0620 18:10:35.683378  374244 image.go:265] image gcr.io/k8s-minikube/storage-provisioner:v5 arch mismatch: want arm64 got amd64. fixing
	I0620 18:10:35.683665  374244 ssh_runner.go:195] Run: docker image inspect --format {{.Id}} gcr.io/k8s-minikube/storage-provisioner:v5
	I0620 18:10:35.714066  374244 ssh_runner.go:352] existence check for /var/lib/minikube/images/kube-apiserver_v1.30.2: stat -c "%!s(MISSING) %!y(MISSING)" /var/lib/minikube/images/kube-apiserver_v1.30.2: Process exited with status 1
	stdout:
	
	stderr:
	stat: cannot statx '/var/lib/minikube/images/kube-apiserver_v1.30.2': No such file or directory
	I0620 18:10:35.714107  374244 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19106-2452/.minikube/cache/images/arm64/registry.k8s.io/kube-apiserver_v1.30.2 --> /var/lib/minikube/images/kube-apiserver_v1.30.2 (29947392 bytes)
	I0620 18:10:35.714181  374244 cache_images.go:116] "registry.k8s.io/etcd:3.5.12-0" needs transfer: "registry.k8s.io/etcd:3.5.12-0" does not exist at hash "014faa467e29798aeef733fe6d1a3b5e382688217b053ad23410e6cccd5d22fd" in container runtime
	I0620 18:10:35.714329  374244 docker.go:337] Removing image: registry.k8s.io/etcd:3.5.12-0
	I0620 18:10:35.714385  374244 ssh_runner.go:195] Run: docker rmi registry.k8s.io/etcd:3.5.12-0
	I0620 18:10:35.720676  374244 cache_images.go:116] "registry.k8s.io/pause:3.9" needs transfer: "registry.k8s.io/pause:3.9" does not exist at hash "829e9de338bd5fdd3f16f68f83a9fb288fbc8453e881e5d5cfd0f6f2ff72b43e" in container runtime
	I0620 18:10:35.720763  374244 docker.go:337] Removing image: registry.k8s.io/pause:3.9
	I0620 18:10:35.720821  374244 ssh_runner.go:195] Run: docker rmi registry.k8s.io/pause:3.9
	I0620 18:10:35.765866  374244 cache_images.go:116] "registry.k8s.io/kube-proxy:v1.30.2" needs transfer: "registry.k8s.io/kube-proxy:v1.30.2" does not exist at hash "66dbb96a9149f69913ff817f696be766014cacdffc2ce0889a76c81165415fae" in container runtime
	I0620 18:10:35.765910  374244 docker.go:337] Removing image: registry.k8s.io/kube-proxy:v1.30.2
	I0620 18:10:35.765986  374244 ssh_runner.go:195] Run: docker rmi registry.k8s.io/kube-proxy:v1.30.2
	I0620 18:10:35.766055  374244 cache_images.go:116] "registry.k8s.io/kube-controller-manager:v1.30.2" needs transfer: "registry.k8s.io/kube-controller-manager:v1.30.2" does not exist at hash "e1dcc3400d3ea6a268c7ea6e66c3a196703770a8e346b695f54344ab53a47567" in container runtime
	I0620 18:10:35.766087  374244 docker.go:337] Removing image: registry.k8s.io/kube-controller-manager:v1.30.2
	I0620 18:10:35.766131  374244 ssh_runner.go:195] Run: docker rmi registry.k8s.io/kube-controller-manager:v1.30.2
	I0620 18:10:35.766178  374244 cache_images.go:116] "registry.k8s.io/kube-scheduler:v1.30.2" needs transfer: "registry.k8s.io/kube-scheduler:v1.30.2" does not exist at hash "c7dd04b1bafeb51c650fde7f34ac0fdafa96030e77ea7a822135ff302d895dd5" in container runtime
	I0620 18:10:35.766228  374244 docker.go:337] Removing image: registry.k8s.io/kube-scheduler:v1.30.2
	I0620 18:10:35.766261  374244 ssh_runner.go:195] Run: docker rmi registry.k8s.io/kube-scheduler:v1.30.2
	I0620 18:10:35.766320  374244 cache_images.go:116] "gcr.io/k8s-minikube/storage-provisioner:v5" needs transfer: "gcr.io/k8s-minikube/storage-provisioner:v5" does not exist at hash "66749159455b3f08c8318fe0233122f54d0f5889f9c5fdfb73c3fd9d99895b51" in container runtime
	I0620 18:10:35.766342  374244 docker.go:337] Removing image: gcr.io/k8s-minikube/storage-provisioner:v5
	I0620 18:10:35.766382  374244 ssh_runner.go:195] Run: docker rmi gcr.io/k8s-minikube/storage-provisioner:v5
	I0620 18:10:35.766437  374244 cache_images.go:286] Loading image from: /home/jenkins/minikube-integration/19106-2452/.minikube/cache/images/arm64/registry.k8s.io/etcd_3.5.12-0
	I0620 18:10:35.766517  374244 ssh_runner.go:195] Run: stat -c "%!s(MISSING) %!y(MISSING)" /var/lib/minikube/images/etcd_3.5.12-0
	I0620 18:10:35.779840  374244 cache_images.go:286] Loading image from: /home/jenkins/minikube-integration/19106-2452/.minikube/cache/images/arm64/registry.k8s.io/pause_3.9
	I0620 18:10:35.779966  374244 ssh_runner.go:195] Run: stat -c "%!s(MISSING) %!y(MISSING)" /var/lib/minikube/images/pause_3.9
	I0620 18:10:35.849606  374244 ssh_runner.go:352] existence check for /var/lib/minikube/images/etcd_3.5.12-0: stat -c "%!s(MISSING) %!y(MISSING)" /var/lib/minikube/images/etcd_3.5.12-0: Process exited with status 1
	stdout:
	
	stderr:
	stat: cannot statx '/var/lib/minikube/images/etcd_3.5.12-0': No such file or directory
	I0620 18:10:35.849698  374244 cache_images.go:286] Loading image from: /home/jenkins/minikube-integration/19106-2452/.minikube/cache/images/arm64/registry.k8s.io/kube-scheduler_v1.30.2
	I0620 18:10:35.849803  374244 ssh_runner.go:195] Run: stat -c "%!s(MISSING) %!y(MISSING)" /var/lib/minikube/images/kube-scheduler_v1.30.2
	I0620 18:10:35.849874  374244 cache_images.go:286] Loading image from: /home/jenkins/minikube-integration/19106-2452/.minikube/cache/images/arm64/registry.k8s.io/kube-proxy_v1.30.2
	I0620 18:10:35.849941  374244 ssh_runner.go:195] Run: stat -c "%!s(MISSING) %!y(MISSING)" /var/lib/minikube/images/kube-proxy_v1.30.2
	I0620 18:10:35.849996  374244 cache_images.go:286] Loading image from: /home/jenkins/minikube-integration/19106-2452/.minikube/cache/images/arm64/registry.k8s.io/kube-controller-manager_v1.30.2
	I0620 18:10:35.850057  374244 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19106-2452/.minikube/cache/images/arm64/registry.k8s.io/etcd_3.5.12-0 --> /var/lib/minikube/images/etcd_3.5.12-0 (66196992 bytes)
	I0620 18:10:35.850119  374244 cache_images.go:286] Loading image from: /home/jenkins/minikube-integration/19106-2452/.minikube/cache/images/arm64/gcr.io/k8s-minikube/storage-provisioner_v5
	I0620 18:10:35.850170  374244 ssh_runner.go:195] Run: stat -c "%!s(MISSING) %!y(MISSING)" /var/lib/minikube/images/kube-controller-manager_v1.30.2
	I0620 18:10:35.850224  374244 ssh_runner.go:352] existence check for /var/lib/minikube/images/pause_3.9: stat -c "%!s(MISSING) %!y(MISSING)" /var/lib/minikube/images/pause_3.9: Process exited with status 1
	stdout:
	
	stderr:
	stat: cannot statx '/var/lib/minikube/images/pause_3.9': No such file or directory
	I0620 18:10:35.850241  374244 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19106-2452/.minikube/cache/images/arm64/registry.k8s.io/pause_3.9 --> /var/lib/minikube/images/pause_3.9 (268288 bytes)
	I0620 18:10:35.850302  374244 ssh_runner.go:195] Run: stat -c "%!s(MISSING) %!y(MISSING)" /var/lib/minikube/images/storage-provisioner_v5
	I0620 18:10:35.885054  374244 ssh_runner.go:352] existence check for /var/lib/minikube/images/storage-provisioner_v5: stat -c "%!s(MISSING) %!y(MISSING)" /var/lib/minikube/images/storage-provisioner_v5: Process exited with status 1
	stdout:
	
	stderr:
	stat: cannot statx '/var/lib/minikube/images/storage-provisioner_v5': No such file or directory
	I0620 18:10:35.885095  374244 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19106-2452/.minikube/cache/images/arm64/gcr.io/k8s-minikube/storage-provisioner_v5 --> /var/lib/minikube/images/storage-provisioner_v5 (8035840 bytes)
	I0620 18:10:35.885180  374244 ssh_runner.go:352] existence check for /var/lib/minikube/images/kube-scheduler_v1.30.2: stat -c "%!s(MISSING) %!y(MISSING)" /var/lib/minikube/images/kube-scheduler_v1.30.2: Process exited with status 1
	stdout:
	
	stderr:
	stat: cannot statx '/var/lib/minikube/images/kube-scheduler_v1.30.2': No such file or directory
	I0620 18:10:35.885222  374244 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19106-2452/.minikube/cache/images/arm64/registry.k8s.io/kube-scheduler_v1.30.2 --> /var/lib/minikube/images/kube-scheduler_v1.30.2 (17653248 bytes)
	I0620 18:10:35.885271  374244 ssh_runner.go:352] existence check for /var/lib/minikube/images/kube-proxy_v1.30.2: stat -c "%!s(MISSING) %!y(MISSING)" /var/lib/minikube/images/kube-proxy_v1.30.2: Process exited with status 1
	stdout:
	
	stderr:
	stat: cannot statx '/var/lib/minikube/images/kube-proxy_v1.30.2': No such file or directory
	I0620 18:10:35.885285  374244 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19106-2452/.minikube/cache/images/arm64/registry.k8s.io/kube-proxy_v1.30.2 --> /var/lib/minikube/images/kube-proxy_v1.30.2 (25635840 bytes)
	I0620 18:10:35.885352  374244 ssh_runner.go:352] existence check for /var/lib/minikube/images/kube-controller-manager_v1.30.2: stat -c "%!s(MISSING) %!y(MISSING)" /var/lib/minikube/images/kube-controller-manager_v1.30.2: Process exited with status 1
	stdout:
	
	stderr:
	stat: cannot statx '/var/lib/minikube/images/kube-controller-manager_v1.30.2': No such file or directory
	I0620 18:10:35.885368  374244 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19106-2452/.minikube/cache/images/arm64/registry.k8s.io/kube-controller-manager_v1.30.2 --> /var/lib/minikube/images/kube-controller-manager_v1.30.2 (28379136 bytes)
	I0620 18:10:35.940896  374244 docker.go:304] Loading image: /var/lib/minikube/images/pause_3.9
	I0620 18:10:35.940962  374244 ssh_runner.go:195] Run: /bin/bash -c "sudo cat /var/lib/minikube/images/pause_3.9 | docker load"
	I0620 18:10:36.140782  374244 cache_images.go:315] Transferred and loaded /home/jenkins/minikube-integration/19106-2452/.minikube/cache/images/arm64/registry.k8s.io/pause_3.9 from cache
	I0620 18:10:36.140843  374244 docker.go:304] Loading image: /var/lib/minikube/images/coredns_v1.11.1
	I0620 18:10:36.140864  374244 ssh_runner.go:195] Run: /bin/bash -c "sudo cat /var/lib/minikube/images/coredns_v1.11.1 | docker load"
	I0620 18:10:37.611804  374244 ssh_runner.go:235] Completed: /bin/bash -c "sudo cat /var/lib/minikube/images/coredns_v1.11.1 | docker load": (1.470918939s)
	I0620 18:10:37.611829  374244 cache_images.go:315] Transferred and loaded /home/jenkins/minikube-integration/19106-2452/.minikube/cache/images/arm64/registry.k8s.io/coredns/coredns_v1.11.1 from cache
	I0620 18:10:37.611847  374244 docker.go:304] Loading image: /var/lib/minikube/images/storage-provisioner_v5
	I0620 18:10:37.611860  374244 ssh_runner.go:195] Run: /bin/bash -c "sudo cat /var/lib/minikube/images/storage-provisioner_v5 | docker load"
	I0620 18:10:37.872572  374244 cache_images.go:315] Transferred and loaded /home/jenkins/minikube-integration/19106-2452/.minikube/cache/images/arm64/gcr.io/k8s-minikube/storage-provisioner_v5 from cache
	I0620 18:10:37.872610  374244 docker.go:304] Loading image: /var/lib/minikube/images/kube-scheduler_v1.30.2
	I0620 18:10:37.872623  374244 ssh_runner.go:195] Run: /bin/bash -c "sudo cat /var/lib/minikube/images/kube-scheduler_v1.30.2 | docker load"
	I0620 18:10:38.870952  374244 cache_images.go:315] Transferred and loaded /home/jenkins/minikube-integration/19106-2452/.minikube/cache/images/arm64/registry.k8s.io/kube-scheduler_v1.30.2 from cache
	I0620 18:10:38.871016  374244 docker.go:304] Loading image: /var/lib/minikube/images/kube-apiserver_v1.30.2
	I0620 18:10:38.871031  374244 ssh_runner.go:195] Run: /bin/bash -c "sudo cat /var/lib/minikube/images/kube-apiserver_v1.30.2 | docker load"
	I0620 18:10:39.798239  374244 cache_images.go:315] Transferred and loaded /home/jenkins/minikube-integration/19106-2452/.minikube/cache/images/arm64/registry.k8s.io/kube-apiserver_v1.30.2 from cache
	I0620 18:10:39.798277  374244 docker.go:304] Loading image: /var/lib/minikube/images/kube-proxy_v1.30.2
	I0620 18:10:39.798300  374244 ssh_runner.go:195] Run: /bin/bash -c "sudo cat /var/lib/minikube/images/kube-proxy_v1.30.2 | docker load"
	I0620 18:10:38.933262  362254 pod_ready.go:102] pod "metrics-server-9975d5f86-nxk7p" in "kube-system" namespace has status "Ready":"False"
	I0620 18:10:41.441396  362254 pod_ready.go:102] pod "metrics-server-9975d5f86-nxk7p" in "kube-system" namespace has status "Ready":"False"
	I0620 18:10:40.852115  374244 ssh_runner.go:235] Completed: /bin/bash -c "sudo cat /var/lib/minikube/images/kube-proxy_v1.30.2 | docker load": (1.053789832s)
	I0620 18:10:40.852144  374244 cache_images.go:315] Transferred and loaded /home/jenkins/minikube-integration/19106-2452/.minikube/cache/images/arm64/registry.k8s.io/kube-proxy_v1.30.2 from cache
	I0620 18:10:40.852163  374244 docker.go:304] Loading image: /var/lib/minikube/images/kube-controller-manager_v1.30.2
	I0620 18:10:40.852174  374244 ssh_runner.go:195] Run: /bin/bash -c "sudo cat /var/lib/minikube/images/kube-controller-manager_v1.30.2 | docker load"
	I0620 18:10:41.756528  374244 cache_images.go:315] Transferred and loaded /home/jenkins/minikube-integration/19106-2452/.minikube/cache/images/arm64/registry.k8s.io/kube-controller-manager_v1.30.2 from cache
	I0620 18:10:41.756570  374244 docker.go:304] Loading image: /var/lib/minikube/images/etcd_3.5.12-0
	I0620 18:10:41.756597  374244 ssh_runner.go:195] Run: /bin/bash -c "sudo cat /var/lib/minikube/images/etcd_3.5.12-0 | docker load"
	I0620 18:10:43.743915  374244 ssh_runner.go:235] Completed: /bin/bash -c "sudo cat /var/lib/minikube/images/etcd_3.5.12-0 | docker load": (1.987292245s)
	I0620 18:10:43.743943  374244 cache_images.go:315] Transferred and loaded /home/jenkins/minikube-integration/19106-2452/.minikube/cache/images/arm64/registry.k8s.io/etcd_3.5.12-0 from cache
	I0620 18:10:43.743964  374244 cache_images.go:123] Successfully loaded all cached images
	I0620 18:10:43.743969  374244 cache_images.go:92] duration metric: took 8.59928085s to LoadCachedImages
	I0620 18:10:43.743987  374244 kubeadm.go:928] updating node { 192.168.76.2 8443 v1.30.2 docker true true} ...
	I0620 18:10:43.744096  374244 kubeadm.go:940] kubelet [Unit]
	Wants=docker.socket
	
	[Service]
	ExecStart=
	ExecStart=/var/lib/minikube/binaries/v1.30.2/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --config=/var/lib/kubelet/config.yaml --hostname-override=no-preload-581163 --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.76.2
	
	[Install]
	 config:
	{KubernetesVersion:v1.30.2 ClusterName:no-preload-581163 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:}
	I0620 18:10:43.744170  374244 ssh_runner.go:195] Run: docker info --format {{.CgroupDriver}}
	I0620 18:10:43.793777  374244 cni.go:84] Creating CNI manager for ""
	I0620 18:10:43.793802  374244 cni.go:158] "docker" driver + "docker" container runtime found on kubernetes v1.24+, recommending bridge
	I0620 18:10:43.793815  374244 kubeadm.go:84] Using pod CIDR: 10.244.0.0/16
	I0620 18:10:43.793835  374244 kubeadm.go:181] kubeadm options: {CertDir:/var/lib/minikube/certs ServiceCIDR:10.96.0.0/12 PodSubnet:10.244.0.0/16 AdvertiseAddress:192.168.76.2 APIServerPort:8443 KubernetesVersion:v1.30.2 EtcdDataDir:/var/lib/minikube/etcd EtcdExtraArgs:map[] ClusterName:no-preload-581163 NodeName:no-preload-581163 DNSDomain:cluster.local CRISocket:/var/run/cri-dockerd.sock ImageRepository: ComponentOptions:[{Component:apiServer ExtraArgs:map[enable-admission-plugins:NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota] Pairs:map[certSANs:["127.0.0.1", "localhost", "192.168.76.2"]]} {Component:controllerManager ExtraArgs:map[allocate-node-cidrs:true leader-elect:false] Pairs:map[]} {Component:scheduler ExtraArgs:map[leader-elect:false] Pairs:map[]}] FeatureArgs:map[] NodeIP:192.168.76.2 CgroupDriver:cgroupfs ClientCAFile:/var/lib/minikube/certs/ca.crt StaticPodPath:/etc/kubernetes/manifests ControlPlaneAddress:control-plane.minikube.internal KubeProxyOptions:map[] ResolvConfSearchRegression:false KubeletConfigOpts:map[containerRuntimeEndpoint:unix:///var/run/cri-dockerd.sock hairpinMode:hairpin-veth runtimeRequestTimeout:15m] PrependCriSocketUnix:true}
	I0620 18:10:43.793973  374244 kubeadm.go:187] kubeadm config:
	apiVersion: kubeadm.k8s.io/v1beta3
	kind: InitConfiguration
	localAPIEndpoint:
	  advertiseAddress: 192.168.76.2
	  bindPort: 8443
	bootstrapTokens:
	  - groups:
	      - system:bootstrappers:kubeadm:default-node-token
	    ttl: 24h0m0s
	    usages:
	      - signing
	      - authentication
	nodeRegistration:
	  criSocket: unix:///var/run/cri-dockerd.sock
	  name: "no-preload-581163"
	  kubeletExtraArgs:
	    node-ip: 192.168.76.2
	  taints: []
	---
	apiVersion: kubeadm.k8s.io/v1beta3
	kind: ClusterConfiguration
	apiServer:
	  certSANs: ["127.0.0.1", "localhost", "192.168.76.2"]
	  extraArgs:
	    enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
	controllerManager:
	  extraArgs:
	    allocate-node-cidrs: "true"
	    leader-elect: "false"
	scheduler:
	  extraArgs:
	    leader-elect: "false"
	certificatesDir: /var/lib/minikube/certs
	clusterName: mk
	controlPlaneEndpoint: control-plane.minikube.internal:8443
	etcd:
	  local:
	    dataDir: /var/lib/minikube/etcd
	    extraArgs:
	      proxy-refresh-interval: "70000"
	kubernetesVersion: v1.30.2
	networking:
	  dnsDomain: cluster.local
	  podSubnet: "10.244.0.0/16"
	  serviceSubnet: 10.96.0.0/12
	---
	apiVersion: kubelet.config.k8s.io/v1beta1
	kind: KubeletConfiguration
	authentication:
	  x509:
	    clientCAFile: /var/lib/minikube/certs/ca.crt
	cgroupDriver: cgroupfs
	containerRuntimeEndpoint: unix:///var/run/cri-dockerd.sock
	hairpinMode: hairpin-veth
	runtimeRequestTimeout: 15m
	clusterDomain: "cluster.local"
	# disable disk resource management by default
	imageGCHighThresholdPercent: 100
	evictionHard:
	  nodefs.available: "0%!"(MISSING)
	  nodefs.inodesFree: "0%!"(MISSING)
	  imagefs.available: "0%!"(MISSING)
	failSwapOn: false
	staticPodPath: /etc/kubernetes/manifests
	---
	apiVersion: kubeproxy.config.k8s.io/v1alpha1
	kind: KubeProxyConfiguration
	clusterCIDR: "10.244.0.0/16"
	metricsBindAddress: 0.0.0.0:10249
	conntrack:
	  maxPerCore: 0
	# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_established"
	  tcpEstablishedTimeout: 0s
	# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_close"
	  tcpCloseWaitTimeout: 0s
	
	I0620 18:10:43.794046  374244 ssh_runner.go:195] Run: sudo ls /var/lib/minikube/binaries/v1.30.2
	I0620 18:10:43.803880  374244 binaries.go:47] Didn't find k8s binaries: sudo ls /var/lib/minikube/binaries/v1.30.2: Process exited with status 2
	stdout:
	
	stderr:
	ls: cannot access '/var/lib/minikube/binaries/v1.30.2': No such file or directory
	
	Initiating transfer...
	I0620 18:10:43.803947  374244 ssh_runner.go:195] Run: sudo mkdir -p /var/lib/minikube/binaries/v1.30.2
	I0620 18:10:43.813121  374244 binary.go:76] Not caching binary, using https://dl.k8s.io/release/v1.30.2/bin/linux/arm64/kubectl?checksum=file:https://dl.k8s.io/release/v1.30.2/bin/linux/arm64/kubectl.sha256
	I0620 18:10:43.813219  374244 ssh_runner.go:195] Run: stat -c "%!s(MISSING) %!y(MISSING)" /var/lib/minikube/binaries/v1.30.2/kubectl
	I0620 18:10:43.813803  374244 download.go:107] Downloading: https://dl.k8s.io/release/v1.30.2/bin/linux/arm64/kubeadm?checksum=file:https://dl.k8s.io/release/v1.30.2/bin/linux/arm64/kubeadm.sha256 -> /home/jenkins/minikube-integration/19106-2452/.minikube/cache/linux/arm64/v1.30.2/kubeadm
	I0620 18:10:43.814011  374244 download.go:107] Downloading: https://dl.k8s.io/release/v1.30.2/bin/linux/arm64/kubelet?checksum=file:https://dl.k8s.io/release/v1.30.2/bin/linux/arm64/kubelet.sha256 -> /home/jenkins/minikube-integration/19106-2452/.minikube/cache/linux/arm64/v1.30.2/kubelet
	I0620 18:10:43.818027  374244 ssh_runner.go:352] existence check for /var/lib/minikube/binaries/v1.30.2/kubectl: stat -c "%!s(MISSING) %!y(MISSING)" /var/lib/minikube/binaries/v1.30.2/kubectl: Process exited with status 1
	stdout:
	
	stderr:
	stat: cannot statx '/var/lib/minikube/binaries/v1.30.2/kubectl': No such file or directory
	I0620 18:10:43.818062  374244 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19106-2452/.minikube/cache/linux/arm64/v1.30.2/kubectl --> /var/lib/minikube/binaries/v1.30.2/kubectl (49938584 bytes)
	I0620 18:10:44.533801  374244 ssh_runner.go:195] Run: stat -c "%!s(MISSING) %!y(MISSING)" /var/lib/minikube/binaries/v1.30.2/kubeadm
	I0620 18:10:44.538845  374244 ssh_runner.go:352] existence check for /var/lib/minikube/binaries/v1.30.2/kubeadm: stat -c "%!s(MISSING) %!y(MISSING)" /var/lib/minikube/binaries/v1.30.2/kubeadm: Process exited with status 1
	stdout:
	
	stderr:
	stat: cannot statx '/var/lib/minikube/binaries/v1.30.2/kubeadm': No such file or directory
	I0620 18:10:44.538887  374244 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19106-2452/.minikube/cache/linux/arm64/v1.30.2/kubeadm --> /var/lib/minikube/binaries/v1.30.2/kubeadm (48955544 bytes)
	I0620 18:10:45.067302  374244 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet
	I0620 18:10:45.087469  374244 ssh_runner.go:195] Run: stat -c "%!s(MISSING) %!y(MISSING)" /var/lib/minikube/binaries/v1.30.2/kubelet
	I0620 18:10:45.094567  374244 ssh_runner.go:352] existence check for /var/lib/minikube/binaries/v1.30.2/kubelet: stat -c "%!s(MISSING) %!y(MISSING)" /var/lib/minikube/binaries/v1.30.2/kubelet: Process exited with status 1
	stdout:
	
	stderr:
	stat: cannot statx '/var/lib/minikube/binaries/v1.30.2/kubelet': No such file or directory
	I0620 18:10:45.094612  374244 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19106-2452/.minikube/cache/linux/arm64/v1.30.2/kubelet --> /var/lib/minikube/binaries/v1.30.2/kubelet (96463128 bytes)
	I0620 18:10:43.954833  362254 pod_ready.go:102] pod "metrics-server-9975d5f86-nxk7p" in "kube-system" namespace has status "Ready":"False"
	I0620 18:10:46.530593  362254 pod_ready.go:102] pod "metrics-server-9975d5f86-nxk7p" in "kube-system" namespace has status "Ready":"False"
	I0620 18:10:45.714593  374244 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/kubelet.service.d /lib/systemd/system /var/tmp/minikube
	I0620 18:10:45.724665  374244 ssh_runner.go:362] scp memory --> /etc/systemd/system/kubelet.service.d/10-kubeadm.conf (316 bytes)
	I0620 18:10:45.744135  374244 ssh_runner.go:362] scp memory --> /lib/systemd/system/kubelet.service (352 bytes)
	I0620 18:10:45.764040  374244 ssh_runner.go:362] scp memory --> /var/tmp/minikube/kubeadm.yaml.new (2159 bytes)
	I0620 18:10:45.783885  374244 ssh_runner.go:195] Run: grep 192.168.76.2	control-plane.minikube.internal$ /etc/hosts
	I0620 18:10:45.787547  374244 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\tcontrol-plane.minikube.internal$' "/etc/hosts"; echo "192.168.76.2	control-plane.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
	I0620 18:10:45.800633  374244 ssh_runner.go:195] Run: sudo systemctl daemon-reload
	I0620 18:10:45.903546  374244 ssh_runner.go:195] Run: sudo systemctl start kubelet
	I0620 18:10:45.922094  374244 certs.go:68] Setting up /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/no-preload-581163 for IP: 192.168.76.2
	I0620 18:10:45.922154  374244 certs.go:194] generating shared ca certs ...
	I0620 18:10:45.922184  374244 certs.go:226] acquiring lock for ca certs: {Name:mk1f8a102b3933d1e67f4b3f5a97c6bde91190df Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
	I0620 18:10:45.922343  374244 certs.go:235] skipping valid "minikubeCA" ca cert: /home/jenkins/minikube-integration/19106-2452/.minikube/ca.key
	I0620 18:10:45.922410  374244 certs.go:235] skipping valid "proxyClientCA" ca cert: /home/jenkins/minikube-integration/19106-2452/.minikube/proxy-client-ca.key
	I0620 18:10:45.922456  374244 certs.go:256] generating profile certs ...
	I0620 18:10:45.922543  374244 certs.go:363] generating signed profile cert for "minikube-user": /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/no-preload-581163/client.key
	I0620 18:10:45.922589  374244 crypto.go:68] Generating cert /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/no-preload-581163/client.crt with IP's: []
	I0620 18:10:46.306291  374244 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/no-preload-581163/client.crt ...
	I0620 18:10:46.306361  374244 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/no-preload-581163/client.crt: {Name:mka3b4d553cc119a74ee0a077766b116258d014c Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
	I0620 18:10:46.307212  374244 crypto.go:164] Writing key to /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/no-preload-581163/client.key ...
	I0620 18:10:46.307229  374244 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/no-preload-581163/client.key: {Name:mkdd6c1442c582cbf1a959446667cfde1a63cf26 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
	I0620 18:10:46.307335  374244 certs.go:363] generating signed profile cert for "minikube": /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/no-preload-581163/apiserver.key.7e6ca817
	I0620 18:10:46.307358  374244 crypto.go:68] Generating cert /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/no-preload-581163/apiserver.crt.7e6ca817 with IP's: [10.96.0.1 127.0.0.1 10.0.0.1 192.168.76.2]
	I0620 18:10:46.929484  374244 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/no-preload-581163/apiserver.crt.7e6ca817 ...
	I0620 18:10:46.929512  374244 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/no-preload-581163/apiserver.crt.7e6ca817: {Name:mka752e639e97333050738282c8cc6a508cfb4f0 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
	I0620 18:10:46.929675  374244 crypto.go:164] Writing key to /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/no-preload-581163/apiserver.key.7e6ca817 ...
	I0620 18:10:46.929686  374244 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/no-preload-581163/apiserver.key.7e6ca817: {Name:mk34759e0fa1614ef83e0929ecfd6a5c5d3db620 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
	I0620 18:10:46.929758  374244 certs.go:381] copying /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/no-preload-581163/apiserver.crt.7e6ca817 -> /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/no-preload-581163/apiserver.crt
	I0620 18:10:46.929828  374244 certs.go:385] copying /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/no-preload-581163/apiserver.key.7e6ca817 -> /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/no-preload-581163/apiserver.key
	I0620 18:10:46.929878  374244 certs.go:363] generating signed profile cert for "aggregator": /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/no-preload-581163/proxy-client.key
	I0620 18:10:46.929894  374244 crypto.go:68] Generating cert /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/no-preload-581163/proxy-client.crt with IP's: []
	I0620 18:10:47.491736  374244 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/no-preload-581163/proxy-client.crt ...
	I0620 18:10:47.491765  374244 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/no-preload-581163/proxy-client.crt: {Name:mk1800b16acb258c10e8281e5efbd4d64210a20c Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
	I0620 18:10:47.491974  374244 crypto.go:164] Writing key to /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/no-preload-581163/proxy-client.key ...
	I0620 18:10:47.491997  374244 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/no-preload-581163/proxy-client.key: {Name:mkb7f0fc5637157da44438ef72e369e98b9b596a Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
	I0620 18:10:47.492196  374244 certs.go:484] found cert: /home/jenkins/minikube-integration/19106-2452/.minikube/certs/7784.pem (1338 bytes)
	W0620 18:10:47.492242  374244 certs.go:480] ignoring /home/jenkins/minikube-integration/19106-2452/.minikube/certs/7784_empty.pem, impossibly tiny 0 bytes
	I0620 18:10:47.492255  374244 certs.go:484] found cert: /home/jenkins/minikube-integration/19106-2452/.minikube/certs/ca-key.pem (1675 bytes)
	I0620 18:10:47.492280  374244 certs.go:484] found cert: /home/jenkins/minikube-integration/19106-2452/.minikube/certs/ca.pem (1078 bytes)
	I0620 18:10:47.492310  374244 certs.go:484] found cert: /home/jenkins/minikube-integration/19106-2452/.minikube/certs/cert.pem (1123 bytes)
	I0620 18:10:47.492335  374244 certs.go:484] found cert: /home/jenkins/minikube-integration/19106-2452/.minikube/certs/key.pem (1675 bytes)
	I0620 18:10:47.492385  374244 certs.go:484] found cert: /home/jenkins/minikube-integration/19106-2452/.minikube/files/etc/ssl/certs/77842.pem (1708 bytes)
	I0620 18:10:47.493050  374244 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19106-2452/.minikube/ca.crt --> /var/lib/minikube/certs/ca.crt (1111 bytes)
	I0620 18:10:47.519069  374244 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19106-2452/.minikube/ca.key --> /var/lib/minikube/certs/ca.key (1679 bytes)
	I0620 18:10:47.555847  374244 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19106-2452/.minikube/proxy-client-ca.crt --> /var/lib/minikube/certs/proxy-client-ca.crt (1119 bytes)
	I0620 18:10:47.586472  374244 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19106-2452/.minikube/proxy-client-ca.key --> /var/lib/minikube/certs/proxy-client-ca.key (1679 bytes)
	I0620 18:10:47.618627  374244 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/no-preload-581163/apiserver.crt --> /var/lib/minikube/certs/apiserver.crt (1424 bytes)
	I0620 18:10:47.643882  374244 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/no-preload-581163/apiserver.key --> /var/lib/minikube/certs/apiserver.key (1675 bytes)
	I0620 18:10:47.670082  374244 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/no-preload-581163/proxy-client.crt --> /var/lib/minikube/certs/proxy-client.crt (1147 bytes)
	I0620 18:10:47.695313  374244 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/no-preload-581163/proxy-client.key --> /var/lib/minikube/certs/proxy-client.key (1679 bytes)
	I0620 18:10:47.724765  374244 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19106-2452/.minikube/files/etc/ssl/certs/77842.pem --> /usr/share/ca-certificates/77842.pem (1708 bytes)
	I0620 18:10:47.751788  374244 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19106-2452/.minikube/ca.crt --> /usr/share/ca-certificates/minikubeCA.pem (1111 bytes)
	I0620 18:10:47.776780  374244 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19106-2452/.minikube/certs/7784.pem --> /usr/share/ca-certificates/7784.pem (1338 bytes)
	I0620 18:10:47.801639  374244 ssh_runner.go:362] scp memory --> /var/lib/minikube/kubeconfig (738 bytes)
	I0620 18:10:47.819523  374244 ssh_runner.go:195] Run: openssl version
	I0620 18:10:47.826694  374244 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/77842.pem && ln -fs /usr/share/ca-certificates/77842.pem /etc/ssl/certs/77842.pem"
	I0620 18:10:47.836825  374244 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/77842.pem
	I0620 18:10:47.840453  374244 certs.go:528] hashing: -rw-r--r-- 1 root root 1708 Jun 20 17:09 /usr/share/ca-certificates/77842.pem
	I0620 18:10:47.840558  374244 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/77842.pem
	I0620 18:10:47.849427  374244 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/3ec20f2e.0 || ln -fs /etc/ssl/certs/77842.pem /etc/ssl/certs/3ec20f2e.0"
	I0620 18:10:47.858798  374244 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/minikubeCA.pem && ln -fs /usr/share/ca-certificates/minikubeCA.pem /etc/ssl/certs/minikubeCA.pem"
	I0620 18:10:47.868718  374244 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/minikubeCA.pem
	I0620 18:10:47.872663  374244 certs.go:528] hashing: -rw-r--r-- 1 root root 1111 Jun 20 17:02 /usr/share/ca-certificates/minikubeCA.pem
	I0620 18:10:47.872746  374244 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem
	I0620 18:10:47.879498  374244 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/b5213941.0 || ln -fs /etc/ssl/certs/minikubeCA.pem /etc/ssl/certs/b5213941.0"
	I0620 18:10:47.888559  374244 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/7784.pem && ln -fs /usr/share/ca-certificates/7784.pem /etc/ssl/certs/7784.pem"
	I0620 18:10:47.897827  374244 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/7784.pem
	I0620 18:10:47.901415  374244 certs.go:528] hashing: -rw-r--r-- 1 root root 1338 Jun 20 17:09 /usr/share/ca-certificates/7784.pem
	I0620 18:10:47.901482  374244 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/7784.pem
	I0620 18:10:47.908648  374244 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/51391683.0 || ln -fs /etc/ssl/certs/7784.pem /etc/ssl/certs/51391683.0"
	I0620 18:10:47.918121  374244 ssh_runner.go:195] Run: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt
	I0620 18:10:47.921295  374244 certs.go:399] 'apiserver-kubelet-client' cert doesn't exist, likely first start: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt: Process exited with status 1
	stdout:
	
	stderr:
	stat: cannot statx '/var/lib/minikube/certs/apiserver-kubelet-client.crt': No such file or directory
	I0620 18:10:47.921350  374244 kubeadm.go:391] StartCluster: {Name:no-preload-581163 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1718753665-19106@sha256:735aacbd61d487240dc39ba6e4d70dd6ae1ad6181ca2ba092d372605e48ee636 Memory:2200 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.30.2 ClusterName:no-preload-581163 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.76.2 Port:8443 KubernetesVersion:v1.30.2 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
	I0620 18:10:47.921475  374244 ssh_runner.go:195] Run: docker ps --filter status=paused --filter=name=k8s_.*_(kube-system)_ --format={{.ID}}
	I0620 18:10:47.940408  374244 ssh_runner.go:195] Run: sudo ls /var/lib/kubelet/kubeadm-flags.env /var/lib/kubelet/config.yaml /var/lib/minikube/etcd
	I0620 18:10:47.949541  374244 ssh_runner.go:195] Run: sudo cp /var/tmp/minikube/kubeadm.yaml.new /var/tmp/minikube/kubeadm.yaml
	I0620 18:10:47.958132  374244 kubeadm.go:213] ignoring SystemVerification for kubeadm because of docker driver
	I0620 18:10:47.958195  374244 ssh_runner.go:195] Run: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf
	I0620 18:10:47.966769  374244 kubeadm.go:154] config check failed, skipping stale config cleanup: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf: Process exited with status 2
	stdout:
	
	stderr:
	ls: cannot access '/etc/kubernetes/admin.conf': No such file or directory
	ls: cannot access '/etc/kubernetes/kubelet.conf': No such file or directory
	ls: cannot access '/etc/kubernetes/controller-manager.conf': No such file or directory
	ls: cannot access '/etc/kubernetes/scheduler.conf': No such file or directory
	I0620 18:10:47.966786  374244 kubeadm.go:156] found existing configuration files:
	
	I0620 18:10:47.966850  374244 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf
	I0620 18:10:47.975823  374244 kubeadm.go:162] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/admin.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf: Process exited with status 2
	stdout:
	
	stderr:
	grep: /etc/kubernetes/admin.conf: No such file or directory
	I0620 18:10:47.975887  374244 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/admin.conf
	I0620 18:10:47.985511  374244 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf
	I0620 18:10:47.994094  374244 kubeadm.go:162] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/kubelet.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf: Process exited with status 2
	stdout:
	
	stderr:
	grep: /etc/kubernetes/kubelet.conf: No such file or directory
	I0620 18:10:47.994203  374244 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/kubelet.conf
	I0620 18:10:48.007107  374244 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf
	I0620 18:10:48.018749  374244 kubeadm.go:162] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/controller-manager.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf: Process exited with status 2
	stdout:
	
	stderr:
	grep: /etc/kubernetes/controller-manager.conf: No such file or directory
	I0620 18:10:48.018879  374244 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/controller-manager.conf
	I0620 18:10:48.028858  374244 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf
	I0620 18:10:48.039630  374244 kubeadm.go:162] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/scheduler.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf: Process exited with status 2
	stdout:
	
	stderr:
	grep: /etc/kubernetes/scheduler.conf: No such file or directory
	I0620 18:10:48.039714  374244 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/scheduler.conf
	I0620 18:10:48.049455  374244 ssh_runner.go:286] Start: /bin/bash -c "sudo env PATH="/var/lib/minikube/binaries/v1.30.2:$PATH" kubeadm init --config /var/tmp/minikube/kubeadm.yaml  --ignore-preflight-errors=DirAvailable--etc-kubernetes-manifests,DirAvailable--var-lib-minikube,DirAvailable--var-lib-minikube-etcd,FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml,FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml,FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml,FileAvailable--etc-kubernetes-manifests-etcd.yaml,Port-10250,Swap,NumCPU,Mem,SystemVerification,FileContent--proc-sys-net-bridge-bridge-nf-call-iptables"
	I0620 18:10:48.099107  374244 kubeadm.go:309] [init] Using Kubernetes version: v1.30.2
	I0620 18:10:48.099201  374244 kubeadm.go:309] [preflight] Running pre-flight checks
	I0620 18:10:48.143226  374244 kubeadm.go:309] [preflight] The system verification failed. Printing the output from the verification:
	I0620 18:10:48.143301  374244 kubeadm.go:309] KERNEL_VERSION: 5.15.0-1063-aws
	I0620 18:10:48.143343  374244 kubeadm.go:309] OS: Linux
	I0620 18:10:48.143394  374244 kubeadm.go:309] CGROUPS_CPU: enabled
	I0620 18:10:48.143445  374244 kubeadm.go:309] CGROUPS_CPUACCT: enabled
	I0620 18:10:48.143496  374244 kubeadm.go:309] CGROUPS_CPUSET: enabled
	I0620 18:10:48.143545  374244 kubeadm.go:309] CGROUPS_DEVICES: enabled
	I0620 18:10:48.143596  374244 kubeadm.go:309] CGROUPS_FREEZER: enabled
	I0620 18:10:48.143645  374244 kubeadm.go:309] CGROUPS_MEMORY: enabled
	I0620 18:10:48.143692  374244 kubeadm.go:309] CGROUPS_PIDS: enabled
	I0620 18:10:48.143740  374244 kubeadm.go:309] CGROUPS_HUGETLB: enabled
	I0620 18:10:48.143788  374244 kubeadm.go:309] CGROUPS_BLKIO: enabled
	I0620 18:10:48.226760  374244 kubeadm.go:309] [preflight] Pulling images required for setting up a Kubernetes cluster
	I0620 18:10:48.226954  374244 kubeadm.go:309] [preflight] This might take a minute or two, depending on the speed of your internet connection
	I0620 18:10:48.227101  374244 kubeadm.go:309] [preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
	I0620 18:10:48.503785  374244 kubeadm.go:309] [certs] Using certificateDir folder "/var/lib/minikube/certs"
	I0620 18:10:48.508695  374244 out.go:204]   - Generating certificates and keys ...
	I0620 18:10:48.508886  374244 kubeadm.go:309] [certs] Using existing ca certificate authority
	I0620 18:10:48.508994  374244 kubeadm.go:309] [certs] Using existing apiserver certificate and key on disk
	I0620 18:10:48.745400  374244 kubeadm.go:309] [certs] Generating "apiserver-kubelet-client" certificate and key
	I0620 18:10:49.240360  374244 kubeadm.go:309] [certs] Generating "front-proxy-ca" certificate and key
	I0620 18:10:49.872758  374244 kubeadm.go:309] [certs] Generating "front-proxy-client" certificate and key
	I0620 18:10:48.932893  362254 pod_ready.go:102] pod "metrics-server-9975d5f86-nxk7p" in "kube-system" namespace has status "Ready":"False"
	I0620 18:10:50.936039  362254 pod_ready.go:102] pod "metrics-server-9975d5f86-nxk7p" in "kube-system" namespace has status "Ready":"False"
	I0620 18:10:50.541579  374244 kubeadm.go:309] [certs] Generating "etcd/ca" certificate and key
	I0620 18:10:51.158562  374244 kubeadm.go:309] [certs] Generating "etcd/server" certificate and key
	I0620 18:10:51.158859  374244 kubeadm.go:309] [certs] etcd/server serving cert is signed for DNS names [localhost no-preload-581163] and IPs [192.168.76.2 127.0.0.1 ::1]
	I0620 18:10:51.559660  374244 kubeadm.go:309] [certs] Generating "etcd/peer" certificate and key
	I0620 18:10:51.560050  374244 kubeadm.go:309] [certs] etcd/peer serving cert is signed for DNS names [localhost no-preload-581163] and IPs [192.168.76.2 127.0.0.1 ::1]
	I0620 18:10:52.104042  374244 kubeadm.go:309] [certs] Generating "etcd/healthcheck-client" certificate and key
	I0620 18:10:53.036546  374244 kubeadm.go:309] [certs] Generating "apiserver-etcd-client" certificate and key
	I0620 18:10:53.726948  374244 kubeadm.go:309] [certs] Generating "sa" key and public key
	I0620 18:10:53.736075  374244 kubeadm.go:309] [kubeconfig] Using kubeconfig folder "/etc/kubernetes"
	I0620 18:10:54.420071  374244 kubeadm.go:309] [kubeconfig] Writing "admin.conf" kubeconfig file
	I0620 18:10:52.432238  362254 pod_ready.go:81] duration metric: took 4m0.006216679s for pod "metrics-server-9975d5f86-nxk7p" in "kube-system" namespace to be "Ready" ...
	E0620 18:10:52.432430  362254 pod_ready.go:66] WaitExtra: waitPodCondition: context deadline exceeded
	I0620 18:10:52.432456  362254 pod_ready.go:38] duration metric: took 5m28.676281099s for extra waiting for all system-critical and pods with labels [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] to be "Ready" ...
	I0620 18:10:52.432502  362254 api_server.go:52] waiting for apiserver process to appear ...
	I0620 18:10:52.432625  362254 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_kube-apiserver --format={{.ID}}
	I0620 18:10:52.455740  362254 logs.go:276] 2 containers: [075e697d07c8 760f9a7d272c]
	I0620 18:10:52.455824  362254 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_etcd --format={{.ID}}
	I0620 18:10:52.475939  362254 logs.go:276] 2 containers: [25a953b6e46d 81eaddbb4b45]
	I0620 18:10:52.476016  362254 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_coredns --format={{.ID}}
	I0620 18:10:52.494299  362254 logs.go:276] 2 containers: [5534d9d547fd 7e60d81fce7f]
	I0620 18:10:52.494380  362254 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_kube-scheduler --format={{.ID}}
	I0620 18:10:52.512744  362254 logs.go:276] 2 containers: [4ed5438feb8b ab177be73cae]
	I0620 18:10:52.512827  362254 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_kube-proxy --format={{.ID}}
	I0620 18:10:52.536524  362254 logs.go:276] 2 containers: [0b32a67571bb c47b7591b320]
	I0620 18:10:52.536611  362254 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_kube-controller-manager --format={{.ID}}
	I0620 18:10:52.563389  362254 logs.go:276] 2 containers: [aab72b193950 a4eee0f3ea35]
	I0620 18:10:52.563475  362254 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_kindnet --format={{.ID}}
	I0620 18:10:52.582302  362254 logs.go:276] 0 containers: []
	W0620 18:10:52.582326  362254 logs.go:278] No container was found matching "kindnet"
	I0620 18:10:52.582383  362254 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_kubernetes-dashboard --format={{.ID}}
	I0620 18:10:52.622007  362254 logs.go:276] 1 containers: [49efdc60eccc]
	I0620 18:10:52.622087  362254 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_storage-provisioner --format={{.ID}}
	I0620 18:10:52.650609  362254 logs.go:276] 2 containers: [74b467d165b4 16e28ef3ddc9]
	I0620 18:10:52.650645  362254 logs.go:123] Gathering logs for kube-apiserver [760f9a7d272c] ...
	I0620 18:10:52.650656  362254 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 760f9a7d272c"
	I0620 18:10:52.730252  362254 logs.go:123] Gathering logs for kube-proxy [0b32a67571bb] ...
	I0620 18:10:52.730312  362254 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 0b32a67571bb"
	I0620 18:10:52.753952  362254 logs.go:123] Gathering logs for kube-proxy [c47b7591b320] ...
	I0620 18:10:52.753983  362254 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 c47b7591b320"
	I0620 18:10:52.788507  362254 logs.go:123] Gathering logs for kube-controller-manager [aab72b193950] ...
	I0620 18:10:52.788535  362254 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 aab72b193950"
	I0620 18:10:52.863485  362254 logs.go:123] Gathering logs for container status ...
	I0620 18:10:52.863526  362254 ssh_runner.go:195] Run: /bin/bash -c "sudo `which crictl || echo crictl` ps -a || sudo docker ps -a"
	I0620 18:10:52.948470  362254 logs.go:123] Gathering logs for describe nodes ...
	I0620 18:10:52.948501  362254 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.20.0/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig"
	I0620 18:10:53.151745  362254 logs.go:123] Gathering logs for kube-apiserver [075e697d07c8] ...
	I0620 18:10:53.151776  362254 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 075e697d07c8"
	I0620 18:10:53.215264  362254 logs.go:123] Gathering logs for etcd [81eaddbb4b45] ...
	I0620 18:10:53.215345  362254 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 81eaddbb4b45"
	I0620 18:10:53.278713  362254 logs.go:123] Gathering logs for coredns [5534d9d547fd] ...
	I0620 18:10:53.278786  362254 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 5534d9d547fd"
	I0620 18:10:53.303506  362254 logs.go:123] Gathering logs for storage-provisioner [74b467d165b4] ...
	I0620 18:10:53.303539  362254 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 74b467d165b4"
	I0620 18:10:53.330579  362254 logs.go:123] Gathering logs for kubelet ...
	I0620 18:10:53.330606  362254 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u kubelet -n 400"
	W0620 18:10:53.394844  362254 logs.go:138] Found kubelet problem: Jun 20 18:05:23 old-k8s-version-577369 kubelet[1201]: E0620 18:05:23.695585    1201 reflector.go:138] object-"kube-system"/"metrics-server-token-xd7wc": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets "metrics-server-token-xd7wc" is forbidden: User "system:node:old-k8s-version-577369" cannot list resource "secrets" in API group "" in the namespace "kube-system": no relationship found between node 'old-k8s-version-577369' and this object
	W0620 18:10:53.395084  362254 logs.go:138] Found kubelet problem: Jun 20 18:05:23 old-k8s-version-577369 kubelet[1201]: E0620 18:05:23.698643    1201 reflector.go:138] object-"kube-system"/"kube-proxy": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps "kube-proxy" is forbidden: User "system:node:old-k8s-version-577369" cannot list resource "configmaps" in API group "" in the namespace "kube-system": no relationship found between node 'old-k8s-version-577369' and this object
	W0620 18:10:53.395315  362254 logs.go:138] Found kubelet problem: Jun 20 18:05:23 old-k8s-version-577369 kubelet[1201]: E0620 18:05:23.698704    1201 reflector.go:138] object-"kube-system"/"storage-provisioner-token-b9lfw": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets "storage-provisioner-token-b9lfw" is forbidden: User "system:node:old-k8s-version-577369" cannot list resource "secrets" in API group "" in the namespace "kube-system": no relationship found between node 'old-k8s-version-577369' and this object
	W0620 18:10:53.395517  362254 logs.go:138] Found kubelet problem: Jun 20 18:05:23 old-k8s-version-577369 kubelet[1201]: E0620 18:05:23.698918    1201 reflector.go:138] object-"kube-system"/"coredns": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps "coredns" is forbidden: User "system:node:old-k8s-version-577369" cannot list resource "configmaps" in API group "" in the namespace "kube-system": no relationship found between node 'old-k8s-version-577369' and this object
	W0620 18:10:53.395728  362254 logs.go:138] Found kubelet problem: Jun 20 18:05:23 old-k8s-version-577369 kubelet[1201]: E0620 18:05:23.699130    1201 reflector.go:138] object-"kube-system"/"coredns-token-br6s7": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets "coredns-token-br6s7" is forbidden: User "system:node:old-k8s-version-577369" cannot list resource "secrets" in API group "" in the namespace "kube-system": no relationship found between node 'old-k8s-version-577369' and this object
	W0620 18:10:53.395936  362254 logs.go:138] Found kubelet problem: Jun 20 18:05:23 old-k8s-version-577369 kubelet[1201]: E0620 18:05:23.699223    1201 reflector.go:138] object-"default"/"default-token-9m8v4": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets "default-token-9m8v4" is forbidden: User "system:node:old-k8s-version-577369" cannot list resource "secrets" in API group "" in the namespace "default": no relationship found between node 'old-k8s-version-577369' and this object
	W0620 18:10:53.396154  362254 logs.go:138] Found kubelet problem: Jun 20 18:05:23 old-k8s-version-577369 kubelet[1201]: E0620 18:05:23.699291    1201 reflector.go:138] object-"kube-system"/"kube-proxy-token-kwb2s": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets "kube-proxy-token-kwb2s" is forbidden: User "system:node:old-k8s-version-577369" cannot list resource "secrets" in API group "" in the namespace "kube-system": no relationship found between node 'old-k8s-version-577369' and this object
	W0620 18:10:53.402899  362254 logs.go:138] Found kubelet problem: Jun 20 18:05:28 old-k8s-version-577369 kubelet[1201]: E0620 18:05:27.999585    1201 pod_workers.go:191] Error syncing pod 18cfc59b-503a-463e-80f0-2a08c7711566 ("metrics-server-9975d5f86-nxk7p_kube-system(18cfc59b-503a-463e-80f0-2a08c7711566)"), skipping: failed to "StartContainer" for "metrics-server" with ErrImagePull: "rpc error: code = Unknown desc = Error response from daemon: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.85.1:53: no such host"
	W0620 18:10:53.403705  362254 logs.go:138] Found kubelet problem: Jun 20 18:05:28 old-k8s-version-577369 kubelet[1201]: E0620 18:05:28.453263    1201 pod_workers.go:191] Error syncing pod 18cfc59b-503a-463e-80f0-2a08c7711566 ("metrics-server-9975d5f86-nxk7p_kube-system(18cfc59b-503a-463e-80f0-2a08c7711566)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0620 18:10:53.406169  362254 logs.go:138] Found kubelet problem: Jun 20 18:05:41 old-k8s-version-577369 kubelet[1201]: E0620 18:05:41.280792    1201 pod_workers.go:191] Error syncing pod 18cfc59b-503a-463e-80f0-2a08c7711566 ("metrics-server-9975d5f86-nxk7p_kube-system(18cfc59b-503a-463e-80f0-2a08c7711566)"), skipping: failed to "StartContainer" for "metrics-server" with ErrImagePull: "rpc error: code = Unknown desc = Error response from daemon: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.85.1:53: no such host"
	W0620 18:10:53.410905  362254 logs.go:138] Found kubelet problem: Jun 20 18:05:46 old-k8s-version-577369 kubelet[1201]: E0620 18:05:46.379764    1201 pod_workers.go:191] Error syncing pod a0906cb1-30c6-4e85-9f0e-eb135fb00622 ("dashboard-metrics-scraper-8d5bb5db8-hhrcd_kubernetes-dashboard(a0906cb1-30c6-4e85-9f0e-eb135fb00622)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ErrImagePull: "rpc error: code = Unknown desc = [DEPRECATION NOTICE] Docker Image Format v1 and Docker Image manifest version 2, schema 1 support is disabled by default and will be removed in an upcoming release. Suggest the author of registry.k8s.io/echoserver:1.4 to upgrade the image to the OCI Format or Docker Image manifest v2, schema 2. More information at https://docs.docker.com/go/deprecated-image-specs/"
	W0620 18:10:53.411492  362254 logs.go:138] Found kubelet problem: Jun 20 18:05:46 old-k8s-version-577369 kubelet[1201]: E0620 18:05:46.795712    1201 pod_workers.go:191] Error syncing pod a0906cb1-30c6-4e85-9f0e-eb135fb00622 ("dashboard-metrics-scraper-8d5bb5db8-hhrcd_kubernetes-dashboard(a0906cb1-30c6-4e85-9f0e-eb135fb00622)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0620 18:10:53.411691  362254 logs.go:138] Found kubelet problem: Jun 20 18:05:47 old-k8s-version-577369 kubelet[1201]: E0620 18:05:47.792472    1201 pod_workers.go:191] Error syncing pod a0906cb1-30c6-4e85-9f0e-eb135fb00622 ("dashboard-metrics-scraper-8d5bb5db8-hhrcd_kubernetes-dashboard(a0906cb1-30c6-4e85-9f0e-eb135fb00622)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0620 18:10:53.412058  362254 logs.go:138] Found kubelet problem: Jun 20 18:05:53 old-k8s-version-577369 kubelet[1201]: E0620 18:05:53.229484    1201 pod_workers.go:191] Error syncing pod 18cfc59b-503a-463e-80f0-2a08c7711566 ("metrics-server-9975d5f86-nxk7p_kube-system(18cfc59b-503a-463e-80f0-2a08c7711566)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0620 18:10:53.412707  362254 logs.go:138] Found kubelet problem: Jun 20 18:05:58 old-k8s-version-577369 kubelet[1201]: E0620 18:05:58.937844    1201 pod_workers.go:191] Error syncing pod 47a50d23-d504-4f3b-a3a1-97513673c10e ("storage-provisioner_kube-system(47a50d23-d504-4f3b-a3a1-97513673c10e)"), skipping: failed to "StartContainer" for "storage-provisioner" with CrashLoopBackOff: "back-off 10s restarting failed container=storage-provisioner pod=storage-provisioner_kube-system(47a50d23-d504-4f3b-a3a1-97513673c10e)"
	W0620 18:10:53.415122  362254 logs.go:138] Found kubelet problem: Jun 20 18:06:00 old-k8s-version-577369 kubelet[1201]: E0620 18:06:00.973843    1201 pod_workers.go:191] Error syncing pod a0906cb1-30c6-4e85-9f0e-eb135fb00622 ("dashboard-metrics-scraper-8d5bb5db8-hhrcd_kubernetes-dashboard(a0906cb1-30c6-4e85-9f0e-eb135fb00622)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ErrImagePull: "rpc error: code = Unknown desc = [DEPRECATION NOTICE] Docker Image Format v1 and Docker Image manifest version 2, schema 1 support is disabled by default and will be removed in an upcoming release. Suggest the author of registry.k8s.io/echoserver:1.4 to upgrade the image to the OCI Format or Docker Image manifest v2, schema 2. More information at https://docs.docker.com/go/deprecated-image-specs/"
	W0620 18:10:53.417576  362254 logs.go:138] Found kubelet problem: Jun 20 18:06:06 old-k8s-version-577369 kubelet[1201]: E0620 18:06:06.253227    1201 pod_workers.go:191] Error syncing pod 18cfc59b-503a-463e-80f0-2a08c7711566 ("metrics-server-9975d5f86-nxk7p_kube-system(18cfc59b-503a-463e-80f0-2a08c7711566)"), skipping: failed to "StartContainer" for "metrics-server" with ErrImagePull: "rpc error: code = Unknown desc = Error response from daemon: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.85.1:53: no such host"
	W0620 18:10:53.417909  362254 logs.go:138] Found kubelet problem: Jun 20 18:06:12 old-k8s-version-577369 kubelet[1201]: E0620 18:06:12.213225    1201 pod_workers.go:191] Error syncing pod a0906cb1-30c6-4e85-9f0e-eb135fb00622 ("dashboard-metrics-scraper-8d5bb5db8-hhrcd_kubernetes-dashboard(a0906cb1-30c6-4e85-9f0e-eb135fb00622)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0620 18:10:53.418095  362254 logs.go:138] Found kubelet problem: Jun 20 18:06:20 old-k8s-version-577369 kubelet[1201]: E0620 18:06:20.220208    1201 pod_workers.go:191] Error syncing pod 18cfc59b-503a-463e-80f0-2a08c7711566 ("metrics-server-9975d5f86-nxk7p_kube-system(18cfc59b-503a-463e-80f0-2a08c7711566)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0620 18:10:53.420319  362254 logs.go:138] Found kubelet problem: Jun 20 18:06:25 old-k8s-version-577369 kubelet[1201]: E0620 18:06:25.880053    1201 pod_workers.go:191] Error syncing pod a0906cb1-30c6-4e85-9f0e-eb135fb00622 ("dashboard-metrics-scraper-8d5bb5db8-hhrcd_kubernetes-dashboard(a0906cb1-30c6-4e85-9f0e-eb135fb00622)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ErrImagePull: "rpc error: code = Unknown desc = [DEPRECATION NOTICE] Docker Image Format v1 and Docker Image manifest version 2, schema 1 support is disabled by default and will be removed in an upcoming release. Suggest the author of registry.k8s.io/echoserver:1.4 to upgrade the image to the OCI Format or Docker Image manifest v2, schema 2. More information at https://docs.docker.com/go/deprecated-image-specs/"
	W0620 18:10:53.420506  362254 logs.go:138] Found kubelet problem: Jun 20 18:06:34 old-k8s-version-577369 kubelet[1201]: E0620 18:06:34.213970    1201 pod_workers.go:191] Error syncing pod 18cfc59b-503a-463e-80f0-2a08c7711566 ("metrics-server-9975d5f86-nxk7p_kube-system(18cfc59b-503a-463e-80f0-2a08c7711566)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0620 18:10:53.420708  362254 logs.go:138] Found kubelet problem: Jun 20 18:06:39 old-k8s-version-577369 kubelet[1201]: E0620 18:06:39.231731    1201 pod_workers.go:191] Error syncing pod a0906cb1-30c6-4e85-9f0e-eb135fb00622 ("dashboard-metrics-scraper-8d5bb5db8-hhrcd_kubernetes-dashboard(a0906cb1-30c6-4e85-9f0e-eb135fb00622)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0620 18:10:53.422875  362254 logs.go:138] Found kubelet problem: Jun 20 18:06:49 old-k8s-version-577369 kubelet[1201]: E0620 18:06:49.256445    1201 pod_workers.go:191] Error syncing pod 18cfc59b-503a-463e-80f0-2a08c7711566 ("metrics-server-9975d5f86-nxk7p_kube-system(18cfc59b-503a-463e-80f0-2a08c7711566)"), skipping: failed to "StartContainer" for "metrics-server" with ErrImagePull: "rpc error: code = Unknown desc = Error response from daemon: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.85.1:53: no such host"
	W0620 18:10:53.423094  362254 logs.go:138] Found kubelet problem: Jun 20 18:06:53 old-k8s-version-577369 kubelet[1201]: E0620 18:06:53.229559    1201 pod_workers.go:191] Error syncing pod a0906cb1-30c6-4e85-9f0e-eb135fb00622 ("dashboard-metrics-scraper-8d5bb5db8-hhrcd_kubernetes-dashboard(a0906cb1-30c6-4e85-9f0e-eb135fb00622)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0620 18:10:53.423282  362254 logs.go:138] Found kubelet problem: Jun 20 18:07:02 old-k8s-version-577369 kubelet[1201]: E0620 18:07:02.213019    1201 pod_workers.go:191] Error syncing pod 18cfc59b-503a-463e-80f0-2a08c7711566 ("metrics-server-9975d5f86-nxk7p_kube-system(18cfc59b-503a-463e-80f0-2a08c7711566)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0620 18:10:53.425508  362254 logs.go:138] Found kubelet problem: Jun 20 18:07:07 old-k8s-version-577369 kubelet[1201]: E0620 18:07:07.909458    1201 pod_workers.go:191] Error syncing pod a0906cb1-30c6-4e85-9f0e-eb135fb00622 ("dashboard-metrics-scraper-8d5bb5db8-hhrcd_kubernetes-dashboard(a0906cb1-30c6-4e85-9f0e-eb135fb00622)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ErrImagePull: "rpc error: code = Unknown desc = [DEPRECATION NOTICE] Docker Image Format v1 and Docker Image manifest version 2, schema 1 support is disabled by default and will be removed in an upcoming release. Suggest the author of registry.k8s.io/echoserver:1.4 to upgrade the image to the OCI Format or Docker Image manifest v2, schema 2. More information at https://docs.docker.com/go/deprecated-image-specs/"
	W0620 18:10:53.425695  362254 logs.go:138] Found kubelet problem: Jun 20 18:07:17 old-k8s-version-577369 kubelet[1201]: E0620 18:07:17.213306    1201 pod_workers.go:191] Error syncing pod 18cfc59b-503a-463e-80f0-2a08c7711566 ("metrics-server-9975d5f86-nxk7p_kube-system(18cfc59b-503a-463e-80f0-2a08c7711566)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0620 18:10:53.425892  362254 logs.go:138] Found kubelet problem: Jun 20 18:07:20 old-k8s-version-577369 kubelet[1201]: E0620 18:07:20.213221    1201 pod_workers.go:191] Error syncing pod a0906cb1-30c6-4e85-9f0e-eb135fb00622 ("dashboard-metrics-scraper-8d5bb5db8-hhrcd_kubernetes-dashboard(a0906cb1-30c6-4e85-9f0e-eb135fb00622)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0620 18:10:53.426129  362254 logs.go:138] Found kubelet problem: Jun 20 18:07:32 old-k8s-version-577369 kubelet[1201]: E0620 18:07:32.213001    1201 pod_workers.go:191] Error syncing pod 18cfc59b-503a-463e-80f0-2a08c7711566 ("metrics-server-9975d5f86-nxk7p_kube-system(18cfc59b-503a-463e-80f0-2a08c7711566)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0620 18:10:53.426358  362254 logs.go:138] Found kubelet problem: Jun 20 18:07:35 old-k8s-version-577369 kubelet[1201]: E0620 18:07:35.214923    1201 pod_workers.go:191] Error syncing pod a0906cb1-30c6-4e85-9f0e-eb135fb00622 ("dashboard-metrics-scraper-8d5bb5db8-hhrcd_kubernetes-dashboard(a0906cb1-30c6-4e85-9f0e-eb135fb00622)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0620 18:10:53.426574  362254 logs.go:138] Found kubelet problem: Jun 20 18:07:47 old-k8s-version-577369 kubelet[1201]: E0620 18:07:47.215155    1201 pod_workers.go:191] Error syncing pod 18cfc59b-503a-463e-80f0-2a08c7711566 ("metrics-server-9975d5f86-nxk7p_kube-system(18cfc59b-503a-463e-80f0-2a08c7711566)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0620 18:10:53.426803  362254 logs.go:138] Found kubelet problem: Jun 20 18:07:50 old-k8s-version-577369 kubelet[1201]: E0620 18:07:50.213002    1201 pod_workers.go:191] Error syncing pod a0906cb1-30c6-4e85-9f0e-eb135fb00622 ("dashboard-metrics-scraper-8d5bb5db8-hhrcd_kubernetes-dashboard(a0906cb1-30c6-4e85-9f0e-eb135fb00622)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0620 18:10:53.427024  362254 logs.go:138] Found kubelet problem: Jun 20 18:08:02 old-k8s-version-577369 kubelet[1201]: E0620 18:08:02.213224    1201 pod_workers.go:191] Error syncing pod 18cfc59b-503a-463e-80f0-2a08c7711566 ("metrics-server-9975d5f86-nxk7p_kube-system(18cfc59b-503a-463e-80f0-2a08c7711566)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0620 18:10:53.427277  362254 logs.go:138] Found kubelet problem: Jun 20 18:08:04 old-k8s-version-577369 kubelet[1201]: E0620 18:08:04.224800    1201 pod_workers.go:191] Error syncing pod a0906cb1-30c6-4e85-9f0e-eb135fb00622 ("dashboard-metrics-scraper-8d5bb5db8-hhrcd_kubernetes-dashboard(a0906cb1-30c6-4e85-9f0e-eb135fb00622)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0620 18:10:53.429643  362254 logs.go:138] Found kubelet problem: Jun 20 18:08:13 old-k8s-version-577369 kubelet[1201]: E0620 18:08:13.248654    1201 pod_workers.go:191] Error syncing pod 18cfc59b-503a-463e-80f0-2a08c7711566 ("metrics-server-9975d5f86-nxk7p_kube-system(18cfc59b-503a-463e-80f0-2a08c7711566)"), skipping: failed to "StartContainer" for "metrics-server" with ErrImagePull: "rpc error: code = Unknown desc = Error response from daemon: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.85.1:53: no such host"
	W0620 18:10:53.429866  362254 logs.go:138] Found kubelet problem: Jun 20 18:08:18 old-k8s-version-577369 kubelet[1201]: E0620 18:08:18.213216    1201 pod_workers.go:191] Error syncing pod a0906cb1-30c6-4e85-9f0e-eb135fb00622 ("dashboard-metrics-scraper-8d5bb5db8-hhrcd_kubernetes-dashboard(a0906cb1-30c6-4e85-9f0e-eb135fb00622)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0620 18:10:53.430079  362254 logs.go:138] Found kubelet problem: Jun 20 18:08:24 old-k8s-version-577369 kubelet[1201]: E0620 18:08:24.213044    1201 pod_workers.go:191] Error syncing pod 18cfc59b-503a-463e-80f0-2a08c7711566 ("metrics-server-9975d5f86-nxk7p_kube-system(18cfc59b-503a-463e-80f0-2a08c7711566)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0620 18:10:53.432435  362254 logs.go:138] Found kubelet problem: Jun 20 18:08:31 old-k8s-version-577369 kubelet[1201]: E0620 18:08:31.863326    1201 pod_workers.go:191] Error syncing pod a0906cb1-30c6-4e85-9f0e-eb135fb00622 ("dashboard-metrics-scraper-8d5bb5db8-hhrcd_kubernetes-dashboard(a0906cb1-30c6-4e85-9f0e-eb135fb00622)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ErrImagePull: "rpc error: code = Unknown desc = [DEPRECATION NOTICE] Docker Image Format v1 and Docker Image manifest version 2, schema 1 support is disabled by default and will be removed in an upcoming release. Suggest the author of registry.k8s.io/echoserver:1.4 to upgrade the image to the OCI Format or Docker Image manifest v2, schema 2. More information at https://docs.docker.com/go/deprecated-image-specs/"
	W0620 18:10:53.432690  362254 logs.go:138] Found kubelet problem: Jun 20 18:08:35 old-k8s-version-577369 kubelet[1201]: E0620 18:08:35.226288    1201 pod_workers.go:191] Error syncing pod 18cfc59b-503a-463e-80f0-2a08c7711566 ("metrics-server-9975d5f86-nxk7p_kube-system(18cfc59b-503a-463e-80f0-2a08c7711566)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0620 18:10:53.432906  362254 logs.go:138] Found kubelet problem: Jun 20 18:08:44 old-k8s-version-577369 kubelet[1201]: E0620 18:08:44.214061    1201 pod_workers.go:191] Error syncing pod a0906cb1-30c6-4e85-9f0e-eb135fb00622 ("dashboard-metrics-scraper-8d5bb5db8-hhrcd_kubernetes-dashboard(a0906cb1-30c6-4e85-9f0e-eb135fb00622)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0620 18:10:53.433095  362254 logs.go:138] Found kubelet problem: Jun 20 18:08:46 old-k8s-version-577369 kubelet[1201]: E0620 18:08:46.213051    1201 pod_workers.go:191] Error syncing pod 18cfc59b-503a-463e-80f0-2a08c7711566 ("metrics-server-9975d5f86-nxk7p_kube-system(18cfc59b-503a-463e-80f0-2a08c7711566)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0620 18:10:53.433312  362254 logs.go:138] Found kubelet problem: Jun 20 18:08:57 old-k8s-version-577369 kubelet[1201]: E0620 18:08:57.213817    1201 pod_workers.go:191] Error syncing pod a0906cb1-30c6-4e85-9f0e-eb135fb00622 ("dashboard-metrics-scraper-8d5bb5db8-hhrcd_kubernetes-dashboard(a0906cb1-30c6-4e85-9f0e-eb135fb00622)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0620 18:10:53.434754  362254 logs.go:138] Found kubelet problem: Jun 20 18:08:58 old-k8s-version-577369 kubelet[1201]: E0620 18:08:58.213216    1201 pod_workers.go:191] Error syncing pod 18cfc59b-503a-463e-80f0-2a08c7711566 ("metrics-server-9975d5f86-nxk7p_kube-system(18cfc59b-503a-463e-80f0-2a08c7711566)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0620 18:10:53.434962  362254 logs.go:138] Found kubelet problem: Jun 20 18:09:11 old-k8s-version-577369 kubelet[1201]: E0620 18:09:11.219178    1201 pod_workers.go:191] Error syncing pod 18cfc59b-503a-463e-80f0-2a08c7711566 ("metrics-server-9975d5f86-nxk7p_kube-system(18cfc59b-503a-463e-80f0-2a08c7711566)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0620 18:10:53.435211  362254 logs.go:138] Found kubelet problem: Jun 20 18:09:11 old-k8s-version-577369 kubelet[1201]: E0620 18:09:11.223804    1201 pod_workers.go:191] Error syncing pod a0906cb1-30c6-4e85-9f0e-eb135fb00622 ("dashboard-metrics-scraper-8d5bb5db8-hhrcd_kubernetes-dashboard(a0906cb1-30c6-4e85-9f0e-eb135fb00622)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0620 18:10:53.435405  362254 logs.go:138] Found kubelet problem: Jun 20 18:09:25 old-k8s-version-577369 kubelet[1201]: E0620 18:09:25.213628    1201 pod_workers.go:191] Error syncing pod 18cfc59b-503a-463e-80f0-2a08c7711566 ("metrics-server-9975d5f86-nxk7p_kube-system(18cfc59b-503a-463e-80f0-2a08c7711566)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0620 18:10:53.435612  362254 logs.go:138] Found kubelet problem: Jun 20 18:09:25 old-k8s-version-577369 kubelet[1201]: E0620 18:09:25.220085    1201 pod_workers.go:191] Error syncing pod a0906cb1-30c6-4e85-9f0e-eb135fb00622 ("dashboard-metrics-scraper-8d5bb5db8-hhrcd_kubernetes-dashboard(a0906cb1-30c6-4e85-9f0e-eb135fb00622)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0620 18:10:53.435796  362254 logs.go:138] Found kubelet problem: Jun 20 18:09:37 old-k8s-version-577369 kubelet[1201]: E0620 18:09:37.218633    1201 pod_workers.go:191] Error syncing pod 18cfc59b-503a-463e-80f0-2a08c7711566 ("metrics-server-9975d5f86-nxk7p_kube-system(18cfc59b-503a-463e-80f0-2a08c7711566)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0620 18:10:53.435993  362254 logs.go:138] Found kubelet problem: Jun 20 18:09:39 old-k8s-version-577369 kubelet[1201]: E0620 18:09:39.213129    1201 pod_workers.go:191] Error syncing pod a0906cb1-30c6-4e85-9f0e-eb135fb00622 ("dashboard-metrics-scraper-8d5bb5db8-hhrcd_kubernetes-dashboard(a0906cb1-30c6-4e85-9f0e-eb135fb00622)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0620 18:10:53.436176  362254 logs.go:138] Found kubelet problem: Jun 20 18:09:49 old-k8s-version-577369 kubelet[1201]: E0620 18:09:49.212880    1201 pod_workers.go:191] Error syncing pod 18cfc59b-503a-463e-80f0-2a08c7711566 ("metrics-server-9975d5f86-nxk7p_kube-system(18cfc59b-503a-463e-80f0-2a08c7711566)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0620 18:10:53.436373  362254 logs.go:138] Found kubelet problem: Jun 20 18:09:52 old-k8s-version-577369 kubelet[1201]: E0620 18:09:52.213161    1201 pod_workers.go:191] Error syncing pod a0906cb1-30c6-4e85-9f0e-eb135fb00622 ("dashboard-metrics-scraper-8d5bb5db8-hhrcd_kubernetes-dashboard(a0906cb1-30c6-4e85-9f0e-eb135fb00622)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0620 18:10:53.436556  362254 logs.go:138] Found kubelet problem: Jun 20 18:10:01 old-k8s-version-577369 kubelet[1201]: E0620 18:10:01.220058    1201 pod_workers.go:191] Error syncing pod 18cfc59b-503a-463e-80f0-2a08c7711566 ("metrics-server-9975d5f86-nxk7p_kube-system(18cfc59b-503a-463e-80f0-2a08c7711566)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0620 18:10:53.436792  362254 logs.go:138] Found kubelet problem: Jun 20 18:10:07 old-k8s-version-577369 kubelet[1201]: E0620 18:10:07.213133    1201 pod_workers.go:191] Error syncing pod a0906cb1-30c6-4e85-9f0e-eb135fb00622 ("dashboard-metrics-scraper-8d5bb5db8-hhrcd_kubernetes-dashboard(a0906cb1-30c6-4e85-9f0e-eb135fb00622)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0620 18:10:53.436983  362254 logs.go:138] Found kubelet problem: Jun 20 18:10:16 old-k8s-version-577369 kubelet[1201]: E0620 18:10:16.212946    1201 pod_workers.go:191] Error syncing pod 18cfc59b-503a-463e-80f0-2a08c7711566 ("metrics-server-9975d5f86-nxk7p_kube-system(18cfc59b-503a-463e-80f0-2a08c7711566)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0620 18:10:53.437180  362254 logs.go:138] Found kubelet problem: Jun 20 18:10:18 old-k8s-version-577369 kubelet[1201]: E0620 18:10:18.220165    1201 pod_workers.go:191] Error syncing pod a0906cb1-30c6-4e85-9f0e-eb135fb00622 ("dashboard-metrics-scraper-8d5bb5db8-hhrcd_kubernetes-dashboard(a0906cb1-30c6-4e85-9f0e-eb135fb00622)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0620 18:10:53.437378  362254 logs.go:138] Found kubelet problem: Jun 20 18:10:31 old-k8s-version-577369 kubelet[1201]: E0620 18:10:31.213199    1201 pod_workers.go:191] Error syncing pod a0906cb1-30c6-4e85-9f0e-eb135fb00622 ("dashboard-metrics-scraper-8d5bb5db8-hhrcd_kubernetes-dashboard(a0906cb1-30c6-4e85-9f0e-eb135fb00622)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0620 18:10:53.437565  362254 logs.go:138] Found kubelet problem: Jun 20 18:10:31 old-k8s-version-577369 kubelet[1201]: E0620 18:10:31.215427    1201 pod_workers.go:191] Error syncing pod 18cfc59b-503a-463e-80f0-2a08c7711566 ("metrics-server-9975d5f86-nxk7p_kube-system(18cfc59b-503a-463e-80f0-2a08c7711566)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0620 18:10:53.443097  362254 logs.go:138] Found kubelet problem: Jun 20 18:10:42 old-k8s-version-577369 kubelet[1201]: E0620 18:10:42.213713    1201 pod_workers.go:191] Error syncing pod a0906cb1-30c6-4e85-9f0e-eb135fb00622 ("dashboard-metrics-scraper-8d5bb5db8-hhrcd_kubernetes-dashboard(a0906cb1-30c6-4e85-9f0e-eb135fb00622)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0620 18:10:53.443303  362254 logs.go:138] Found kubelet problem: Jun 20 18:10:43 old-k8s-version-577369 kubelet[1201]: E0620 18:10:43.221571    1201 pod_workers.go:191] Error syncing pod 18cfc59b-503a-463e-80f0-2a08c7711566 ("metrics-server-9975d5f86-nxk7p_kube-system(18cfc59b-503a-463e-80f0-2a08c7711566)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	I0620 18:10:53.443318  362254 logs.go:123] Gathering logs for etcd [25a953b6e46d] ...
	I0620 18:10:53.443331  362254 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 25a953b6e46d"
	I0620 18:10:53.484821  362254 logs.go:123] Gathering logs for storage-provisioner [16e28ef3ddc9] ...
	I0620 18:10:53.484852  362254 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 16e28ef3ddc9"
	I0620 18:10:53.512687  362254 logs.go:123] Gathering logs for Docker ...
	I0620 18:10:53.512715  362254 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u docker -u cri-docker -n 400"
	I0620 18:10:53.542148  362254 logs.go:123] Gathering logs for kubernetes-dashboard [49efdc60eccc] ...
	I0620 18:10:53.542183  362254 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 49efdc60eccc"
	I0620 18:10:53.570140  362254 logs.go:123] Gathering logs for dmesg ...
	I0620 18:10:53.570171  362254 ssh_runner.go:195] Run: /bin/bash -c "sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400"
	I0620 18:10:53.588771  362254 logs.go:123] Gathering logs for coredns [7e60d81fce7f] ...
	I0620 18:10:53.588800  362254 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 7e60d81fce7f"
	I0620 18:10:53.615089  362254 logs.go:123] Gathering logs for kube-scheduler [4ed5438feb8b] ...
	I0620 18:10:53.615127  362254 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 4ed5438feb8b"
	I0620 18:10:53.647925  362254 logs.go:123] Gathering logs for kube-scheduler [ab177be73cae] ...
	I0620 18:10:53.647953  362254 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 ab177be73cae"
	I0620 18:10:53.678028  362254 logs.go:123] Gathering logs for kube-controller-manager [a4eee0f3ea35] ...
	I0620 18:10:53.678058  362254 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 a4eee0f3ea35"
	I0620 18:10:53.741842  362254 out.go:304] Setting ErrFile to fd 2...
	I0620 18:10:53.741873  362254 out.go:338] TERM=,COLORTERM=, which probably does not support color
	W0620 18:10:53.741941  362254 out.go:239] X Problems detected in kubelet:
	W0620 18:10:53.741956  362254 out.go:239]   Jun 20 18:10:18 old-k8s-version-577369 kubelet[1201]: E0620 18:10:18.220165    1201 pod_workers.go:191] Error syncing pod a0906cb1-30c6-4e85-9f0e-eb135fb00622 ("dashboard-metrics-scraper-8d5bb5db8-hhrcd_kubernetes-dashboard(a0906cb1-30c6-4e85-9f0e-eb135fb00622)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0620 18:10:53.741969  362254 out.go:239]   Jun 20 18:10:31 old-k8s-version-577369 kubelet[1201]: E0620 18:10:31.213199    1201 pod_workers.go:191] Error syncing pod a0906cb1-30c6-4e85-9f0e-eb135fb00622 ("dashboard-metrics-scraper-8d5bb5db8-hhrcd_kubernetes-dashboard(a0906cb1-30c6-4e85-9f0e-eb135fb00622)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0620 18:10:53.741977  362254 out.go:239]   Jun 20 18:10:31 old-k8s-version-577369 kubelet[1201]: E0620 18:10:31.215427    1201 pod_workers.go:191] Error syncing pod 18cfc59b-503a-463e-80f0-2a08c7711566 ("metrics-server-9975d5f86-nxk7p_kube-system(18cfc59b-503a-463e-80f0-2a08c7711566)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0620 18:10:53.741984  362254 out.go:239]   Jun 20 18:10:42 old-k8s-version-577369 kubelet[1201]: E0620 18:10:42.213713    1201 pod_workers.go:191] Error syncing pod a0906cb1-30c6-4e85-9f0e-eb135fb00622 ("dashboard-metrics-scraper-8d5bb5db8-hhrcd_kubernetes-dashboard(a0906cb1-30c6-4e85-9f0e-eb135fb00622)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0620 18:10:53.742104  362254 out.go:239]   Jun 20 18:10:43 old-k8s-version-577369 kubelet[1201]: E0620 18:10:43.221571    1201 pod_workers.go:191] Error syncing pod 18cfc59b-503a-463e-80f0-2a08c7711566 ("metrics-server-9975d5f86-nxk7p_kube-system(18cfc59b-503a-463e-80f0-2a08c7711566)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	I0620 18:10:53.742120  362254 out.go:304] Setting ErrFile to fd 2...
	I0620 18:10:53.742126  362254 out.go:338] TERM=,COLORTERM=, which probably does not support color
	I0620 18:10:55.972976  374244 kubeadm.go:309] [kubeconfig] Writing "super-admin.conf" kubeconfig file
	I0620 18:10:56.437195  374244 kubeadm.go:309] [kubeconfig] Writing "kubelet.conf" kubeconfig file
	I0620 18:10:56.906582  374244 kubeadm.go:309] [kubeconfig] Writing "controller-manager.conf" kubeconfig file
	I0620 18:10:58.378177  374244 kubeadm.go:309] [kubeconfig] Writing "scheduler.conf" kubeconfig file
	I0620 18:10:58.378517  374244 kubeadm.go:309] [etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
	I0620 18:10:58.381887  374244 kubeadm.go:309] [control-plane] Using manifest folder "/etc/kubernetes/manifests"
	I0620 18:10:58.384376  374244 out.go:204]   - Booting up control plane ...
	I0620 18:10:58.384477  374244 kubeadm.go:309] [control-plane] Creating static Pod manifest for "kube-apiserver"
	I0620 18:10:58.384555  374244 kubeadm.go:309] [control-plane] Creating static Pod manifest for "kube-controller-manager"
	I0620 18:10:58.385260  374244 kubeadm.go:309] [control-plane] Creating static Pod manifest for "kube-scheduler"
	I0620 18:10:58.396909  374244 kubeadm.go:309] [kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
	I0620 18:10:58.398528  374244 kubeadm.go:309] [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
	I0620 18:10:58.398914  374244 kubeadm.go:309] [kubelet-start] Starting the kubelet
	I0620 18:10:58.511469  374244 kubeadm.go:309] [wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests"
	I0620 18:10:58.511555  374244 kubeadm.go:309] [kubelet-check] Waiting for a healthy kubelet. This can take up to 4m0s
	I0620 18:10:59.507131  374244 kubeadm.go:309] [kubelet-check] The kubelet is healthy after 1.001495822s
	I0620 18:10:59.507223  374244 kubeadm.go:309] [api-check] Waiting for a healthy API server. This can take up to 4m0s
	I0620 18:11:03.744154  362254 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
	I0620 18:11:03.759975  362254 api_server.go:72] duration metric: took 5m53.685709451s to wait for apiserver process to appear ...
	I0620 18:11:03.759998  362254 api_server.go:88] waiting for apiserver healthz status ...
	I0620 18:11:03.760078  362254 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_kube-apiserver --format={{.ID}}
	I0620 18:11:03.798135  362254 logs.go:276] 2 containers: [075e697d07c8 760f9a7d272c]
	I0620 18:11:03.798211  362254 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_etcd --format={{.ID}}
	I0620 18:11:03.831434  362254 logs.go:276] 2 containers: [25a953b6e46d 81eaddbb4b45]
	I0620 18:11:03.831511  362254 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_coredns --format={{.ID}}
	I0620 18:11:03.861101  362254 logs.go:276] 2 containers: [5534d9d547fd 7e60d81fce7f]
	I0620 18:11:03.861185  362254 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_kube-scheduler --format={{.ID}}
	I0620 18:11:03.905927  362254 logs.go:276] 2 containers: [4ed5438feb8b ab177be73cae]
	I0620 18:11:03.906011  362254 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_kube-proxy --format={{.ID}}
	I0620 18:11:03.944486  362254 logs.go:276] 2 containers: [0b32a67571bb c47b7591b320]
	I0620 18:11:03.944579  362254 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_kube-controller-manager --format={{.ID}}
	I0620 18:11:03.975451  362254 logs.go:276] 2 containers: [aab72b193950 a4eee0f3ea35]
	I0620 18:11:03.975533  362254 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_kindnet --format={{.ID}}
	I0620 18:11:03.998509  362254 logs.go:276] 0 containers: []
	W0620 18:11:03.998529  362254 logs.go:278] No container was found matching "kindnet"
	I0620 18:11:03.998586  362254 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_kubernetes-dashboard --format={{.ID}}
	I0620 18:11:04.031360  362254 logs.go:276] 1 containers: [49efdc60eccc]
	I0620 18:11:04.031444  362254 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_storage-provisioner --format={{.ID}}
	I0620 18:11:04.057548  362254 logs.go:276] 2 containers: [74b467d165b4 16e28ef3ddc9]
	I0620 18:11:04.057630  362254 logs.go:123] Gathering logs for kubelet ...
	I0620 18:11:04.057656  362254 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u kubelet -n 400"
	W0620 18:11:04.137517  362254 logs.go:138] Found kubelet problem: Jun 20 18:05:23 old-k8s-version-577369 kubelet[1201]: E0620 18:05:23.695585    1201 reflector.go:138] object-"kube-system"/"metrics-server-token-xd7wc": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets "metrics-server-token-xd7wc" is forbidden: User "system:node:old-k8s-version-577369" cannot list resource "secrets" in API group "" in the namespace "kube-system": no relationship found between node 'old-k8s-version-577369' and this object
	W0620 18:11:04.137802  362254 logs.go:138] Found kubelet problem: Jun 20 18:05:23 old-k8s-version-577369 kubelet[1201]: E0620 18:05:23.698643    1201 reflector.go:138] object-"kube-system"/"kube-proxy": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps "kube-proxy" is forbidden: User "system:node:old-k8s-version-577369" cannot list resource "configmaps" in API group "" in the namespace "kube-system": no relationship found between node 'old-k8s-version-577369' and this object
	W0620 18:11:04.138082  362254 logs.go:138] Found kubelet problem: Jun 20 18:05:23 old-k8s-version-577369 kubelet[1201]: E0620 18:05:23.698704    1201 reflector.go:138] object-"kube-system"/"storage-provisioner-token-b9lfw": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets "storage-provisioner-token-b9lfw" is forbidden: User "system:node:old-k8s-version-577369" cannot list resource "secrets" in API group "" in the namespace "kube-system": no relationship found between node 'old-k8s-version-577369' and this object
	W0620 18:11:04.138316  362254 logs.go:138] Found kubelet problem: Jun 20 18:05:23 old-k8s-version-577369 kubelet[1201]: E0620 18:05:23.698918    1201 reflector.go:138] object-"kube-system"/"coredns": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps "coredns" is forbidden: User "system:node:old-k8s-version-577369" cannot list resource "configmaps" in API group "" in the namespace "kube-system": no relationship found between node 'old-k8s-version-577369' and this object
	W0620 18:11:04.138554  362254 logs.go:138] Found kubelet problem: Jun 20 18:05:23 old-k8s-version-577369 kubelet[1201]: E0620 18:05:23.699130    1201 reflector.go:138] object-"kube-system"/"coredns-token-br6s7": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets "coredns-token-br6s7" is forbidden: User "system:node:old-k8s-version-577369" cannot list resource "secrets" in API group "" in the namespace "kube-system": no relationship found between node 'old-k8s-version-577369' and this object
	W0620 18:11:04.138788  362254 logs.go:138] Found kubelet problem: Jun 20 18:05:23 old-k8s-version-577369 kubelet[1201]: E0620 18:05:23.699223    1201 reflector.go:138] object-"default"/"default-token-9m8v4": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets "default-token-9m8v4" is forbidden: User "system:node:old-k8s-version-577369" cannot list resource "secrets" in API group "" in the namespace "default": no relationship found between node 'old-k8s-version-577369' and this object
	W0620 18:11:04.139088  362254 logs.go:138] Found kubelet problem: Jun 20 18:05:23 old-k8s-version-577369 kubelet[1201]: E0620 18:05:23.699291    1201 reflector.go:138] object-"kube-system"/"kube-proxy-token-kwb2s": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets "kube-proxy-token-kwb2s" is forbidden: User "system:node:old-k8s-version-577369" cannot list resource "secrets" in API group "" in the namespace "kube-system": no relationship found between node 'old-k8s-version-577369' and this object
	W0620 18:11:04.145789  362254 logs.go:138] Found kubelet problem: Jun 20 18:05:28 old-k8s-version-577369 kubelet[1201]: E0620 18:05:27.999585    1201 pod_workers.go:191] Error syncing pod 18cfc59b-503a-463e-80f0-2a08c7711566 ("metrics-server-9975d5f86-nxk7p_kube-system(18cfc59b-503a-463e-80f0-2a08c7711566)"), skipping: failed to "StartContainer" for "metrics-server" with ErrImagePull: "rpc error: code = Unknown desc = Error response from daemon: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.85.1:53: no such host"
	W0620 18:11:04.146521  362254 logs.go:138] Found kubelet problem: Jun 20 18:05:28 old-k8s-version-577369 kubelet[1201]: E0620 18:05:28.453263    1201 pod_workers.go:191] Error syncing pod 18cfc59b-503a-463e-80f0-2a08c7711566 ("metrics-server-9975d5f86-nxk7p_kube-system(18cfc59b-503a-463e-80f0-2a08c7711566)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0620 18:11:04.149146  362254 logs.go:138] Found kubelet problem: Jun 20 18:05:41 old-k8s-version-577369 kubelet[1201]: E0620 18:05:41.280792    1201 pod_workers.go:191] Error syncing pod 18cfc59b-503a-463e-80f0-2a08c7711566 ("metrics-server-9975d5f86-nxk7p_kube-system(18cfc59b-503a-463e-80f0-2a08c7711566)"), skipping: failed to "StartContainer" for "metrics-server" with ErrImagePull: "rpc error: code = Unknown desc = Error response from daemon: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.85.1:53: no such host"
	W0620 18:11:04.155757  362254 logs.go:138] Found kubelet problem: Jun 20 18:05:46 old-k8s-version-577369 kubelet[1201]: E0620 18:05:46.379764    1201 pod_workers.go:191] Error syncing pod a0906cb1-30c6-4e85-9f0e-eb135fb00622 ("dashboard-metrics-scraper-8d5bb5db8-hhrcd_kubernetes-dashboard(a0906cb1-30c6-4e85-9f0e-eb135fb00622)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ErrImagePull: "rpc error: code = Unknown desc = [DEPRECATION NOTICE] Docker Image Format v1 and Docker Image manifest version 2, schema 1 support is disabled by default and will be removed in an upcoming release. Suggest the author of registry.k8s.io/echoserver:1.4 to upgrade the image to the OCI Format or Docker Image manifest v2, schema 2. More information at https://docs.docker.com/go/deprecated-image-specs/"
	W0620 18:11:04.156394  362254 logs.go:138] Found kubelet problem: Jun 20 18:05:46 old-k8s-version-577369 kubelet[1201]: E0620 18:05:46.795712    1201 pod_workers.go:191] Error syncing pod a0906cb1-30c6-4e85-9f0e-eb135fb00622 ("dashboard-metrics-scraper-8d5bb5db8-hhrcd_kubernetes-dashboard(a0906cb1-30c6-4e85-9f0e-eb135fb00622)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0620 18:11:04.156632  362254 logs.go:138] Found kubelet problem: Jun 20 18:05:47 old-k8s-version-577369 kubelet[1201]: E0620 18:05:47.792472    1201 pod_workers.go:191] Error syncing pod a0906cb1-30c6-4e85-9f0e-eb135fb00622 ("dashboard-metrics-scraper-8d5bb5db8-hhrcd_kubernetes-dashboard(a0906cb1-30c6-4e85-9f0e-eb135fb00622)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0620 18:11:04.157039  362254 logs.go:138] Found kubelet problem: Jun 20 18:05:53 old-k8s-version-577369 kubelet[1201]: E0620 18:05:53.229484    1201 pod_workers.go:191] Error syncing pod 18cfc59b-503a-463e-80f0-2a08c7711566 ("metrics-server-9975d5f86-nxk7p_kube-system(18cfc59b-503a-463e-80f0-2a08c7711566)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0620 18:11:04.157839  362254 logs.go:138] Found kubelet problem: Jun 20 18:05:58 old-k8s-version-577369 kubelet[1201]: E0620 18:05:58.937844    1201 pod_workers.go:191] Error syncing pod 47a50d23-d504-4f3b-a3a1-97513673c10e ("storage-provisioner_kube-system(47a50d23-d504-4f3b-a3a1-97513673c10e)"), skipping: failed to "StartContainer" for "storage-provisioner" with CrashLoopBackOff: "back-off 10s restarting failed container=storage-provisioner pod=storage-provisioner_kube-system(47a50d23-d504-4f3b-a3a1-97513673c10e)"
	W0620 18:11:04.160409  362254 logs.go:138] Found kubelet problem: Jun 20 18:06:00 old-k8s-version-577369 kubelet[1201]: E0620 18:06:00.973843    1201 pod_workers.go:191] Error syncing pod a0906cb1-30c6-4e85-9f0e-eb135fb00622 ("dashboard-metrics-scraper-8d5bb5db8-hhrcd_kubernetes-dashboard(a0906cb1-30c6-4e85-9f0e-eb135fb00622)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ErrImagePull: "rpc error: code = Unknown desc = [DEPRECATION NOTICE] Docker Image Format v1 and Docker Image manifest version 2, schema 1 support is disabled by default and will be removed in an upcoming release. Suggest the author of registry.k8s.io/echoserver:1.4 to upgrade the image to the OCI Format or Docker Image manifest v2, schema 2. More information at https://docs.docker.com/go/deprecated-image-specs/"
	W0620 18:11:04.162846  362254 logs.go:138] Found kubelet problem: Jun 20 18:06:06 old-k8s-version-577369 kubelet[1201]: E0620 18:06:06.253227    1201 pod_workers.go:191] Error syncing pod 18cfc59b-503a-463e-80f0-2a08c7711566 ("metrics-server-9975d5f86-nxk7p_kube-system(18cfc59b-503a-463e-80f0-2a08c7711566)"), skipping: failed to "StartContainer" for "metrics-server" with ErrImagePull: "rpc error: code = Unknown desc = Error response from daemon: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.85.1:53: no such host"
	W0620 18:11:04.163226  362254 logs.go:138] Found kubelet problem: Jun 20 18:06:12 old-k8s-version-577369 kubelet[1201]: E0620 18:06:12.213225    1201 pod_workers.go:191] Error syncing pod a0906cb1-30c6-4e85-9f0e-eb135fb00622 ("dashboard-metrics-scraper-8d5bb5db8-hhrcd_kubernetes-dashboard(a0906cb1-30c6-4e85-9f0e-eb135fb00622)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0620 18:11:04.163438  362254 logs.go:138] Found kubelet problem: Jun 20 18:06:20 old-k8s-version-577369 kubelet[1201]: E0620 18:06:20.220208    1201 pod_workers.go:191] Error syncing pod 18cfc59b-503a-463e-80f0-2a08c7711566 ("metrics-server-9975d5f86-nxk7p_kube-system(18cfc59b-503a-463e-80f0-2a08c7711566)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0620 18:11:04.165695  362254 logs.go:138] Found kubelet problem: Jun 20 18:06:25 old-k8s-version-577369 kubelet[1201]: E0620 18:06:25.880053    1201 pod_workers.go:191] Error syncing pod a0906cb1-30c6-4e85-9f0e-eb135fb00622 ("dashboard-metrics-scraper-8d5bb5db8-hhrcd_kubernetes-dashboard(a0906cb1-30c6-4e85-9f0e-eb135fb00622)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ErrImagePull: "rpc error: code = Unknown desc = [DEPRECATION NOTICE] Docker Image Format v1 and Docker Image manifest version 2, schema 1 support is disabled by default and will be removed in an upcoming release. Suggest the author of registry.k8s.io/echoserver:1.4 to upgrade the image to the OCI Format or Docker Image manifest v2, schema 2. More information at https://docs.docker.com/go/deprecated-image-specs/"
	W0620 18:11:04.165909  362254 logs.go:138] Found kubelet problem: Jun 20 18:06:34 old-k8s-version-577369 kubelet[1201]: E0620 18:06:34.213970    1201 pod_workers.go:191] Error syncing pod 18cfc59b-503a-463e-80f0-2a08c7711566 ("metrics-server-9975d5f86-nxk7p_kube-system(18cfc59b-503a-463e-80f0-2a08c7711566)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0620 18:11:04.166131  362254 logs.go:138] Found kubelet problem: Jun 20 18:06:39 old-k8s-version-577369 kubelet[1201]: E0620 18:06:39.231731    1201 pod_workers.go:191] Error syncing pod a0906cb1-30c6-4e85-9f0e-eb135fb00622 ("dashboard-metrics-scraper-8d5bb5db8-hhrcd_kubernetes-dashboard(a0906cb1-30c6-4e85-9f0e-eb135fb00622)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0620 18:11:04.168276  362254 logs.go:138] Found kubelet problem: Jun 20 18:06:49 old-k8s-version-577369 kubelet[1201]: E0620 18:06:49.256445    1201 pod_workers.go:191] Error syncing pod 18cfc59b-503a-463e-80f0-2a08c7711566 ("metrics-server-9975d5f86-nxk7p_kube-system(18cfc59b-503a-463e-80f0-2a08c7711566)"), skipping: failed to "StartContainer" for "metrics-server" with ErrImagePull: "rpc error: code = Unknown desc = Error response from daemon: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.85.1:53: no such host"
	W0620 18:11:04.168505  362254 logs.go:138] Found kubelet problem: Jun 20 18:06:53 old-k8s-version-577369 kubelet[1201]: E0620 18:06:53.229559    1201 pod_workers.go:191] Error syncing pod a0906cb1-30c6-4e85-9f0e-eb135fb00622 ("dashboard-metrics-scraper-8d5bb5db8-hhrcd_kubernetes-dashboard(a0906cb1-30c6-4e85-9f0e-eb135fb00622)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0620 18:11:04.168720  362254 logs.go:138] Found kubelet problem: Jun 20 18:07:02 old-k8s-version-577369 kubelet[1201]: E0620 18:07:02.213019    1201 pod_workers.go:191] Error syncing pod 18cfc59b-503a-463e-80f0-2a08c7711566 ("metrics-server-9975d5f86-nxk7p_kube-system(18cfc59b-503a-463e-80f0-2a08c7711566)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0620 18:11:04.170964  362254 logs.go:138] Found kubelet problem: Jun 20 18:07:07 old-k8s-version-577369 kubelet[1201]: E0620 18:07:07.909458    1201 pod_workers.go:191] Error syncing pod a0906cb1-30c6-4e85-9f0e-eb135fb00622 ("dashboard-metrics-scraper-8d5bb5db8-hhrcd_kubernetes-dashboard(a0906cb1-30c6-4e85-9f0e-eb135fb00622)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ErrImagePull: "rpc error: code = Unknown desc = [DEPRECATION NOTICE] Docker Image Format v1 and Docker Image manifest version 2, schema 1 support is disabled by default and will be removed in an upcoming release. Suggest the author of registry.k8s.io/echoserver:1.4 to upgrade the image to the OCI Format or Docker Image manifest v2, schema 2. More information at https://docs.docker.com/go/deprecated-image-specs/"
	W0620 18:11:04.176940  362254 logs.go:138] Found kubelet problem: Jun 20 18:07:17 old-k8s-version-577369 kubelet[1201]: E0620 18:07:17.213306    1201 pod_workers.go:191] Error syncing pod 18cfc59b-503a-463e-80f0-2a08c7711566 ("metrics-server-9975d5f86-nxk7p_kube-system(18cfc59b-503a-463e-80f0-2a08c7711566)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0620 18:11:04.177172  362254 logs.go:138] Found kubelet problem: Jun 20 18:07:20 old-k8s-version-577369 kubelet[1201]: E0620 18:07:20.213221    1201 pod_workers.go:191] Error syncing pod a0906cb1-30c6-4e85-9f0e-eb135fb00622 ("dashboard-metrics-scraper-8d5bb5db8-hhrcd_kubernetes-dashboard(a0906cb1-30c6-4e85-9f0e-eb135fb00622)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0620 18:11:04.177380  362254 logs.go:138] Found kubelet problem: Jun 20 18:07:32 old-k8s-version-577369 kubelet[1201]: E0620 18:07:32.213001    1201 pod_workers.go:191] Error syncing pod 18cfc59b-503a-463e-80f0-2a08c7711566 ("metrics-server-9975d5f86-nxk7p_kube-system(18cfc59b-503a-463e-80f0-2a08c7711566)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0620 18:11:04.177601  362254 logs.go:138] Found kubelet problem: Jun 20 18:07:35 old-k8s-version-577369 kubelet[1201]: E0620 18:07:35.214923    1201 pod_workers.go:191] Error syncing pod a0906cb1-30c6-4e85-9f0e-eb135fb00622 ("dashboard-metrics-scraper-8d5bb5db8-hhrcd_kubernetes-dashboard(a0906cb1-30c6-4e85-9f0e-eb135fb00622)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0620 18:11:04.177809  362254 logs.go:138] Found kubelet problem: Jun 20 18:07:47 old-k8s-version-577369 kubelet[1201]: E0620 18:07:47.215155    1201 pod_workers.go:191] Error syncing pod 18cfc59b-503a-463e-80f0-2a08c7711566 ("metrics-server-9975d5f86-nxk7p_kube-system(18cfc59b-503a-463e-80f0-2a08c7711566)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0620 18:11:04.178032  362254 logs.go:138] Found kubelet problem: Jun 20 18:07:50 old-k8s-version-577369 kubelet[1201]: E0620 18:07:50.213002    1201 pod_workers.go:191] Error syncing pod a0906cb1-30c6-4e85-9f0e-eb135fb00622 ("dashboard-metrics-scraper-8d5bb5db8-hhrcd_kubernetes-dashboard(a0906cb1-30c6-4e85-9f0e-eb135fb00622)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0620 18:11:04.178241  362254 logs.go:138] Found kubelet problem: Jun 20 18:08:02 old-k8s-version-577369 kubelet[1201]: E0620 18:08:02.213224    1201 pod_workers.go:191] Error syncing pod 18cfc59b-503a-463e-80f0-2a08c7711566 ("metrics-server-9975d5f86-nxk7p_kube-system(18cfc59b-503a-463e-80f0-2a08c7711566)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0620 18:11:04.178468  362254 logs.go:138] Found kubelet problem: Jun 20 18:08:04 old-k8s-version-577369 kubelet[1201]: E0620 18:08:04.224800    1201 pod_workers.go:191] Error syncing pod a0906cb1-30c6-4e85-9f0e-eb135fb00622 ("dashboard-metrics-scraper-8d5bb5db8-hhrcd_kubernetes-dashboard(a0906cb1-30c6-4e85-9f0e-eb135fb00622)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0620 18:11:04.180581  362254 logs.go:138] Found kubelet problem: Jun 20 18:08:13 old-k8s-version-577369 kubelet[1201]: E0620 18:08:13.248654    1201 pod_workers.go:191] Error syncing pod 18cfc59b-503a-463e-80f0-2a08c7711566 ("metrics-server-9975d5f86-nxk7p_kube-system(18cfc59b-503a-463e-80f0-2a08c7711566)"), skipping: failed to "StartContainer" for "metrics-server" with ErrImagePull: "rpc error: code = Unknown desc = Error response from daemon: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.85.1:53: no such host"
	W0620 18:11:04.180807  362254 logs.go:138] Found kubelet problem: Jun 20 18:08:18 old-k8s-version-577369 kubelet[1201]: E0620 18:08:18.213216    1201 pod_workers.go:191] Error syncing pod a0906cb1-30c6-4e85-9f0e-eb135fb00622 ("dashboard-metrics-scraper-8d5bb5db8-hhrcd_kubernetes-dashboard(a0906cb1-30c6-4e85-9f0e-eb135fb00622)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0620 18:11:04.181015  362254 logs.go:138] Found kubelet problem: Jun 20 18:08:24 old-k8s-version-577369 kubelet[1201]: E0620 18:08:24.213044    1201 pod_workers.go:191] Error syncing pod 18cfc59b-503a-463e-80f0-2a08c7711566 ("metrics-server-9975d5f86-nxk7p_kube-system(18cfc59b-503a-463e-80f0-2a08c7711566)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0620 18:11:04.186304  362254 logs.go:138] Found kubelet problem: Jun 20 18:08:31 old-k8s-version-577369 kubelet[1201]: E0620 18:08:31.863326    1201 pod_workers.go:191] Error syncing pod a0906cb1-30c6-4e85-9f0e-eb135fb00622 ("dashboard-metrics-scraper-8d5bb5db8-hhrcd_kubernetes-dashboard(a0906cb1-30c6-4e85-9f0e-eb135fb00622)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ErrImagePull: "rpc error: code = Unknown desc = [DEPRECATION NOTICE] Docker Image Format v1 and Docker Image manifest version 2, schema 1 support is disabled by default and will be removed in an upcoming release. Suggest the author of registry.k8s.io/echoserver:1.4 to upgrade the image to the OCI Format or Docker Image manifest v2, schema 2. More information at https://docs.docker.com/go/deprecated-image-specs/"
	W0620 18:11:04.186572  362254 logs.go:138] Found kubelet problem: Jun 20 18:08:35 old-k8s-version-577369 kubelet[1201]: E0620 18:08:35.226288    1201 pod_workers.go:191] Error syncing pod 18cfc59b-503a-463e-80f0-2a08c7711566 ("metrics-server-9975d5f86-nxk7p_kube-system(18cfc59b-503a-463e-80f0-2a08c7711566)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0620 18:11:04.186801  362254 logs.go:138] Found kubelet problem: Jun 20 18:08:44 old-k8s-version-577369 kubelet[1201]: E0620 18:08:44.214061    1201 pod_workers.go:191] Error syncing pod a0906cb1-30c6-4e85-9f0e-eb135fb00622 ("dashboard-metrics-scraper-8d5bb5db8-hhrcd_kubernetes-dashboard(a0906cb1-30c6-4e85-9f0e-eb135fb00622)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0620 18:11:04.187082  362254 logs.go:138] Found kubelet problem: Jun 20 18:08:46 old-k8s-version-577369 kubelet[1201]: E0620 18:08:46.213051    1201 pod_workers.go:191] Error syncing pod 18cfc59b-503a-463e-80f0-2a08c7711566 ("metrics-server-9975d5f86-nxk7p_kube-system(18cfc59b-503a-463e-80f0-2a08c7711566)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0620 18:11:04.187306  362254 logs.go:138] Found kubelet problem: Jun 20 18:08:57 old-k8s-version-577369 kubelet[1201]: E0620 18:08:57.213817    1201 pod_workers.go:191] Error syncing pod a0906cb1-30c6-4e85-9f0e-eb135fb00622 ("dashboard-metrics-scraper-8d5bb5db8-hhrcd_kubernetes-dashboard(a0906cb1-30c6-4e85-9f0e-eb135fb00622)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0620 18:11:04.187514  362254 logs.go:138] Found kubelet problem: Jun 20 18:08:58 old-k8s-version-577369 kubelet[1201]: E0620 18:08:58.213216    1201 pod_workers.go:191] Error syncing pod 18cfc59b-503a-463e-80f0-2a08c7711566 ("metrics-server-9975d5f86-nxk7p_kube-system(18cfc59b-503a-463e-80f0-2a08c7711566)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0620 18:11:04.187732  362254 logs.go:138] Found kubelet problem: Jun 20 18:09:11 old-k8s-version-577369 kubelet[1201]: E0620 18:09:11.219178    1201 pod_workers.go:191] Error syncing pod 18cfc59b-503a-463e-80f0-2a08c7711566 ("metrics-server-9975d5f86-nxk7p_kube-system(18cfc59b-503a-463e-80f0-2a08c7711566)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0620 18:11:04.187954  362254 logs.go:138] Found kubelet problem: Jun 20 18:09:11 old-k8s-version-577369 kubelet[1201]: E0620 18:09:11.223804    1201 pod_workers.go:191] Error syncing pod a0906cb1-30c6-4e85-9f0e-eb135fb00622 ("dashboard-metrics-scraper-8d5bb5db8-hhrcd_kubernetes-dashboard(a0906cb1-30c6-4e85-9f0e-eb135fb00622)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0620 18:11:04.188162  362254 logs.go:138] Found kubelet problem: Jun 20 18:09:25 old-k8s-version-577369 kubelet[1201]: E0620 18:09:25.213628    1201 pod_workers.go:191] Error syncing pod 18cfc59b-503a-463e-80f0-2a08c7711566 ("metrics-server-9975d5f86-nxk7p_kube-system(18cfc59b-503a-463e-80f0-2a08c7711566)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0620 18:11:04.188387  362254 logs.go:138] Found kubelet problem: Jun 20 18:09:25 old-k8s-version-577369 kubelet[1201]: E0620 18:09:25.220085    1201 pod_workers.go:191] Error syncing pod a0906cb1-30c6-4e85-9f0e-eb135fb00622 ("dashboard-metrics-scraper-8d5bb5db8-hhrcd_kubernetes-dashboard(a0906cb1-30c6-4e85-9f0e-eb135fb00622)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0620 18:11:04.188608  362254 logs.go:138] Found kubelet problem: Jun 20 18:09:37 old-k8s-version-577369 kubelet[1201]: E0620 18:09:37.218633    1201 pod_workers.go:191] Error syncing pod 18cfc59b-503a-463e-80f0-2a08c7711566 ("metrics-server-9975d5f86-nxk7p_kube-system(18cfc59b-503a-463e-80f0-2a08c7711566)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0620 18:11:04.188830  362254 logs.go:138] Found kubelet problem: Jun 20 18:09:39 old-k8s-version-577369 kubelet[1201]: E0620 18:09:39.213129    1201 pod_workers.go:191] Error syncing pod a0906cb1-30c6-4e85-9f0e-eb135fb00622 ("dashboard-metrics-scraper-8d5bb5db8-hhrcd_kubernetes-dashboard(a0906cb1-30c6-4e85-9f0e-eb135fb00622)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0620 18:11:04.189038  362254 logs.go:138] Found kubelet problem: Jun 20 18:09:49 old-k8s-version-577369 kubelet[1201]: E0620 18:09:49.212880    1201 pod_workers.go:191] Error syncing pod 18cfc59b-503a-463e-80f0-2a08c7711566 ("metrics-server-9975d5f86-nxk7p_kube-system(18cfc59b-503a-463e-80f0-2a08c7711566)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0620 18:11:04.189258  362254 logs.go:138] Found kubelet problem: Jun 20 18:09:52 old-k8s-version-577369 kubelet[1201]: E0620 18:09:52.213161    1201 pod_workers.go:191] Error syncing pod a0906cb1-30c6-4e85-9f0e-eb135fb00622 ("dashboard-metrics-scraper-8d5bb5db8-hhrcd_kubernetes-dashboard(a0906cb1-30c6-4e85-9f0e-eb135fb00622)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0620 18:11:04.189468  362254 logs.go:138] Found kubelet problem: Jun 20 18:10:01 old-k8s-version-577369 kubelet[1201]: E0620 18:10:01.220058    1201 pod_workers.go:191] Error syncing pod 18cfc59b-503a-463e-80f0-2a08c7711566 ("metrics-server-9975d5f86-nxk7p_kube-system(18cfc59b-503a-463e-80f0-2a08c7711566)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0620 18:11:04.189694  362254 logs.go:138] Found kubelet problem: Jun 20 18:10:07 old-k8s-version-577369 kubelet[1201]: E0620 18:10:07.213133    1201 pod_workers.go:191] Error syncing pod a0906cb1-30c6-4e85-9f0e-eb135fb00622 ("dashboard-metrics-scraper-8d5bb5db8-hhrcd_kubernetes-dashboard(a0906cb1-30c6-4e85-9f0e-eb135fb00622)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0620 18:11:04.189902  362254 logs.go:138] Found kubelet problem: Jun 20 18:10:16 old-k8s-version-577369 kubelet[1201]: E0620 18:10:16.212946    1201 pod_workers.go:191] Error syncing pod 18cfc59b-503a-463e-80f0-2a08c7711566 ("metrics-server-9975d5f86-nxk7p_kube-system(18cfc59b-503a-463e-80f0-2a08c7711566)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0620 18:11:04.190121  362254 logs.go:138] Found kubelet problem: Jun 20 18:10:18 old-k8s-version-577369 kubelet[1201]: E0620 18:10:18.220165    1201 pod_workers.go:191] Error syncing pod a0906cb1-30c6-4e85-9f0e-eb135fb00622 ("dashboard-metrics-scraper-8d5bb5db8-hhrcd_kubernetes-dashboard(a0906cb1-30c6-4e85-9f0e-eb135fb00622)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0620 18:11:04.190341  362254 logs.go:138] Found kubelet problem: Jun 20 18:10:31 old-k8s-version-577369 kubelet[1201]: E0620 18:10:31.213199    1201 pod_workers.go:191] Error syncing pod a0906cb1-30c6-4e85-9f0e-eb135fb00622 ("dashboard-metrics-scraper-8d5bb5db8-hhrcd_kubernetes-dashboard(a0906cb1-30c6-4e85-9f0e-eb135fb00622)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0620 18:11:04.190550  362254 logs.go:138] Found kubelet problem: Jun 20 18:10:31 old-k8s-version-577369 kubelet[1201]: E0620 18:10:31.215427    1201 pod_workers.go:191] Error syncing pod 18cfc59b-503a-463e-80f0-2a08c7711566 ("metrics-server-9975d5f86-nxk7p_kube-system(18cfc59b-503a-463e-80f0-2a08c7711566)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0620 18:11:04.190773  362254 logs.go:138] Found kubelet problem: Jun 20 18:10:42 old-k8s-version-577369 kubelet[1201]: E0620 18:10:42.213713    1201 pod_workers.go:191] Error syncing pod a0906cb1-30c6-4e85-9f0e-eb135fb00622 ("dashboard-metrics-scraper-8d5bb5db8-hhrcd_kubernetes-dashboard(a0906cb1-30c6-4e85-9f0e-eb135fb00622)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0620 18:11:04.193468  362254 logs.go:138] Found kubelet problem: Jun 20 18:10:43 old-k8s-version-577369 kubelet[1201]: E0620 18:10:43.221571    1201 pod_workers.go:191] Error syncing pod 18cfc59b-503a-463e-80f0-2a08c7711566 ("metrics-server-9975d5f86-nxk7p_kube-system(18cfc59b-503a-463e-80f0-2a08c7711566)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0620 18:11:04.193769  362254 logs.go:138] Found kubelet problem: Jun 20 18:10:55 old-k8s-version-577369 kubelet[1201]: E0620 18:10:55.213780    1201 pod_workers.go:191] Error syncing pod a0906cb1-30c6-4e85-9f0e-eb135fb00622 ("dashboard-metrics-scraper-8d5bb5db8-hhrcd_kubernetes-dashboard(a0906cb1-30c6-4e85-9f0e-eb135fb00622)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0620 18:11:04.195893  362254 logs.go:138] Found kubelet problem: Jun 20 18:10:56 old-k8s-version-577369 kubelet[1201]: E0620 18:10:56.254103    1201 pod_workers.go:191] Error syncing pod 18cfc59b-503a-463e-80f0-2a08c7711566 ("metrics-server-9975d5f86-nxk7p_kube-system(18cfc59b-503a-463e-80f0-2a08c7711566)"), skipping: failed to "StartContainer" for "metrics-server" with ErrImagePull: "rpc error: code = Unknown desc = Error response from daemon: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.85.1:53: no such host"
	I0620 18:11:04.195923  362254 logs.go:123] Gathering logs for describe nodes ...
	I0620 18:11:04.195949  362254 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.20.0/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig"
	I0620 18:11:04.426160  362254 logs.go:123] Gathering logs for kube-apiserver [760f9a7d272c] ...
	I0620 18:11:04.426342  362254 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 760f9a7d272c"
	I0620 18:11:04.528343  362254 logs.go:123] Gathering logs for kube-scheduler [ab177be73cae] ...
	I0620 18:11:04.528424  362254 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 ab177be73cae"
	I0620 18:11:04.573441  362254 logs.go:123] Gathering logs for storage-provisioner [16e28ef3ddc9] ...
	I0620 18:11:04.573520  362254 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 16e28ef3ddc9"
	I0620 18:11:04.632739  362254 logs.go:123] Gathering logs for Docker ...
	I0620 18:11:04.632808  362254 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u docker -u cri-docker -n 400"
	I0620 18:11:04.676322  362254 logs.go:123] Gathering logs for kube-apiserver [075e697d07c8] ...
	I0620 18:11:04.676396  362254 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 075e697d07c8"
	I0620 18:11:04.751250  362254 logs.go:123] Gathering logs for etcd [25a953b6e46d] ...
	I0620 18:11:04.751327  362254 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 25a953b6e46d"
	I0620 18:11:04.825564  362254 logs.go:123] Gathering logs for kube-controller-manager [a4eee0f3ea35] ...
	I0620 18:11:04.825644  362254 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 a4eee0f3ea35"
	I0620 18:11:04.916404  362254 logs.go:123] Gathering logs for dmesg ...
	I0620 18:11:04.916479  362254 ssh_runner.go:195] Run: /bin/bash -c "sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400"
	I0620 18:11:04.944002  362254 logs.go:123] Gathering logs for etcd [81eaddbb4b45] ...
	I0620 18:11:04.944073  362254 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 81eaddbb4b45"
	I0620 18:11:04.995804  362254 logs.go:123] Gathering logs for coredns [5534d9d547fd] ...
	I0620 18:11:04.995893  362254 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 5534d9d547fd"
	I0620 18:11:05.044664  362254 logs.go:123] Gathering logs for kube-scheduler [4ed5438feb8b] ...
	I0620 18:11:05.044747  362254 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 4ed5438feb8b"
	I0620 18:11:05.082366  362254 logs.go:123] Gathering logs for kube-proxy [0b32a67571bb] ...
	I0620 18:11:05.082463  362254 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 0b32a67571bb"
	I0620 18:11:05.141372  362254 logs.go:123] Gathering logs for kubernetes-dashboard [49efdc60eccc] ...
	I0620 18:11:05.141442  362254 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 49efdc60eccc"
	I0620 18:11:05.173015  362254 logs.go:123] Gathering logs for storage-provisioner [74b467d165b4] ...
	I0620 18:11:05.173101  362254 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 74b467d165b4"
	I0620 18:11:05.211503  362254 logs.go:123] Gathering logs for coredns [7e60d81fce7f] ...
	I0620 18:11:05.211569  362254 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 7e60d81fce7f"
	I0620 18:11:05.253118  362254 logs.go:123] Gathering logs for kube-proxy [c47b7591b320] ...
	I0620 18:11:05.253195  362254 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 c47b7591b320"
	I0620 18:11:05.285055  362254 logs.go:123] Gathering logs for kube-controller-manager [aab72b193950] ...
	I0620 18:11:05.285121  362254 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 aab72b193950"
	I0620 18:11:05.348716  362254 logs.go:123] Gathering logs for container status ...
	I0620 18:11:05.348790  362254 ssh_runner.go:195] Run: /bin/bash -c "sudo `which crictl || echo crictl` ps -a || sudo docker ps -a"
	I0620 18:11:05.447039  362254 out.go:304] Setting ErrFile to fd 2...
	I0620 18:11:05.447134  362254 out.go:338] TERM=,COLORTERM=, which probably does not support color
	W0620 18:11:05.447272  362254 out.go:239] X Problems detected in kubelet:
	W0620 18:11:05.447326  362254 out.go:239]   Jun 20 18:10:31 old-k8s-version-577369 kubelet[1201]: E0620 18:10:31.215427    1201 pod_workers.go:191] Error syncing pod 18cfc59b-503a-463e-80f0-2a08c7711566 ("metrics-server-9975d5f86-nxk7p_kube-system(18cfc59b-503a-463e-80f0-2a08c7711566)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0620 18:11:05.447358  362254 out.go:239]   Jun 20 18:10:42 old-k8s-version-577369 kubelet[1201]: E0620 18:10:42.213713    1201 pod_workers.go:191] Error syncing pod a0906cb1-30c6-4e85-9f0e-eb135fb00622 ("dashboard-metrics-scraper-8d5bb5db8-hhrcd_kubernetes-dashboard(a0906cb1-30c6-4e85-9f0e-eb135fb00622)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0620 18:11:05.447432  362254 out.go:239]   Jun 20 18:10:43 old-k8s-version-577369 kubelet[1201]: E0620 18:10:43.221571    1201 pod_workers.go:191] Error syncing pod 18cfc59b-503a-463e-80f0-2a08c7711566 ("metrics-server-9975d5f86-nxk7p_kube-system(18cfc59b-503a-463e-80f0-2a08c7711566)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0620 18:11:05.447521  362254 out.go:239]   Jun 20 18:10:55 old-k8s-version-577369 kubelet[1201]: E0620 18:10:55.213780    1201 pod_workers.go:191] Error syncing pod a0906cb1-30c6-4e85-9f0e-eb135fb00622 ("dashboard-metrics-scraper-8d5bb5db8-hhrcd_kubernetes-dashboard(a0906cb1-30c6-4e85-9f0e-eb135fb00622)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0620 18:11:05.447602  362254 out.go:239]   Jun 20 18:10:56 old-k8s-version-577369 kubelet[1201]: E0620 18:10:56.254103    1201 pod_workers.go:191] Error syncing pod 18cfc59b-503a-463e-80f0-2a08c7711566 ("metrics-server-9975d5f86-nxk7p_kube-system(18cfc59b-503a-463e-80f0-2a08c7711566)"), skipping: failed to "StartContainer" for "metrics-server" with ErrImagePull: "rpc error: code = Unknown desc = Error response from daemon: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.85.1:53: no such host"
	I0620 18:11:05.447662  362254 out.go:304] Setting ErrFile to fd 2...
	I0620 18:11:05.447707  362254 out.go:338] TERM=,COLORTERM=, which probably does not support color
	I0620 18:11:07.508615  374244 kubeadm.go:309] [api-check] The API server is healthy after 8.001393188s
	I0620 18:11:07.528898  374244 kubeadm.go:309] [upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
	I0620 18:11:07.542411  374244 kubeadm.go:309] [kubelet] Creating a ConfigMap "kubelet-config" in namespace kube-system with the configuration for the kubelets in the cluster
	I0620 18:11:07.570599  374244 kubeadm.go:309] [upload-certs] Skipping phase. Please see --upload-certs
	I0620 18:11:07.570795  374244 kubeadm.go:309] [mark-control-plane] Marking the node no-preload-581163 as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]
	I0620 18:11:07.582943  374244 kubeadm.go:309] [bootstrap-token] Using token: uemerr.735al9maxxqt1jtu
	I0620 18:11:07.584864  374244 out.go:204]   - Configuring RBAC rules ...
	I0620 18:11:07.584982  374244 kubeadm.go:309] [bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
	I0620 18:11:07.614516  374244 kubeadm.go:309] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes
	I0620 18:11:07.634177  374244 kubeadm.go:309] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
	I0620 18:11:07.639738  374244 kubeadm.go:309] [bootstrap-token] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
	I0620 18:11:07.645384  374244 kubeadm.go:309] [bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
	I0620 18:11:07.650286  374244 kubeadm.go:309] [bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
	I0620 18:11:07.918361  374244 kubeadm.go:309] [kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
	I0620 18:11:08.396640  374244 kubeadm.go:309] [addons] Applied essential addon: CoreDNS
	I0620 18:11:08.922604  374244 kubeadm.go:309] [addons] Applied essential addon: kube-proxy
	I0620 18:11:08.923693  374244 kubeadm.go:309] 
	I0620 18:11:08.923775  374244 kubeadm.go:309] Your Kubernetes control-plane has initialized successfully!
	I0620 18:11:08.923785  374244 kubeadm.go:309] 
	I0620 18:11:08.923872  374244 kubeadm.go:309] To start using your cluster, you need to run the following as a regular user:
	I0620 18:11:08.923877  374244 kubeadm.go:309] 
	I0620 18:11:08.923909  374244 kubeadm.go:309]   mkdir -p $HOME/.kube
	I0620 18:11:08.923974  374244 kubeadm.go:309]   sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
	I0620 18:11:08.924029  374244 kubeadm.go:309]   sudo chown $(id -u):$(id -g) $HOME/.kube/config
	I0620 18:11:08.924034  374244 kubeadm.go:309] 
	I0620 18:11:08.924095  374244 kubeadm.go:309] Alternatively, if you are the root user, you can run:
	I0620 18:11:08.924103  374244 kubeadm.go:309] 
	I0620 18:11:08.924164  374244 kubeadm.go:309]   export KUBECONFIG=/etc/kubernetes/admin.conf
	I0620 18:11:08.924171  374244 kubeadm.go:309] 
	I0620 18:11:08.924229  374244 kubeadm.go:309] You should now deploy a pod network to the cluster.
	I0620 18:11:08.924330  374244 kubeadm.go:309] Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
	I0620 18:11:08.924397  374244 kubeadm.go:309]   https://kubernetes.io/docs/concepts/cluster-administration/addons/
	I0620 18:11:08.924401  374244 kubeadm.go:309] 
	I0620 18:11:08.924481  374244 kubeadm.go:309] You can now join any number of control-plane nodes by copying certificate authorities
	I0620 18:11:08.924575  374244 kubeadm.go:309] and service account keys on each node and then running the following as root:
	I0620 18:11:08.924582  374244 kubeadm.go:309] 
	I0620 18:11:08.924681  374244 kubeadm.go:309]   kubeadm join control-plane.minikube.internal:8443 --token uemerr.735al9maxxqt1jtu \
	I0620 18:11:08.924794  374244 kubeadm.go:309] 	--discovery-token-ca-cert-hash sha256:56a399a874c06050b1e615cba34cc00267ff85569d8529edb2baa3c00e1104f2 \
	I0620 18:11:08.924820  374244 kubeadm.go:309] 	--control-plane 
	I0620 18:11:08.924825  374244 kubeadm.go:309] 
	I0620 18:11:08.924913  374244 kubeadm.go:309] Then you can join any number of worker nodes by running the following on each as root:
	I0620 18:11:08.924924  374244 kubeadm.go:309] 
	I0620 18:11:08.925019  374244 kubeadm.go:309] kubeadm join control-plane.minikube.internal:8443 --token uemerr.735al9maxxqt1jtu \
	I0620 18:11:08.925135  374244 kubeadm.go:309] 	--discovery-token-ca-cert-hash sha256:56a399a874c06050b1e615cba34cc00267ff85569d8529edb2baa3c00e1104f2 
	I0620 18:11:08.929657  374244 kubeadm.go:309] 	[WARNING SystemVerification]: failed to parse kernel config: unable to load kernel module: "configs", output: "modprobe: FATAL: Module configs not found in directory /lib/modules/5.15.0-1063-aws\n", err: exit status 1
	I0620 18:11:08.929786  374244 kubeadm.go:309] 	[WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'
	I0620 18:11:08.929815  374244 cni.go:84] Creating CNI manager for ""
	I0620 18:11:08.929833  374244 cni.go:158] "docker" driver + "docker" container runtime found on kubernetes v1.24+, recommending bridge
	I0620 18:11:08.933051  374244 out.go:177] * Configuring bridge CNI (Container Networking Interface) ...
	I0620 18:11:08.935242  374244 ssh_runner.go:195] Run: sudo mkdir -p /etc/cni/net.d
	I0620 18:11:08.951748  374244 ssh_runner.go:362] scp memory --> /etc/cni/net.d/1-k8s.conflist (496 bytes)
	I0620 18:11:08.975720  374244 ssh_runner.go:195] Run: /bin/bash -c "cat /proc/$(pgrep kube-apiserver)/oom_adj"
	I0620 18:11:08.975852  374244 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.30.2/kubectl create clusterrolebinding minikube-rbac --clusterrole=cluster-admin --serviceaccount=kube-system:default --kubeconfig=/var/lib/minikube/kubeconfig
	I0620 18:11:08.975949  374244 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.30.2/kubectl --kubeconfig=/var/lib/minikube/kubeconfig label --overwrite nodes no-preload-581163 minikube.k8s.io/updated_at=2024_06_20T18_11_08_0700 minikube.k8s.io/version=v1.33.1 minikube.k8s.io/commit=a5bfa5828b76fe92a3c5f89a54d8c76f6b5f3f8b minikube.k8s.io/name=no-preload-581163 minikube.k8s.io/primary=true
	I0620 18:11:09.129104  374244 ops.go:34] apiserver oom_adj: -16
	I0620 18:11:09.129272  374244 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.30.2/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I0620 18:11:09.630249  374244 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.30.2/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I0620 18:11:10.129328  374244 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.30.2/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I0620 18:11:10.629341  374244 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.30.2/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I0620 18:11:11.130228  374244 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.30.2/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I0620 18:11:11.630189  374244 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.30.2/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I0620 18:11:12.129358  374244 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.30.2/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I0620 18:11:12.630148  374244 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.30.2/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I0620 18:11:13.129782  374244 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.30.2/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I0620 18:11:13.629789  374244 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.30.2/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I0620 18:11:14.130014  374244 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.30.2/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I0620 18:11:14.629342  374244 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.30.2/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I0620 18:11:15.129853  374244 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.30.2/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I0620 18:11:15.449472  362254 api_server.go:253] Checking apiserver healthz at https://192.168.85.2:8443/healthz ...
	I0620 18:11:15.463258  362254 api_server.go:279] https://192.168.85.2:8443/healthz returned 200:
	ok
	I0620 18:11:15.465704  362254 out.go:177] 
	W0620 18:11:15.467872  362254 out.go:239] X Exiting due to K8S_UNHEALTHY_CONTROL_PLANE: wait 6m0s for node: wait for healthy API server: controlPlane never updated to v1.20.0
	W0620 18:11:15.467924  362254 out.go:239] * Suggestion: Control Plane could not update, try minikube delete --all --purge
	W0620 18:11:15.467944  362254 out.go:239] * Related issue: https://github.com/kubernetes/minikube/issues/11417
	W0620 18:11:15.467950  362254 out.go:239] * 
	W0620 18:11:15.468829  362254 out.go:239] ╭─────────────────────────────────────────────────────────────────────────────────────────────╮
	│                                                                                             │
	│    * If the above advice does not help, please let us know:                                 │
	│      https://github.com/kubernetes/minikube/issues/new/choose                               │
	│                                                                                             │
	│    * Please run `minikube logs --file=logs.txt` and attach logs.txt to the GitHub issue.    │
	│                                                                                             │
	╰─────────────────────────────────────────────────────────────────────────────────────────────╯
	I0620 18:11:15.471344  362254 out.go:177] 
	
	
	==> Docker <==
	Jun 20 18:06:00 old-k8s-version-577369 dockerd[971]: time="2024-06-20T18:06:00.970786627Z" level=info msg="Attempting next endpoint for pull after error: [DEPRECATION NOTICE] Docker Image Format v1 and Docker Image manifest version 2, schema 1 support is disabled by default and will be removed in an upcoming release. Suggest the author of registry.k8s.io/echoserver:1.4 to upgrade the image to the OCI Format or Docker Image manifest v2, schema 2. More information at https://docs.docker.com/go/deprecated-image-specs/" spanID=e36cb4e713c49d31 traceID=65b672c9fba7c4e212bf9cf2cb7a8f91
	Jun 20 18:06:06 old-k8s-version-577369 dockerd[971]: time="2024-06-20T18:06:06.244903113Z" level=warning msg="Error getting v2 registry: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.85.1:53: no such host" spanID=6203455aaf42e303 traceID=e03707f105c39e77e4cabf556210b18b
	Jun 20 18:06:06 old-k8s-version-577369 dockerd[971]: time="2024-06-20T18:06:06.244955411Z" level=info msg="Attempting next endpoint for pull after error: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.85.1:53: no such host" spanID=6203455aaf42e303 traceID=e03707f105c39e77e4cabf556210b18b
	Jun 20 18:06:06 old-k8s-version-577369 dockerd[971]: time="2024-06-20T18:06:06.252543250Z" level=error msg="Handler for POST /v1.40/images/create returned error: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.85.1:53: no such host" spanID=6203455aaf42e303 traceID=e03707f105c39e77e4cabf556210b18b
	Jun 20 18:06:25 old-k8s-version-577369 dockerd[971]: time="2024-06-20T18:06:25.589619234Z" level=warning msg="reference for unknown type: application/vnd.docker.distribution.manifest.v1+prettyjws" digest="sha256:5d99aa1120524c801bc8c1a7077e8f5ec122ba16b6dda1a5d3826057f67b9bcb" remote="registry.k8s.io/echoserver:1.4" spanID=4ca7d1788d77f4ca traceID=4a76ee043bab3c9d153a5e459672ebaf
	Jun 20 18:06:25 old-k8s-version-577369 dockerd[971]: time="2024-06-20T18:06:25.871198138Z" level=warning msg="Error persisting manifest" digest="sha256:5d99aa1120524c801bc8c1a7077e8f5ec122ba16b6dda1a5d3826057f67b9bcb" error="error committing manifest to content store: commit failed: unexpected commit digest sha256:eaee4c452b076cdb05b391ed7e75e1ad0aca136665875ab5d7e2f3d9f4675769, expected sha256:5d99aa1120524c801bc8c1a7077e8f5ec122ba16b6dda1a5d3826057f67b9bcb: failed precondition" remote="registry.k8s.io/echoserver:1.4" spanID=4ca7d1788d77f4ca traceID=4a76ee043bab3c9d153a5e459672ebaf
	Jun 20 18:06:25 old-k8s-version-577369 dockerd[971]: time="2024-06-20T18:06:25.871531177Z" level=warning msg="[DEPRECATION NOTICE] Docker Image Format v1 and Docker Image manifest version 2, schema 1 support is disabled by default and will be removed in an upcoming release. Suggest the author of registry.k8s.io/echoserver:1.4 to upgrade the image to the OCI Format or Docker Image manifest v2, schema 2. More information at https://docs.docker.com/go/deprecated-image-specs/" digest="sha256:5d99aa1120524c801bc8c1a7077e8f5ec122ba16b6dda1a5d3826057f67b9bcb" remote="registry.k8s.io/echoserver:1.4" spanID=4ca7d1788d77f4ca traceID=4a76ee043bab3c9d153a5e459672ebaf
	Jun 20 18:06:25 old-k8s-version-577369 dockerd[971]: time="2024-06-20T18:06:25.871667904Z" level=info msg="Attempting next endpoint for pull after error: [DEPRECATION NOTICE] Docker Image Format v1 and Docker Image manifest version 2, schema 1 support is disabled by default and will be removed in an upcoming release. Suggest the author of registry.k8s.io/echoserver:1.4 to upgrade the image to the OCI Format or Docker Image manifest v2, schema 2. More information at https://docs.docker.com/go/deprecated-image-specs/" spanID=4ca7d1788d77f4ca traceID=4a76ee043bab3c9d153a5e459672ebaf
	Jun 20 18:06:49 old-k8s-version-577369 dockerd[971]: time="2024-06-20T18:06:49.252253313Z" level=warning msg="Error getting v2 registry: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.85.1:53: no such host" spanID=20f97471919c74f4 traceID=e84eb518076b83dd07a97acc77964edf
	Jun 20 18:06:49 old-k8s-version-577369 dockerd[971]: time="2024-06-20T18:06:49.252370446Z" level=info msg="Attempting next endpoint for pull after error: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.85.1:53: no such host" spanID=20f97471919c74f4 traceID=e84eb518076b83dd07a97acc77964edf
	Jun 20 18:06:49 old-k8s-version-577369 dockerd[971]: time="2024-06-20T18:06:49.254853467Z" level=error msg="Handler for POST /v1.40/images/create returned error: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.85.1:53: no such host" spanID=20f97471919c74f4 traceID=e84eb518076b83dd07a97acc77964edf
	Jun 20 18:07:07 old-k8s-version-577369 dockerd[971]: time="2024-06-20T18:07:07.608620779Z" level=warning msg="reference for unknown type: application/vnd.docker.distribution.manifest.v1+prettyjws" digest="sha256:5d99aa1120524c801bc8c1a7077e8f5ec122ba16b6dda1a5d3826057f67b9bcb" remote="registry.k8s.io/echoserver:1.4" spanID=504b3a75c1c5aac3 traceID=c2e9eb90def83472d4fa1710813ccdc0
	Jun 20 18:07:07 old-k8s-version-577369 dockerd[971]: time="2024-06-20T18:07:07.906557281Z" level=warning msg="Error persisting manifest" digest="sha256:5d99aa1120524c801bc8c1a7077e8f5ec122ba16b6dda1a5d3826057f67b9bcb" error="error committing manifest to content store: commit failed: unexpected commit digest sha256:eaee4c452b076cdb05b391ed7e75e1ad0aca136665875ab5d7e2f3d9f4675769, expected sha256:5d99aa1120524c801bc8c1a7077e8f5ec122ba16b6dda1a5d3826057f67b9bcb: failed precondition" remote="registry.k8s.io/echoserver:1.4" spanID=504b3a75c1c5aac3 traceID=c2e9eb90def83472d4fa1710813ccdc0
	Jun 20 18:07:07 old-k8s-version-577369 dockerd[971]: time="2024-06-20T18:07:07.906705183Z" level=warning msg="[DEPRECATION NOTICE] Docker Image Format v1 and Docker Image manifest version 2, schema 1 support is disabled by default and will be removed in an upcoming release. Suggest the author of registry.k8s.io/echoserver:1.4 to upgrade the image to the OCI Format or Docker Image manifest v2, schema 2. More information at https://docs.docker.com/go/deprecated-image-specs/" digest="sha256:5d99aa1120524c801bc8c1a7077e8f5ec122ba16b6dda1a5d3826057f67b9bcb" remote="registry.k8s.io/echoserver:1.4" spanID=504b3a75c1c5aac3 traceID=c2e9eb90def83472d4fa1710813ccdc0
	Jun 20 18:07:07 old-k8s-version-577369 dockerd[971]: time="2024-06-20T18:07:07.906748768Z" level=info msg="Attempting next endpoint for pull after error: [DEPRECATION NOTICE] Docker Image Format v1 and Docker Image manifest version 2, schema 1 support is disabled by default and will be removed in an upcoming release. Suggest the author of registry.k8s.io/echoserver:1.4 to upgrade the image to the OCI Format or Docker Image manifest v2, schema 2. More information at https://docs.docker.com/go/deprecated-image-specs/" spanID=504b3a75c1c5aac3 traceID=c2e9eb90def83472d4fa1710813ccdc0
	Jun 20 18:08:13 old-k8s-version-577369 dockerd[971]: time="2024-06-20T18:08:13.243704717Z" level=warning msg="Error getting v2 registry: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.85.1:53: no such host" spanID=e2bad89695e6bfc3 traceID=8c068e817337201bf3580c14bd541595
	Jun 20 18:08:13 old-k8s-version-577369 dockerd[971]: time="2024-06-20T18:08:13.243762397Z" level=info msg="Attempting next endpoint for pull after error: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.85.1:53: no such host" spanID=e2bad89695e6bfc3 traceID=8c068e817337201bf3580c14bd541595
	Jun 20 18:08:13 old-k8s-version-577369 dockerd[971]: time="2024-06-20T18:08:13.247742568Z" level=error msg="Handler for POST /v1.40/images/create returned error: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.85.1:53: no such host" spanID=e2bad89695e6bfc3 traceID=8c068e817337201bf3580c14bd541595
	Jun 20 18:08:31 old-k8s-version-577369 dockerd[971]: time="2024-06-20T18:08:31.578290311Z" level=warning msg="reference for unknown type: application/vnd.docker.distribution.manifest.v1+prettyjws" digest="sha256:5d99aa1120524c801bc8c1a7077e8f5ec122ba16b6dda1a5d3826057f67b9bcb" remote="registry.k8s.io/echoserver:1.4" spanID=3fee327a8a61db08 traceID=63160869622b13f7e28113eb3e6382e1
	Jun 20 18:08:31 old-k8s-version-577369 dockerd[971]: time="2024-06-20T18:08:31.860162017Z" level=warning msg="Error persisting manifest" digest="sha256:5d99aa1120524c801bc8c1a7077e8f5ec122ba16b6dda1a5d3826057f67b9bcb" error="error committing manifest to content store: commit failed: unexpected commit digest sha256:eaee4c452b076cdb05b391ed7e75e1ad0aca136665875ab5d7e2f3d9f4675769, expected sha256:5d99aa1120524c801bc8c1a7077e8f5ec122ba16b6dda1a5d3826057f67b9bcb: failed precondition" remote="registry.k8s.io/echoserver:1.4" spanID=3fee327a8a61db08 traceID=63160869622b13f7e28113eb3e6382e1
	Jun 20 18:08:31 old-k8s-version-577369 dockerd[971]: time="2024-06-20T18:08:31.860304602Z" level=warning msg="[DEPRECATION NOTICE] Docker Image Format v1 and Docker Image manifest version 2, schema 1 support is disabled by default and will be removed in an upcoming release. Suggest the author of registry.k8s.io/echoserver:1.4 to upgrade the image to the OCI Format or Docker Image manifest v2, schema 2. More information at https://docs.docker.com/go/deprecated-image-specs/" digest="sha256:5d99aa1120524c801bc8c1a7077e8f5ec122ba16b6dda1a5d3826057f67b9bcb" remote="registry.k8s.io/echoserver:1.4" spanID=3fee327a8a61db08 traceID=63160869622b13f7e28113eb3e6382e1
	Jun 20 18:08:31 old-k8s-version-577369 dockerd[971]: time="2024-06-20T18:08:31.860341976Z" level=info msg="Attempting next endpoint for pull after error: [DEPRECATION NOTICE] Docker Image Format v1 and Docker Image manifest version 2, schema 1 support is disabled by default and will be removed in an upcoming release. Suggest the author of registry.k8s.io/echoserver:1.4 to upgrade the image to the OCI Format or Docker Image manifest v2, schema 2. More information at https://docs.docker.com/go/deprecated-image-specs/" spanID=3fee327a8a61db08 traceID=63160869622b13f7e28113eb3e6382e1
	Jun 20 18:10:56 old-k8s-version-577369 dockerd[971]: time="2024-06-20T18:10:56.249842714Z" level=warning msg="Error getting v2 registry: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.85.1:53: no such host" spanID=8470b7b199957dda traceID=e4f7af4e8b7d445e435c474bce36f67f
	Jun 20 18:10:56 old-k8s-version-577369 dockerd[971]: time="2024-06-20T18:10:56.249924058Z" level=info msg="Attempting next endpoint for pull after error: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.85.1:53: no such host" spanID=8470b7b199957dda traceID=e4f7af4e8b7d445e435c474bce36f67f
	Jun 20 18:10:56 old-k8s-version-577369 dockerd[971]: time="2024-06-20T18:10:56.253230104Z" level=error msg="Handler for POST /v1.40/images/create returned error: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.85.1:53: no such host" spanID=8470b7b199957dda traceID=e4f7af4e8b7d445e435c474bce36f67f
	
	
	==> container status <==
	CONTAINER           IMAGE                                                                                                 CREATED             STATE               NAME                      ATTEMPT             POD ID              POD
	74b467d165b47       ba04bb24b9575                                                                                         5 minutes ago       Running             storage-provisioner       2                   4e386924a6d6e       storage-provisioner
	49efdc60eccce       kubernetesui/dashboard@sha256:2e500d29e9d5f4a086b908eb8dfe7ecac57d2ab09d65b24f588b1d449841ef93        5 minutes ago       Running             kubernetes-dashboard      0                   38864320d656e       kubernetes-dashboard-cd95d586-l4fmb
	0b32a67571bbd       25a5233254979                                                                                         5 minutes ago       Running             kube-proxy                1                   3e56f33e55ff4       kube-proxy-qh57b
	16e28ef3ddc9f       ba04bb24b9575                                                                                         5 minutes ago       Exited              storage-provisioner       1                   4e386924a6d6e       storage-provisioner
	0de6cd1876d18       1611cd07b61d5                                                                                         5 minutes ago       Running             busybox                   1                   e217c7b51c650       busybox
	5534d9d547fd5       db91994f4ee8f                                                                                         5 minutes ago       Running             coredns                   1                   e532ce493eb98       coredns-74ff55c5b-92t8n
	25a953b6e46d0       05b738aa1bc63                                                                                         6 minutes ago       Running             etcd                      1                   1871a1e6580fe       etcd-old-k8s-version-577369
	4ed5438feb8b3       e7605f88f17d6                                                                                         6 minutes ago       Running             kube-scheduler            1                   6f395675accae       kube-scheduler-old-k8s-version-577369
	aab72b1939501       1df8a2b116bd1                                                                                         6 minutes ago       Running             kube-controller-manager   1                   dfa7ffd3df3f8       kube-controller-manager-old-k8s-version-577369
	075e697d07c85       2c08bbbc02d3a                                                                                         6 minutes ago       Running             kube-apiserver            1                   52d8ac1d85d54       kube-apiserver-old-k8s-version-577369
	c2bc2d85c8203       gcr.io/k8s-minikube/busybox@sha256:2d03e6ceeb99250061dd110530b0ece7998cd84121f952adef120ea7c5a6f00e   6 minutes ago       Exited              busybox                   0                   63fb3af78e96b       busybox
	7e60d81fce7fe       db91994f4ee8f                                                                                         8 minutes ago       Exited              coredns                   0                   411b208d208d8       coredns-74ff55c5b-92t8n
	c47b7591b3201       25a5233254979                                                                                         8 minutes ago       Exited              kube-proxy                0                   855b2a21e2baf       kube-proxy-qh57b
	ab177be73cae3       e7605f88f17d6                                                                                         8 minutes ago       Exited              kube-scheduler            0                   2c01fe13146c0       kube-scheduler-old-k8s-version-577369
	a4eee0f3ea354       1df8a2b116bd1                                                                                         8 minutes ago       Exited              kube-controller-manager   0                   7489744553e01       kube-controller-manager-old-k8s-version-577369
	760f9a7d272ca       2c08bbbc02d3a                                                                                         8 minutes ago       Exited              kube-apiserver            0                   312a5efe42939       kube-apiserver-old-k8s-version-577369
	81eaddbb4b456       05b738aa1bc63                                                                                         8 minutes ago       Exited              etcd                      0                   f4a0fd051a764       etcd-old-k8s-version-577369
	
	
	==> coredns [5534d9d547fd] <==
	.:53
	[INFO] plugin/reload: Running configuration MD5 = 093a0bf1423dd8c4eee62372bb216168
	CoreDNS-1.7.0
	linux/arm64, go1.14.4, f59c03d
	[INFO] 127.0.0.1:60770 - 13699 "HINFO IN 4091923126135840625.1712342005459160035. udp 57 false 512" NXDOMAIN qr,rd,ra 57 0.020819605s
	[INFO] plugin/ready: Still waiting on: "kubernetes"
	[INFO] plugin/ready: Still waiting on: "kubernetes"
	[INFO] plugin/ready: Still waiting on: "kubernetes"
	I0620 18:05:58.218647       1 trace.go:116] Trace[2019727887]: "Reflector ListAndWatch" name:pkg/mod/k8s.io/client-go@v0.18.3/tools/cache/reflector.go:125 (started: 2024-06-20 18:05:28.217313028 +0000 UTC m=+0.043691740) (total time: 30.001233241s):
	Trace[2019727887]: [30.001233241s] [30.001233241s] END
	E0620 18:05:58.218688       1 reflector.go:178] pkg/mod/k8s.io/client-go@v0.18.3/tools/cache/reflector.go:125: Failed to list *v1.Service: Get "https://10.96.0.1:443/api/v1/services?limit=500&resourceVersion=0": dial tcp 10.96.0.1:443: i/o timeout
	I0620 18:05:58.221509       1 trace.go:116] Trace[939984059]: "Reflector ListAndWatch" name:pkg/mod/k8s.io/client-go@v0.18.3/tools/cache/reflector.go:125 (started: 2024-06-20 18:05:28.215930356 +0000 UTC m=+0.042309068) (total time: 30.005555259s):
	Trace[939984059]: [30.005555259s] [30.005555259s] END
	E0620 18:05:58.221536       1 reflector.go:178] pkg/mod/k8s.io/client-go@v0.18.3/tools/cache/reflector.go:125: Failed to list *v1.Namespace: Get "https://10.96.0.1:443/api/v1/namespaces?limit=500&resourceVersion=0": dial tcp 10.96.0.1:443: i/o timeout
	I0620 18:05:58.221631       1 trace.go:116] Trace[1474941318]: "Reflector ListAndWatch" name:pkg/mod/k8s.io/client-go@v0.18.3/tools/cache/reflector.go:125 (started: 2024-06-20 18:05:28.217944792 +0000 UTC m=+0.044323495) (total time: 30.003673916s):
	Trace[1474941318]: [30.003673916s] [30.003673916s] END
	E0620 18:05:58.221973       1 reflector.go:178] pkg/mod/k8s.io/client-go@v0.18.3/tools/cache/reflector.go:125: Failed to list *v1.Endpoints: Get "https://10.96.0.1:443/api/v1/endpoints?limit=500&resourceVersion=0": dial tcp 10.96.0.1:443: i/o timeout
	
	
	==> coredns [7e60d81fce7f] <==
	I0620 18:03:46.924624       1 trace.go:116] Trace[2019727887]: "Reflector ListAndWatch" name:pkg/mod/k8s.io/client-go@v0.18.3/tools/cache/reflector.go:125 (started: 2024-06-20 18:03:16.923925874 +0000 UTC m=+0.030073544) (total time: 30.000589279s):
	Trace[2019727887]: [30.000589279s] [30.000589279s] END
	I0620 18:03:46.924908       1 trace.go:116] Trace[1427131847]: "Reflector ListAndWatch" name:pkg/mod/k8s.io/client-go@v0.18.3/tools/cache/reflector.go:125 (started: 2024-06-20 18:03:16.924533293 +0000 UTC m=+0.030680963) (total time: 30.000347553s):
	Trace[1427131847]: [30.000347553s] [30.000347553s] END
	E0620 18:03:46.924937       1 reflector.go:178] pkg/mod/k8s.io/client-go@v0.18.3/tools/cache/reflector.go:125: Failed to list *v1.Service: Get "https://10.96.0.1:443/api/v1/services?limit=500&resourceVersion=0": dial tcp 10.96.0.1:443: i/o timeout
	E0620 18:03:46.924914       1 reflector.go:178] pkg/mod/k8s.io/client-go@v0.18.3/tools/cache/reflector.go:125: Failed to list *v1.Namespace: Get "https://10.96.0.1:443/api/v1/namespaces?limit=500&resourceVersion=0": dial tcp 10.96.0.1:443: i/o timeout
	I0620 18:03:46.925232       1 trace.go:116] Trace[911902081]: "Reflector ListAndWatch" name:pkg/mod/k8s.io/client-go@v0.18.3/tools/cache/reflector.go:125 (started: 2024-06-20 18:03:16.92488421 +0000 UTC m=+0.031031880) (total time: 30.000332193s):
	Trace[911902081]: [30.000332193s] [30.000332193s] END
	E0620 18:03:46.925333       1 reflector.go:178] pkg/mod/k8s.io/client-go@v0.18.3/tools/cache/reflector.go:125: Failed to list *v1.Endpoints: Get "https://10.96.0.1:443/api/v1/endpoints?limit=500&resourceVersion=0": dial tcp 10.96.0.1:443: i/o timeout
	E0620 18:04:51.030078       1 reflector.go:382] pkg/mod/k8s.io/client-go@v0.18.3/tools/cache/reflector.go:125: Failed to watch *v1.Namespace: Get "https://10.96.0.1:443/api/v1/namespaces?allowWatchBookmarks=true&resourceVersion=201&timeout=6m5s&timeoutSeconds=365&watch=true": dial tcp 10.96.0.1:443: connect: connection refused
	E0620 18:04:51.030146       1 reflector.go:382] pkg/mod/k8s.io/client-go@v0.18.3/tools/cache/reflector.go:125: Failed to watch *v1.Endpoints: Get "https://10.96.0.1:443/api/v1/endpoints?allowWatchBookmarks=true&resourceVersion=584&timeout=8m23s&timeoutSeconds=503&watch=true": dial tcp 10.96.0.1:443: connect: connection refused
	E0620 18:04:51.030556       1 reflector.go:382] pkg/mod/k8s.io/client-go@v0.18.3/tools/cache/reflector.go:125: Failed to watch *v1.Service: Get "https://10.96.0.1:443/api/v1/services?allowWatchBookmarks=true&resourceVersion=581&timeout=6m27s&timeoutSeconds=387&watch=true": dial tcp 10.96.0.1:443: connect: connection refused
	[INFO] plugin/reload: Running configuration MD5 = db32ca3650231d74073ff4cf814959a7
	CoreDNS-1.7.0
	linux/arm64, go1.14.4, f59c03d
	[INFO] plugin/ready: Still waiting on: "kubernetes"
	[INFO] plugin/ready: Still waiting on: "kubernetes"
	[INFO] plugin/ready: Still waiting on: "kubernetes"
	[INFO] Reloading
	[INFO] plugin/health: Going into lameduck mode for 5s
	[INFO] plugin/reload: Running configuration MD5 = 093a0bf1423dd8c4eee62372bb216168
	[INFO] Reloading complete
	[INFO] 127.0.0.1:50639 - 57127 "HINFO IN 982282391238305073.4272579212557183947. udp 56 false 512" NXDOMAIN qr,rd,ra 56 0.068021271s
	[INFO] SIGTERM: Shutting down servers then terminating
	[INFO] plugin/health: Going into lameduck mode for 5s
	
	
	==> describe nodes <==
	Name:               old-k8s-version-577369
	Roles:              control-plane,master
	Labels:             beta.kubernetes.io/arch=arm64
	                    beta.kubernetes.io/os=linux
	                    kubernetes.io/arch=arm64
	                    kubernetes.io/hostname=old-k8s-version-577369
	                    kubernetes.io/os=linux
	                    minikube.k8s.io/commit=a5bfa5828b76fe92a3c5f89a54d8c76f6b5f3f8b
	                    minikube.k8s.io/name=old-k8s-version-577369
	                    minikube.k8s.io/primary=true
	                    minikube.k8s.io/updated_at=2024_06_20T18_03_00_0700
	                    minikube.k8s.io/version=v1.33.1
	                    node-role.kubernetes.io/control-plane=
	                    node-role.kubernetes.io/master=
	Annotations:        kubeadm.alpha.kubernetes.io/cri-socket: /var/run/dockershim.sock
	                    node.alpha.kubernetes.io/ttl: 0
	                    volumes.kubernetes.io/controller-managed-attach-detach: true
	CreationTimestamp:  Thu, 20 Jun 2024 18:02:57 +0000
	Taints:             <none>
	Unschedulable:      false
	Lease:
	  HolderIdentity:  old-k8s-version-577369
	  AcquireTime:     <unset>
	  RenewTime:       Thu, 20 Jun 2024 18:11:16 +0000
	Conditions:
	  Type             Status  LastHeartbeatTime                 LastTransitionTime                Reason                       Message
	  ----             ------  -----------------                 ------------------                ------                       -------
	  MemoryPressure   False   Thu, 20 Jun 2024 18:11:16 +0000   Thu, 20 Jun 2024 18:02:50 +0000   KubeletHasSufficientMemory   kubelet has sufficient memory available
	  DiskPressure     False   Thu, 20 Jun 2024 18:11:16 +0000   Thu, 20 Jun 2024 18:02:50 +0000   KubeletHasNoDiskPressure     kubelet has no disk pressure
	  PIDPressure      False   Thu, 20 Jun 2024 18:11:16 +0000   Thu, 20 Jun 2024 18:02:50 +0000   KubeletHasSufficientPID      kubelet has sufficient PID available
	  Ready            True    Thu, 20 Jun 2024 18:11:16 +0000   Thu, 20 Jun 2024 18:03:14 +0000   KubeletReady                 kubelet is posting ready status
	Addresses:
	  InternalIP:  192.168.85.2
	  Hostname:    old-k8s-version-577369
	Capacity:
	  cpu:                2
	  ephemeral-storage:  203034800Ki
	  hugepages-1Gi:      0
	  hugepages-2Mi:      0
	  hugepages-32Mi:     0
	  hugepages-64Ki:     0
	  memory:             8022364Ki
	  pods:               110
	Allocatable:
	  cpu:                2
	  ephemeral-storage:  203034800Ki
	  hugepages-1Gi:      0
	  hugepages-2Mi:      0
	  hugepages-32Mi:     0
	  hugepages-64Ki:     0
	  memory:             8022364Ki
	  pods:               110
	System Info:
	  Machine ID:                 931519ae86cb4478acdcf011166db9ae
	  System UUID:                779bad83-2a0e-4933-8d7a-bb52fc649157
	  Boot ID:                    c14a5c8e-2318-4449-baf4-6a576bee7c02
	  Kernel Version:             5.15.0-1063-aws
	  OS Image:                   Ubuntu 22.04.4 LTS
	  Operating System:           linux
	  Architecture:               arm64
	  Container Runtime Version:  docker://26.1.4
	  Kubelet Version:            v1.20.0
	  Kube-Proxy Version:         v1.20.0
	PodCIDR:                      10.244.0.0/24
	PodCIDRs:                     10.244.0.0/24
	Non-terminated Pods:          (11 in total)
	  Namespace                   Name                                              CPU Requests  CPU Limits  Memory Requests  Memory Limits  AGE
	  ---------                   ----                                              ------------  ----------  ---------------  -------------  ---
	  default                     busybox                                           0 (0%)        0 (0%)      0 (0%)           0 (0%)         6m36s
	  kube-system                 coredns-74ff55c5b-92t8n                           100m (5%)     0 (0%)      70Mi (0%)        170Mi (2%)     8m1s
	  kube-system                 etcd-old-k8s-version-577369                       100m (5%)     0 (0%)      100Mi (1%)       0 (0%)         8m12s
	  kube-system                 kube-apiserver-old-k8s-version-577369             250m (12%)    0 (0%)      0 (0%)           0 (0%)         8m12s
	  kube-system                 kube-controller-manager-old-k8s-version-577369    200m (10%)    0 (0%)      0 (0%)           0 (0%)         8m12s
	  kube-system                 kube-proxy-qh57b                                  0 (0%)        0 (0%)      0 (0%)           0 (0%)         8m1s
	  kube-system                 kube-scheduler-old-k8s-version-577369             100m (5%)     0 (0%)      0 (0%)           0 (0%)         8m12s
	  kube-system                 metrics-server-9975d5f86-nxk7p                    100m (5%)     0 (0%)      200Mi (2%)       0 (0%)         6m26s
	  kube-system                 storage-provisioner                               0 (0%)        0 (0%)      0 (0%)           0 (0%)         7m59s
	  kubernetes-dashboard        dashboard-metrics-scraper-8d5bb5db8-hhrcd         0 (0%)        0 (0%)      0 (0%)           0 (0%)         5m32s
	  kubernetes-dashboard        kubernetes-dashboard-cd95d586-l4fmb               0 (0%)        0 (0%)      0 (0%)           0 (0%)         5m32s
	Allocated resources:
	  (Total limits may be over 100 percent, i.e., overcommitted.)
	  Resource           Requests    Limits
	  --------           --------    ------
	  cpu                850m (42%)  0 (0%)
	  memory             370Mi (4%)  170Mi (2%)
	  ephemeral-storage  100Mi (0%)  0 (0%)
	  hugepages-1Gi      0 (0%)      0 (0%)
	  hugepages-2Mi      0 (0%)      0 (0%)
	  hugepages-32Mi     0 (0%)      0 (0%)
	  hugepages-64Ki     0 (0%)      0 (0%)
	Events:
	  Type    Reason                   Age                    From        Message
	  ----    ------                   ----                   ----        -------
	  Normal  NodeHasSufficientMemory  8m29s (x4 over 8m29s)  kubelet     Node old-k8s-version-577369 status is now: NodeHasSufficientMemory
	  Normal  NodeHasNoDiskPressure    8m29s (x4 over 8m29s)  kubelet     Node old-k8s-version-577369 status is now: NodeHasNoDiskPressure
	  Normal  NodeHasSufficientPID     8m29s (x3 over 8m29s)  kubelet     Node old-k8s-version-577369 status is now: NodeHasSufficientPID
	  Normal  Starting                 8m12s                  kubelet     Starting kubelet.
	  Normal  NodeHasSufficientMemory  8m12s                  kubelet     Node old-k8s-version-577369 status is now: NodeHasSufficientMemory
	  Normal  NodeHasNoDiskPressure    8m12s                  kubelet     Node old-k8s-version-577369 status is now: NodeHasNoDiskPressure
	  Normal  NodeHasSufficientPID     8m12s                  kubelet     Node old-k8s-version-577369 status is now: NodeHasSufficientPID
	  Normal  NodeNotReady             8m12s                  kubelet     Node old-k8s-version-577369 status is now: NodeNotReady
	  Normal  NodeAllocatableEnforced  8m12s                  kubelet     Updated Node Allocatable limit across pods
	  Normal  NodeReady                8m2s                   kubelet     Node old-k8s-version-577369 status is now: NodeReady
	  Normal  Starting                 7m59s                  kube-proxy  Starting kube-proxy.
	  Normal  Starting                 6m3s                   kubelet     Starting kubelet.
	  Normal  NodeHasSufficientMemory  6m3s (x8 over 6m3s)    kubelet     Node old-k8s-version-577369 status is now: NodeHasSufficientMemory
	  Normal  NodeHasNoDiskPressure    6m3s (x8 over 6m3s)    kubelet     Node old-k8s-version-577369 status is now: NodeHasNoDiskPressure
	  Normal  NodeHasSufficientPID     6m3s (x7 over 6m3s)    kubelet     Node old-k8s-version-577369 status is now: NodeHasSufficientPID
	  Normal  NodeAllocatableEnforced  6m3s                   kubelet     Updated Node Allocatable limit across pods
	  Normal  Starting                 5m48s                  kube-proxy  Starting kube-proxy.
	
	
	==> dmesg <==
	[  +0.000966] FS-Cache: N-cookie d=00000000f28f4ba9{9p.inode} n=00000000552c27c9
	[  +0.001066] FS-Cache: N-key=[8] '836ced0000000000'
	[  +0.008071] FS-Cache: Duplicate cookie detected
	[  +0.000721] FS-Cache: O-cookie c=00000006 [p=00000003 fl=226 nc=0 na=1]
	[  +0.001007] FS-Cache: O-cookie d=00000000f28f4ba9{9p.inode} n=0000000040d3cb50
	[  +0.001263] FS-Cache: O-key=[8] '836ced0000000000'
	[  +0.000776] FS-Cache: N-cookie c=0000000d [p=00000003 fl=2 nc=0 na=1]
	[  +0.000959] FS-Cache: N-cookie d=00000000f28f4ba9{9p.inode} n=000000005ce3da04
	[  +0.001044] FS-Cache: N-key=[8] '836ced0000000000'
	[  +3.435202] FS-Cache: Duplicate cookie detected
	[  +0.000686] FS-Cache: O-cookie c=00000004 [p=00000003 fl=226 nc=0 na=1]
	[  +0.001032] FS-Cache: O-cookie d=00000000f28f4ba9{9p.inode} n=00000000b15183e6
	[  +0.001024] FS-Cache: O-key=[8] '826ced0000000000'
	[  +0.000690] FS-Cache: N-cookie c=0000000f [p=00000003 fl=2 nc=0 na=1]
	[  +0.000936] FS-Cache: N-cookie d=00000000f28f4ba9{9p.inode} n=000000002b1edaf2
	[  +0.001040] FS-Cache: N-key=[8] '826ced0000000000'
	[  +0.341946] FS-Cache: Duplicate cookie detected
	[  +0.000704] FS-Cache: O-cookie c=00000009 [p=00000003 fl=226 nc=0 na=1]
	[  +0.000945] FS-Cache: O-cookie d=00000000f28f4ba9{9p.inode} n=000000003afd8a69
	[  +0.001051] FS-Cache: O-key=[8] '8d6ced0000000000'
	[  +0.000694] FS-Cache: N-cookie c=00000010 [p=00000003 fl=2 nc=0 na=1]
	[  +0.000913] FS-Cache: N-cookie d=00000000f28f4ba9{9p.inode} n=000000008813ebd0
	[  +0.001026] FS-Cache: N-key=[8] '8d6ced0000000000'
	[Jun20 17:20] hrtimer: interrupt took 10275005 ns
	[Jun20 17:49] kmem.limit_in_bytes is deprecated and will be removed. Please report your usecase to linux-mm@kvack.org if you depend on this functionality.
	
	
	==> etcd [25a953b6e46d] <==
	2024-06-20 18:07:14.497109 I | etcdserver/api/etcdhttp: /health OK (status code 200)
	2024-06-20 18:07:24.496199 I | etcdserver/api/etcdhttp: /health OK (status code 200)
	2024-06-20 18:07:34.495823 I | etcdserver/api/etcdhttp: /health OK (status code 200)
	2024-06-20 18:07:44.495820 I | etcdserver/api/etcdhttp: /health OK (status code 200)
	2024-06-20 18:07:54.495814 I | etcdserver/api/etcdhttp: /health OK (status code 200)
	2024-06-20 18:08:04.495742 I | etcdserver/api/etcdhttp: /health OK (status code 200)
	2024-06-20 18:08:14.495933 I | etcdserver/api/etcdhttp: /health OK (status code 200)
	2024-06-20 18:08:24.495715 I | etcdserver/api/etcdhttp: /health OK (status code 200)
	2024-06-20 18:08:34.495776 I | etcdserver/api/etcdhttp: /health OK (status code 200)
	2024-06-20 18:08:44.496011 I | etcdserver/api/etcdhttp: /health OK (status code 200)
	2024-06-20 18:08:54.495859 I | etcdserver/api/etcdhttp: /health OK (status code 200)
	2024-06-20 18:09:04.495714 I | etcdserver/api/etcdhttp: /health OK (status code 200)
	2024-06-20 18:09:14.495833 I | etcdserver/api/etcdhttp: /health OK (status code 200)
	2024-06-20 18:09:24.495857 I | etcdserver/api/etcdhttp: /health OK (status code 200)
	2024-06-20 18:09:34.495737 I | etcdserver/api/etcdhttp: /health OK (status code 200)
	2024-06-20 18:09:44.497660 I | etcdserver/api/etcdhttp: /health OK (status code 200)
	2024-06-20 18:09:54.495754 I | etcdserver/api/etcdhttp: /health OK (status code 200)
	2024-06-20 18:10:04.495913 I | etcdserver/api/etcdhttp: /health OK (status code 200)
	2024-06-20 18:10:14.495976 I | etcdserver/api/etcdhttp: /health OK (status code 200)
	2024-06-20 18:10:24.495707 I | etcdserver/api/etcdhttp: /health OK (status code 200)
	2024-06-20 18:10:34.495868 I | etcdserver/api/etcdhttp: /health OK (status code 200)
	2024-06-20 18:10:44.495752 I | etcdserver/api/etcdhttp: /health OK (status code 200)
	2024-06-20 18:10:54.495958 I | etcdserver/api/etcdhttp: /health OK (status code 200)
	2024-06-20 18:11:04.495816 I | etcdserver/api/etcdhttp: /health OK (status code 200)
	2024-06-20 18:11:14.495932 I | etcdserver/api/etcdhttp: /health OK (status code 200)
	
	
	==> etcd [81eaddbb4b45] <==
	raft2024/06/20 18:02:48 INFO: 9f0758e1c58a86ed became leader at term 2
	raft2024/06/20 18:02:48 INFO: raft.node: 9f0758e1c58a86ed elected leader 9f0758e1c58a86ed at term 2
	2024-06-20 18:02:48.824462 I | etcdserver: setting up the initial cluster version to 3.4
	2024-06-20 18:02:48.829701 N | etcdserver/membership: set the initial cluster version to 3.4
	2024-06-20 18:02:48.829756 I | etcdserver/api: enabled capabilities for version 3.4
	2024-06-20 18:02:48.829787 I | etcdserver: published {Name:old-k8s-version-577369 ClientURLs:[https://192.168.85.2:2379]} to cluster 68eaea490fab4e05
	2024-06-20 18:02:48.829874 I | embed: ready to serve client requests
	2024-06-20 18:02:48.835481 I | embed: serving client requests on 192.168.85.2:2379
	2024-06-20 18:02:48.835536 I | embed: ready to serve client requests
	2024-06-20 18:02:48.836556 I | embed: serving client requests on 127.0.0.1:2379
	2024-06-20 18:02:58.193564 I | etcdserver/api/etcdhttp: /health OK (status code 200)
	2024-06-20 18:03:10.851043 I | etcdserver/api/etcdhttp: /health OK (status code 200)
	2024-06-20 18:03:13.496089 I | etcdserver/api/etcdhttp: /health OK (status code 200)
	2024-06-20 18:03:23.495843 I | etcdserver/api/etcdhttp: /health OK (status code 200)
	2024-06-20 18:03:33.496459 I | etcdserver/api/etcdhttp: /health OK (status code 200)
	2024-06-20 18:03:43.495919 I | etcdserver/api/etcdhttp: /health OK (status code 200)
	2024-06-20 18:03:53.495960 I | etcdserver/api/etcdhttp: /health OK (status code 200)
	2024-06-20 18:04:03.495822 I | etcdserver/api/etcdhttp: /health OK (status code 200)
	2024-06-20 18:04:13.495956 I | etcdserver/api/etcdhttp: /health OK (status code 200)
	2024-06-20 18:04:23.496205 I | etcdserver/api/etcdhttp: /health OK (status code 200)
	2024-06-20 18:04:33.496336 I | etcdserver/api/etcdhttp: /health OK (status code 200)
	2024-06-20 18:04:43.496045 I | etcdserver/api/etcdhttp: /health OK (status code 200)
	2024-06-20 18:04:51.121147 N | pkg/osutil: received terminated signal, shutting down...
	WARNING: 2024/06/20 18:04:51 grpc: addrConn.createTransport failed to connect to {127.0.0.1:2379  <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting...
	2024-06-20 18:04:51.162172 I | etcdserver: skipped leadership transfer for single voting member cluster
	
	
	==> kernel <==
	 18:11:17 up  1:53,  0 users,  load average: 3.37, 2.77, 3.43
	Linux old-k8s-version-577369 5.15.0-1063-aws #69~20.04.1-Ubuntu SMP Fri May 10 19:21:30 UTC 2024 aarch64 aarch64 aarch64 GNU/Linux
	PRETTY_NAME="Ubuntu 22.04.4 LTS"
	
	
	==> kube-apiserver [075e697d07c8] <==
	I0620 18:07:44.460574       1 passthrough.go:48] ccResolverWrapper: sending update to cc: {[{https://127.0.0.1:2379  <nil> 0 <nil>}] <nil> <nil>}
	I0620 18:07:44.460582       1 clientconn.go:948] ClientConn switching balancer to "pick_first"
	I0620 18:08:15.396472       1 client.go:360] parsed scheme: "passthrough"
	I0620 18:08:15.396513       1 passthrough.go:48] ccResolverWrapper: sending update to cc: {[{https://127.0.0.1:2379  <nil> 0 <nil>}] <nil> <nil>}
	I0620 18:08:15.396533       1 clientconn.go:948] ClientConn switching balancer to "pick_first"
	W0620 18:08:28.122013       1 handler_proxy.go:102] no RequestInfo found in the context
	E0620 18:08:28.122084       1 controller.go:116] loading OpenAPI spec for "v1beta1.metrics.k8s.io" failed with: failed to retrieve openAPI spec, http error: ResponseCode: 503, Body: service unavailable
	, Header: map[Content-Type:[text/plain; charset=utf-8] X-Content-Type-Options:[nosniff]]
	I0620 18:08:28.122093       1 controller.go:129] OpenAPI AggregationController: action for item v1beta1.metrics.k8s.io: Rate Limited Requeue.
	I0620 18:08:47.177021       1 client.go:360] parsed scheme: "passthrough"
	I0620 18:08:47.177061       1 passthrough.go:48] ccResolverWrapper: sending update to cc: {[{https://127.0.0.1:2379  <nil> 0 <nil>}] <nil> <nil>}
	I0620 18:08:47.177070       1 clientconn.go:948] ClientConn switching balancer to "pick_first"
	I0620 18:09:32.117308       1 client.go:360] parsed scheme: "passthrough"
	I0620 18:09:32.117357       1 passthrough.go:48] ccResolverWrapper: sending update to cc: {[{https://127.0.0.1:2379  <nil> 0 <nil>}] <nil> <nil>}
	I0620 18:09:32.117366       1 clientconn.go:948] ClientConn switching balancer to "pick_first"
	I0620 18:10:15.239810       1 client.go:360] parsed scheme: "passthrough"
	I0620 18:10:15.239853       1 passthrough.go:48] ccResolverWrapper: sending update to cc: {[{https://127.0.0.1:2379  <nil> 0 <nil>}] <nil> <nil>}
	I0620 18:10:15.239865       1 clientconn.go:948] ClientConn switching balancer to "pick_first"
	W0620 18:10:24.749448       1 handler_proxy.go:102] no RequestInfo found in the context
	E0620 18:10:24.749532       1 controller.go:116] loading OpenAPI spec for "v1beta1.metrics.k8s.io" failed with: failed to retrieve openAPI spec, http error: ResponseCode: 503, Body: service unavailable
	, Header: map[Content-Type:[text/plain; charset=utf-8] X-Content-Type-Options:[nosniff]]
	I0620 18:10:24.749544       1 controller.go:129] OpenAPI AggregationController: action for item v1beta1.metrics.k8s.io: Rate Limited Requeue.
	I0620 18:10:49.807185       1 client.go:360] parsed scheme: "passthrough"
	I0620 18:10:49.807327       1 passthrough.go:48] ccResolverWrapper: sending update to cc: {[{https://127.0.0.1:2379  <nil> 0 <nil>}] <nil> <nil>}
	I0620 18:10:49.807389       1 clientconn.go:948] ClientConn switching balancer to "pick_first"
	
	
	==> kube-apiserver [760f9a7d272c] <==
	W0620 18:04:51.166693       1 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {https://127.0.0.1:2379  <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting...
	W0620 18:04:51.166736       1 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {https://127.0.0.1:2379  <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting...
	W0620 18:04:51.166770       1 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {https://127.0.0.1:2379  <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting...
	W0620 18:04:51.166808       1 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {https://127.0.0.1:2379  <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting...
	W0620 18:04:51.166842       1 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {https://127.0.0.1:2379  <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting...
	W0620 18:04:51.166901       1 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {https://127.0.0.1:2379  <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting...
	W0620 18:04:51.166945       1 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {https://127.0.0.1:2379  <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting...
	W0620 18:04:51.168598       1 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {https://127.0.0.1:2379  <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting...
	W0620 18:04:51.168682       1 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {https://127.0.0.1:2379  <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting...
	W0620 18:04:51.168722       1 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {https://127.0.0.1:2379  <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting...
	W0620 18:04:51.168765       1 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {https://127.0.0.1:2379  <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting...
	W0620 18:04:51.169168       1 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {https://127.0.0.1:2379  <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting...
	W0620 18:04:51.169258       1 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {https://127.0.0.1:2379  <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting...
	W0620 18:04:51.169267       1 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {https://127.0.0.1:2379  <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting...
	W0620 18:04:51.169297       1 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {https://127.0.0.1:2379  <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting...
	W0620 18:04:51.169339       1 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {https://127.0.0.1:2379  <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting...
	W0620 18:04:51.169400       1 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {https://127.0.0.1:2379  <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting...
	W0620 18:04:51.169474       1 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {https://127.0.0.1:2379  <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting...
	W0620 18:04:51.169539       1 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {https://127.0.0.1:2379  <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting...
	W0620 18:04:51.169591       1 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {https://127.0.0.1:2379  <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting...
	W0620 18:04:51.169638       1 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {https://127.0.0.1:2379  <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting...
	W0620 18:04:51.169680       1 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {https://127.0.0.1:2379  <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting...
	W0620 18:04:51.169720       1 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {https://127.0.0.1:2379  <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting...
	W0620 18:04:51.169755       1 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {https://127.0.0.1:2379  <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting...
	W0620 18:04:51.169948       1 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {https://127.0.0.1:2379  <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting...
	
	
	==> kube-controller-manager [a4eee0f3ea35] <==
	I0620 18:03:15.694704       1 shared_informer.go:247] Caches are synced for ReplicationController 
	I0620 18:03:15.694761       1 shared_informer.go:247] Caches are synced for attach detach 
	I0620 18:03:15.698309       1 shared_informer.go:247] Caches are synced for resource quota 
	I0620 18:03:15.707927       1 shared_informer.go:247] Caches are synced for persistent volume 
	I0620 18:03:15.708754       1 shared_informer.go:247] Caches are synced for daemon sets 
	I0620 18:03:15.713159       1 event.go:291] "Event occurred" object="kube-system/coredns" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled up replica set coredns-74ff55c5b to 2"
	I0620 18:03:15.724017       1 event.go:291] "Event occurred" object="kube-system/coredns-74ff55c5b" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: coredns-74ff55c5b-jdqbb"
	E0620 18:03:15.735260       1 clusterroleaggregation_controller.go:181] admin failed with : Operation cannot be fulfilled on clusterroles.rbac.authorization.k8s.io "admin": the object has been modified; please apply your changes to the latest version and try again
	I0620 18:03:15.761972       1 event.go:291] "Event occurred" object="kube-system/coredns-74ff55c5b" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: coredns-74ff55c5b-92t8n"
	I0620 18:03:15.777119       1 event.go:291] "Event occurred" object="kube-system/kube-proxy" kind="DaemonSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: kube-proxy-qh57b"
	I0620 18:03:15.835276       1 shared_informer.go:240] Waiting for caches to sync for garbage collector
	I0620 18:03:16.135481       1 shared_informer.go:247] Caches are synced for garbage collector 
	I0620 18:03:16.145139       1 shared_informer.go:247] Caches are synced for garbage collector 
	I0620 18:03:16.145160       1 garbagecollector.go:151] Garbage collector: all resource monitors have synced. Proceeding to collect garbage
	I0620 18:03:16.443989       1 request.go:655] Throttling request took 1.045563564s, request: GET:https://192.168.85.2:8443/apis/extensions/v1beta1?timeout=32s
	I0620 18:03:17.245787       1 shared_informer.go:240] Waiting for caches to sync for resource quota
	I0620 18:03:17.245826       1 shared_informer.go:247] Caches are synced for resource quota 
	I0620 18:03:17.445070       1 event.go:291] "Event occurred" object="kube-system/coredns" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled down replica set coredns-74ff55c5b to 1"
	I0620 18:03:17.469412       1 event.go:291] "Event occurred" object="kube-system/coredns-74ff55c5b" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulDelete" message="Deleted pod: coredns-74ff55c5b-jdqbb"
	I0620 18:04:49.568039       1 event.go:291] "Event occurred" object="kube-system/metrics-server" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled up replica set metrics-server-9975d5f86 to 1"
	I0620 18:04:49.651946       1 event.go:291] "Event occurred" object="kube-system/metrics-server-9975d5f86" kind="ReplicaSet" apiVersion="apps/v1" type="Warning" reason="FailedCreate" message="Error creating: pods \"metrics-server-9975d5f86-\" is forbidden: error looking up service account kube-system/metrics-server: serviceaccount \"metrics-server\" not found"
	E0620 18:04:49.683547       1 replica_set.go:532] sync "kube-system/metrics-server-9975d5f86" failed with pods "metrics-server-9975d5f86-" is forbidden: error looking up service account kube-system/metrics-server: serviceaccount "metrics-server" not found
	E0620 18:04:49.987319       1 clusterroleaggregation_controller.go:181] view failed with : Operation cannot be fulfilled on clusterroles.rbac.authorization.k8s.io "view": the object has been modified; please apply your changes to the latest version and try again
	E0620 18:04:50.027474       1 clusterroleaggregation_controller.go:181] edit failed with : Operation cannot be fulfilled on clusterroles.rbac.authorization.k8s.io "edit": the object has been modified; please apply your changes to the latest version and try again
	I0620 18:04:50.784892       1 event.go:291] "Event occurred" object="kube-system/metrics-server-9975d5f86" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: metrics-server-9975d5f86-nxk7p"
	
	
	==> kube-controller-manager [aab72b193950] <==
	W0620 18:06:50.438872       1 garbagecollector.go:703] failed to discover some groups: map[metrics.k8s.io/v1beta1:the server is currently unable to handle the request]
	E0620 18:07:16.458728       1 resource_quota_controller.go:409] unable to retrieve the complete list of server APIs: metrics.k8s.io/v1beta1: the server is currently unable to handle the request
	I0620 18:07:22.089367       1 request.go:655] Throttling request took 1.048263979s, request: GET:https://192.168.85.2:8443/apis/authorization.k8s.io/v1beta1?timeout=32s
	W0620 18:07:22.940727       1 garbagecollector.go:703] failed to discover some groups: map[metrics.k8s.io/v1beta1:the server is currently unable to handle the request]
	E0620 18:07:46.960512       1 resource_quota_controller.go:409] unable to retrieve the complete list of server APIs: metrics.k8s.io/v1beta1: the server is currently unable to handle the request
	I0620 18:07:54.591142       1 request.go:655] Throttling request took 1.048376276s, request: GET:https://192.168.85.2:8443/apis/authentication.k8s.io/v1?timeout=32s
	W0620 18:07:55.442754       1 garbagecollector.go:703] failed to discover some groups: map[metrics.k8s.io/v1beta1:the server is currently unable to handle the request]
	E0620 18:08:17.462296       1 resource_quota_controller.go:409] unable to retrieve the complete list of server APIs: metrics.k8s.io/v1beta1: the server is currently unable to handle the request
	I0620 18:08:27.093235       1 request.go:655] Throttling request took 1.048362655s, request: GET:https://192.168.85.2:8443/apis/scheduling.k8s.io/v1?timeout=32s
	W0620 18:08:27.944537       1 garbagecollector.go:703] failed to discover some groups: map[metrics.k8s.io/v1beta1:the server is currently unable to handle the request]
	E0620 18:08:47.964068       1 resource_quota_controller.go:409] unable to retrieve the complete list of server APIs: metrics.k8s.io/v1beta1: the server is currently unable to handle the request
	I0620 18:08:59.595153       1 request.go:655] Throttling request took 1.048256777s, request: GET:https://192.168.85.2:8443/apis/batch/v1beta1?timeout=32s
	W0620 18:09:00.446727       1 garbagecollector.go:703] failed to discover some groups: map[metrics.k8s.io/v1beta1:the server is currently unable to handle the request]
	E0620 18:09:18.466128       1 resource_quota_controller.go:409] unable to retrieve the complete list of server APIs: metrics.k8s.io/v1beta1: the server is currently unable to handle the request
	I0620 18:09:32.097277       1 request.go:655] Throttling request took 1.048408366s, request: GET:https://192.168.85.2:8443/apis/extensions/v1beta1?timeout=32s
	W0620 18:09:32.948700       1 garbagecollector.go:703] failed to discover some groups: map[metrics.k8s.io/v1beta1:the server is currently unable to handle the request]
	E0620 18:09:48.967983       1 resource_quota_controller.go:409] unable to retrieve the complete list of server APIs: metrics.k8s.io/v1beta1: the server is currently unable to handle the request
	I0620 18:10:04.599064       1 request.go:655] Throttling request took 1.048220114s, request: GET:https://192.168.85.2:8443/apis/batch/v1beta1?timeout=32s
	W0620 18:10:05.450438       1 garbagecollector.go:703] failed to discover some groups: map[metrics.k8s.io/v1beta1:the server is currently unable to handle the request]
	E0620 18:10:19.469955       1 resource_quota_controller.go:409] unable to retrieve the complete list of server APIs: metrics.k8s.io/v1beta1: the server is currently unable to handle the request
	I0620 18:10:37.100769       1 request.go:655] Throttling request took 1.047890726s, request: GET:https://192.168.85.2:8443/apis/extensions/v1beta1?timeout=32s
	W0620 18:10:37.952310       1 garbagecollector.go:703] failed to discover some groups: map[metrics.k8s.io/v1beta1:the server is currently unable to handle the request]
	E0620 18:10:49.971751       1 resource_quota_controller.go:409] unable to retrieve the complete list of server APIs: metrics.k8s.io/v1beta1: the server is currently unable to handle the request
	I0620 18:11:09.602839       1 request.go:655] Throttling request took 1.048301491s, request: GET:https://192.168.85.2:8443/apis/extensions/v1beta1?timeout=32s
	W0620 18:11:10.454356       1 garbagecollector.go:703] failed to discover some groups: map[metrics.k8s.io/v1beta1:the server is currently unable to handle the request]
	
	
	==> kube-proxy [0b32a67571bb] <==
	I0620 18:05:28.432216       1 node.go:172] Successfully retrieved node IP: 192.168.85.2
	I0620 18:05:28.432301       1 server_others.go:142] kube-proxy node IP is an IPv4 address (192.168.85.2), assume IPv4 operation
	W0620 18:05:28.464983       1 server_others.go:578] Unknown proxy mode "", assuming iptables proxy
	I0620 18:05:28.465145       1 server_others.go:185] Using iptables Proxier.
	I0620 18:05:28.465442       1 server.go:650] Version: v1.20.0
	I0620 18:05:28.465967       1 config.go:315] Starting service config controller
	I0620 18:05:28.466017       1 shared_informer.go:240] Waiting for caches to sync for service config
	I0620 18:05:28.472264       1 config.go:224] Starting endpoint slice config controller
	I0620 18:05:28.472291       1 shared_informer.go:240] Waiting for caches to sync for endpoint slice config
	I0620 18:05:28.567067       1 shared_informer.go:247] Caches are synced for service config 
	I0620 18:05:28.575135       1 shared_informer.go:247] Caches are synced for endpoint slice config 
	
	
	==> kube-proxy [c47b7591b320] <==
	I0620 18:03:17.285474       1 node.go:172] Successfully retrieved node IP: 192.168.85.2
	I0620 18:03:17.285570       1 server_others.go:142] kube-proxy node IP is an IPv4 address (192.168.85.2), assume IPv4 operation
	W0620 18:03:17.335755       1 server_others.go:578] Unknown proxy mode "", assuming iptables proxy
	I0620 18:03:17.335849       1 server_others.go:185] Using iptables Proxier.
	I0620 18:03:17.336057       1 server.go:650] Version: v1.20.0
	I0620 18:03:17.336506       1 config.go:315] Starting service config controller
	I0620 18:03:17.336521       1 shared_informer.go:240] Waiting for caches to sync for service config
	I0620 18:03:17.338418       1 config.go:224] Starting endpoint slice config controller
	I0620 18:03:17.338462       1 shared_informer.go:240] Waiting for caches to sync for endpoint slice config
	I0620 18:03:17.440497       1 shared_informer.go:247] Caches are synced for endpoint slice config 
	I0620 18:03:17.440609       1 shared_informer.go:247] Caches are synced for service config 
	
	
	==> kube-scheduler [4ed5438feb8b] <==
	I0620 18:05:18.039312       1 serving.go:331] Generated self-signed cert in-memory
	W0620 18:05:23.584231       1 requestheader_controller.go:193] Unable to get configmap/extension-apiserver-authentication in kube-system.  Usually fixed by 'kubectl create rolebinding -n kube-system ROLEBINDING_NAME --role=extension-apiserver-authentication-reader --serviceaccount=YOUR_NS:YOUR_SA'
	W0620 18:05:23.584269       1 authentication.go:332] Error looking up in-cluster authentication configuration: configmaps "extension-apiserver-authentication" is forbidden: User "system:kube-scheduler" cannot get resource "configmaps" in API group "" in the namespace "kube-system"
	W0620 18:05:23.584295       1 authentication.go:333] Continuing without authentication configuration. This may treat all requests as anonymous.
	W0620 18:05:23.584308       1 authentication.go:334] To require authentication configuration lookup to succeed, set --authentication-tolerate-lookup-failure=false
	I0620 18:05:23.998366       1 configmap_cafile_content.go:202] Starting client-ca::kube-system::extension-apiserver-authentication::client-ca-file
	I0620 18:05:23.998397       1 shared_informer.go:240] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file
	I0620 18:05:24.016711       1 tlsconfig.go:240] Starting DynamicServingCertificateController
	I0620 18:05:24.036748       1 secure_serving.go:197] Serving securely on 127.0.0.1:10259
	I0620 18:05:24.199986       1 shared_informer.go:247] Caches are synced for client-ca::kube-system::extension-apiserver-authentication::client-ca-file 
	
	
	==> kube-scheduler [ab177be73cae] <==
	I0620 18:02:52.304687       1 serving.go:331] Generated self-signed cert in-memory
	W0620 18:02:57.050580       1 requestheader_controller.go:193] Unable to get configmap/extension-apiserver-authentication in kube-system.  Usually fixed by 'kubectl create rolebinding -n kube-system ROLEBINDING_NAME --role=extension-apiserver-authentication-reader --serviceaccount=YOUR_NS:YOUR_SA'
	W0620 18:02:57.050626       1 authentication.go:332] Error looking up in-cluster authentication configuration: configmaps "extension-apiserver-authentication" is forbidden: User "system:kube-scheduler" cannot get resource "configmaps" in API group "" in the namespace "kube-system"
	W0620 18:02:57.050650       1 authentication.go:333] Continuing without authentication configuration. This may treat all requests as anonymous.
	W0620 18:02:57.050851       1 authentication.go:334] To require authentication configuration lookup to succeed, set --authentication-tolerate-lookup-failure=false
	I0620 18:02:57.167304       1 configmap_cafile_content.go:202] Starting client-ca::kube-system::extension-apiserver-authentication::client-ca-file
	I0620 18:02:57.167334       1 shared_informer.go:240] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file
	I0620 18:02:57.170657       1 secure_serving.go:197] Serving securely on 127.0.0.1:10259
	I0620 18:02:57.170770       1 tlsconfig.go:240] Starting DynamicServingCertificateController
	E0620 18:02:57.185619       1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.StatefulSet: failed to list *v1.StatefulSet: statefulsets.apps is forbidden: User "system:kube-scheduler" cannot list resource "statefulsets" in API group "apps" at the cluster scope
	E0620 18:02:57.186036       1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.PersistentVolume: failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumes" in API group "" at the cluster scope
	E0620 18:02:57.186231       1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.ReplicationController: failed to list *v1.ReplicationController: replicationcontrollers is forbidden: User "system:kube-scheduler" cannot list resource "replicationcontrollers" in API group "" at the cluster scope
	E0620 18:02:57.186379       1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.StorageClass: failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope
	E0620 18:02:57.186629       1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.PersistentVolumeClaim: failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumeclaims" in API group "" at the cluster scope
	E0620 18:02:57.186933       1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.CSINode: failed to list *v1.CSINode: csinodes.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csinodes" in API group "storage.k8s.io" at the cluster scope
	E0620 18:02:57.187256       1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.Node: failed to list *v1.Node: nodes is forbidden: User "system:kube-scheduler" cannot list resource "nodes" in API group "" at the cluster scope
	E0620 18:02:57.202746       1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1beta1.PodDisruptionBudget: failed to list *v1beta1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User "system:kube-scheduler" cannot list resource "poddisruptionbudgets" in API group "policy" at the cluster scope
	E0620 18:02:57.203026       1 reflector.go:138] k8s.io/apiserver/pkg/server/dynamiccertificates/configmap_cafile_content.go:206: Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps "extension-apiserver-authentication" is forbidden: User "system:kube-scheduler" cannot list resource "configmaps" in API group "" in the namespace "kube-system"
	E0620 18:02:57.203170       1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.ReplicaSet: failed to list *v1.ReplicaSet: replicasets.apps is forbidden: User "system:kube-scheduler" cannot list resource "replicasets" in API group "apps" at the cluster scope
	E0620 18:02:57.203275       1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.Service: failed to list *v1.Service: services is forbidden: User "system:kube-scheduler" cannot list resource "services" in API group "" at the cluster scope
	E0620 18:02:57.203339       1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.Pod: failed to list *v1.Pod: pods is forbidden: User "system:kube-scheduler" cannot list resource "pods" in API group "" at the cluster scope
	E0620 18:02:58.063127       1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.CSINode: failed to list *v1.CSINode: csinodes.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csinodes" in API group "storage.k8s.io" at the cluster scope
	I0620 18:02:58.767573       1 shared_informer.go:247] Caches are synced for client-ca::kube-system::extension-apiserver-authentication::client-ca-file 
	
	
	==> kubelet <==
	Jun 20 18:08:57 old-k8s-version-577369 kubelet[1201]: E0620 18:08:57.213817    1201 pod_workers.go:191] Error syncing pod a0906cb1-30c6-4e85-9f0e-eb135fb00622 ("dashboard-metrics-scraper-8d5bb5db8-hhrcd_kubernetes-dashboard(a0906cb1-30c6-4e85-9f0e-eb135fb00622)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	Jun 20 18:08:58 old-k8s-version-577369 kubelet[1201]: E0620 18:08:58.213216    1201 pod_workers.go:191] Error syncing pod 18cfc59b-503a-463e-80f0-2a08c7711566 ("metrics-server-9975d5f86-nxk7p_kube-system(18cfc59b-503a-463e-80f0-2a08c7711566)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	Jun 20 18:09:11 old-k8s-version-577369 kubelet[1201]: E0620 18:09:11.219178    1201 pod_workers.go:191] Error syncing pod 18cfc59b-503a-463e-80f0-2a08c7711566 ("metrics-server-9975d5f86-nxk7p_kube-system(18cfc59b-503a-463e-80f0-2a08c7711566)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	Jun 20 18:09:11 old-k8s-version-577369 kubelet[1201]: E0620 18:09:11.223804    1201 pod_workers.go:191] Error syncing pod a0906cb1-30c6-4e85-9f0e-eb135fb00622 ("dashboard-metrics-scraper-8d5bb5db8-hhrcd_kubernetes-dashboard(a0906cb1-30c6-4e85-9f0e-eb135fb00622)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	Jun 20 18:09:25 old-k8s-version-577369 kubelet[1201]: E0620 18:09:25.213628    1201 pod_workers.go:191] Error syncing pod 18cfc59b-503a-463e-80f0-2a08c7711566 ("metrics-server-9975d5f86-nxk7p_kube-system(18cfc59b-503a-463e-80f0-2a08c7711566)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	Jun 20 18:09:25 old-k8s-version-577369 kubelet[1201]: E0620 18:09:25.220085    1201 pod_workers.go:191] Error syncing pod a0906cb1-30c6-4e85-9f0e-eb135fb00622 ("dashboard-metrics-scraper-8d5bb5db8-hhrcd_kubernetes-dashboard(a0906cb1-30c6-4e85-9f0e-eb135fb00622)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	Jun 20 18:09:37 old-k8s-version-577369 kubelet[1201]: E0620 18:09:37.218633    1201 pod_workers.go:191] Error syncing pod 18cfc59b-503a-463e-80f0-2a08c7711566 ("metrics-server-9975d5f86-nxk7p_kube-system(18cfc59b-503a-463e-80f0-2a08c7711566)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	Jun 20 18:09:39 old-k8s-version-577369 kubelet[1201]: E0620 18:09:39.213129    1201 pod_workers.go:191] Error syncing pod a0906cb1-30c6-4e85-9f0e-eb135fb00622 ("dashboard-metrics-scraper-8d5bb5db8-hhrcd_kubernetes-dashboard(a0906cb1-30c6-4e85-9f0e-eb135fb00622)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	Jun 20 18:09:49 old-k8s-version-577369 kubelet[1201]: E0620 18:09:49.212880    1201 pod_workers.go:191] Error syncing pod 18cfc59b-503a-463e-80f0-2a08c7711566 ("metrics-server-9975d5f86-nxk7p_kube-system(18cfc59b-503a-463e-80f0-2a08c7711566)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	Jun 20 18:09:52 old-k8s-version-577369 kubelet[1201]: E0620 18:09:52.213161    1201 pod_workers.go:191] Error syncing pod a0906cb1-30c6-4e85-9f0e-eb135fb00622 ("dashboard-metrics-scraper-8d5bb5db8-hhrcd_kubernetes-dashboard(a0906cb1-30c6-4e85-9f0e-eb135fb00622)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	Jun 20 18:10:01 old-k8s-version-577369 kubelet[1201]: E0620 18:10:01.220058    1201 pod_workers.go:191] Error syncing pod 18cfc59b-503a-463e-80f0-2a08c7711566 ("metrics-server-9975d5f86-nxk7p_kube-system(18cfc59b-503a-463e-80f0-2a08c7711566)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	Jun 20 18:10:07 old-k8s-version-577369 kubelet[1201]: E0620 18:10:07.213133    1201 pod_workers.go:191] Error syncing pod a0906cb1-30c6-4e85-9f0e-eb135fb00622 ("dashboard-metrics-scraper-8d5bb5db8-hhrcd_kubernetes-dashboard(a0906cb1-30c6-4e85-9f0e-eb135fb00622)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	Jun 20 18:10:16 old-k8s-version-577369 kubelet[1201]: E0620 18:10:16.212946    1201 pod_workers.go:191] Error syncing pod 18cfc59b-503a-463e-80f0-2a08c7711566 ("metrics-server-9975d5f86-nxk7p_kube-system(18cfc59b-503a-463e-80f0-2a08c7711566)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	Jun 20 18:10:18 old-k8s-version-577369 kubelet[1201]: E0620 18:10:18.220165    1201 pod_workers.go:191] Error syncing pod a0906cb1-30c6-4e85-9f0e-eb135fb00622 ("dashboard-metrics-scraper-8d5bb5db8-hhrcd_kubernetes-dashboard(a0906cb1-30c6-4e85-9f0e-eb135fb00622)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	Jun 20 18:10:31 old-k8s-version-577369 kubelet[1201]: E0620 18:10:31.213199    1201 pod_workers.go:191] Error syncing pod a0906cb1-30c6-4e85-9f0e-eb135fb00622 ("dashboard-metrics-scraper-8d5bb5db8-hhrcd_kubernetes-dashboard(a0906cb1-30c6-4e85-9f0e-eb135fb00622)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	Jun 20 18:10:31 old-k8s-version-577369 kubelet[1201]: E0620 18:10:31.215427    1201 pod_workers.go:191] Error syncing pod 18cfc59b-503a-463e-80f0-2a08c7711566 ("metrics-server-9975d5f86-nxk7p_kube-system(18cfc59b-503a-463e-80f0-2a08c7711566)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	Jun 20 18:10:42 old-k8s-version-577369 kubelet[1201]: E0620 18:10:42.213713    1201 pod_workers.go:191] Error syncing pod a0906cb1-30c6-4e85-9f0e-eb135fb00622 ("dashboard-metrics-scraper-8d5bb5db8-hhrcd_kubernetes-dashboard(a0906cb1-30c6-4e85-9f0e-eb135fb00622)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	Jun 20 18:10:43 old-k8s-version-577369 kubelet[1201]: E0620 18:10:43.221571    1201 pod_workers.go:191] Error syncing pod 18cfc59b-503a-463e-80f0-2a08c7711566 ("metrics-server-9975d5f86-nxk7p_kube-system(18cfc59b-503a-463e-80f0-2a08c7711566)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	Jun 20 18:10:55 old-k8s-version-577369 kubelet[1201]: E0620 18:10:55.213780    1201 pod_workers.go:191] Error syncing pod a0906cb1-30c6-4e85-9f0e-eb135fb00622 ("dashboard-metrics-scraper-8d5bb5db8-hhrcd_kubernetes-dashboard(a0906cb1-30c6-4e85-9f0e-eb135fb00622)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	Jun 20 18:10:56 old-k8s-version-577369 kubelet[1201]: E0620 18:10:56.253796    1201 remote_image.go:113] PullImage "fake.domain/registry.k8s.io/echoserver:1.4" from image service failed: rpc error: code = Unknown desc = Error response from daemon: Get "https://fake.domain/v2/": dial tcp: lookup fake.domain on 192.168.85.1:53: no such host
	Jun 20 18:10:56 old-k8s-version-577369 kubelet[1201]: E0620 18:10:56.253857    1201 kuberuntime_image.go:51] Pull image "fake.domain/registry.k8s.io/echoserver:1.4" failed: rpc error: code = Unknown desc = Error response from daemon: Get "https://fake.domain/v2/": dial tcp: lookup fake.domain on 192.168.85.1:53: no such host
	Jun 20 18:10:56 old-k8s-version-577369 kubelet[1201]: E0620 18:10:56.254038    1201 kuberuntime_manager.go:829] container &Container{Name:metrics-server,Image:fake.domain/registry.k8s.io/echoserver:1.4,Command:[],Args:[--cert-dir=/tmp --secure-port=4443 --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --kubelet-use-node-status-port --metric-resolution=60s --kubelet-insecure-tls],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:4443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{cpu: {{100 -3} {<nil>} 100m DecimalSI},memory: {{209715200 0} {<nil>}  BinarySI},},},VolumeMounts:[]VolumeMount{VolumeMount{Name:tmp-dir,ReadOnly:false,MountPath:/tmp,SubPath:,MountPropagation:nil,SubPathExpr:,},VolumeMount{Name:metrics-server-token-xd7wc,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:&Probe{Handler:Handler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/livez,Port:{1 0 https},Host:,Scheme:HTTPS,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,},InitialDelaySeconds:0,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,},ReadinessProbe:&Probe{Handler:Handler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{1 0 https},Host:,Scheme:HTTPS,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,},InitialDelaySeconds:0,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:*true,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,} start failed in pod metrics-server-9975d5f86-nxk7p_kube-system(18cfc59b-503a-463e-80f0-2a08c7711566): ErrImagePull: rpc error: code = Unknown desc = Error response from daemon: Get "https://fake.domain/v2/": dial tcp: lookup fake.domain on 192.168.85.1:53: no such host
	Jun 20 18:10:56 old-k8s-version-577369 kubelet[1201]: E0620 18:10:56.254103    1201 pod_workers.go:191] Error syncing pod 18cfc59b-503a-463e-80f0-2a08c7711566 ("metrics-server-9975d5f86-nxk7p_kube-system(18cfc59b-503a-463e-80f0-2a08c7711566)"), skipping: failed to "StartContainer" for "metrics-server" with ErrImagePull: "rpc error: code = Unknown desc = Error response from daemon: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.85.1:53: no such host"
	Jun 20 18:11:09 old-k8s-version-577369 kubelet[1201]: E0620 18:11:09.227999    1201 pod_workers.go:191] Error syncing pod 18cfc59b-503a-463e-80f0-2a08c7711566 ("metrics-server-9975d5f86-nxk7p_kube-system(18cfc59b-503a-463e-80f0-2a08c7711566)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	Jun 20 18:11:10 old-k8s-version-577369 kubelet[1201]: E0620 18:11:10.213353    1201 pod_workers.go:191] Error syncing pod a0906cb1-30c6-4e85-9f0e-eb135fb00622 ("dashboard-metrics-scraper-8d5bb5db8-hhrcd_kubernetes-dashboard(a0906cb1-30c6-4e85-9f0e-eb135fb00622)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	
	
	==> kubernetes-dashboard [49efdc60eccc] <==
	2024/06/20 18:05:51 Starting overwatch
	2024/06/20 18:05:51 Using namespace: kubernetes-dashboard
	2024/06/20 18:05:51 Using in-cluster config to connect to apiserver
	2024/06/20 18:05:51 Using secret token for csrf signing
	2024/06/20 18:05:51 Initializing csrf token from kubernetes-dashboard-csrf secret
	2024/06/20 18:05:52 Empty token. Generating and storing in a secret kubernetes-dashboard-csrf
	2024/06/20 18:05:52 Successful initial request to the apiserver, version: v1.20.0
	2024/06/20 18:05:52 Generating JWE encryption key
	2024/06/20 18:05:52 New synchronizer has been registered: kubernetes-dashboard-key-holder-kubernetes-dashboard. Starting
	2024/06/20 18:05:52 Starting secret synchronizer for kubernetes-dashboard-key-holder in namespace kubernetes-dashboard
	2024/06/20 18:05:53 Initializing JWE encryption key from synchronized object
	2024/06/20 18:05:53 Creating in-cluster Sidecar client
	2024/06/20 18:05:53 Serving insecurely on HTTP port: 9090
	2024/06/20 18:05:53 Metric client health check failed: the server is currently unable to handle the request (get services dashboard-metrics-scraper). Retrying in 30 seconds.
	2024/06/20 18:06:23 Metric client health check failed: the server is currently unable to handle the request (get services dashboard-metrics-scraper). Retrying in 30 seconds.
	2024/06/20 18:06:53 Metric client health check failed: the server is currently unable to handle the request (get services dashboard-metrics-scraper). Retrying in 30 seconds.
	2024/06/20 18:07:23 Metric client health check failed: the server is currently unable to handle the request (get services dashboard-metrics-scraper). Retrying in 30 seconds.
	2024/06/20 18:07:53 Metric client health check failed: the server is currently unable to handle the request (get services dashboard-metrics-scraper). Retrying in 30 seconds.
	2024/06/20 18:08:23 Metric client health check failed: the server is currently unable to handle the request (get services dashboard-metrics-scraper). Retrying in 30 seconds.
	2024/06/20 18:08:53 Metric client health check failed: the server is currently unable to handle the request (get services dashboard-metrics-scraper). Retrying in 30 seconds.
	2024/06/20 18:09:23 Metric client health check failed: the server is currently unable to handle the request (get services dashboard-metrics-scraper). Retrying in 30 seconds.
	2024/06/20 18:09:53 Metric client health check failed: the server is currently unable to handle the request (get services dashboard-metrics-scraper). Retrying in 30 seconds.
	2024/06/20 18:10:23 Metric client health check failed: the server is currently unable to handle the request (get services dashboard-metrics-scraper). Retrying in 30 seconds.
	2024/06/20 18:10:53 Metric client health check failed: the server is currently unable to handle the request (get services dashboard-metrics-scraper). Retrying in 30 seconds.
	
	
	==> storage-provisioner [16e28ef3ddc9] <==
	I0620 18:05:27.998603       1 storage_provisioner.go:116] Initializing the minikube storage provisioner...
	F0620 18:05:58.000630       1 main.go:39] error getting server version: Get "https://10.96.0.1:443/version?timeout=32s": dial tcp 10.96.0.1:443: i/o timeout
	
	
	==> storage-provisioner [74b467d165b4] <==
	I0620 18:06:09.360446       1 storage_provisioner.go:116] Initializing the minikube storage provisioner...
	I0620 18:06:09.377319       1 storage_provisioner.go:141] Storage provisioner initialized, now starting service!
	I0620 18:06:09.377592       1 leaderelection.go:243] attempting to acquire leader lease kube-system/k8s.io-minikube-hostpath...
	I0620 18:06:26.830163       1 leaderelection.go:253] successfully acquired lease kube-system/k8s.io-minikube-hostpath
	I0620 18:06:26.830414       1 controller.go:835] Starting provisioner controller k8s.io/minikube-hostpath_old-k8s-version-577369_717f0702-e3a0-4de6-8b89-1f27d7268bc4!
	I0620 18:06:26.830628       1 event.go:282] Event(v1.ObjectReference{Kind:"Endpoints", Namespace:"kube-system", Name:"k8s.io-minikube-hostpath", UID:"a6fbd6e7-503b-4865-b8a7-b9bb08510499", APIVersion:"v1", ResourceVersion:"823", FieldPath:""}): type: 'Normal' reason: 'LeaderElection' old-k8s-version-577369_717f0702-e3a0-4de6-8b89-1f27d7268bc4 became leader
	I0620 18:06:26.931521       1 controller.go:884] Started provisioner controller k8s.io/minikube-hostpath_old-k8s-version-577369_717f0702-e3a0-4de6-8b89-1f27d7268bc4!
	

                                                
                                                
-- /stdout --
helpers_test.go:254: (dbg) Run:  out/minikube-linux-arm64 status --format={{.APIServer}} -p old-k8s-version-577369 -n old-k8s-version-577369
helpers_test.go:261: (dbg) Run:  kubectl --context old-k8s-version-577369 get po -o=jsonpath={.items[*].metadata.name} -A --field-selector=status.phase!=Running
helpers_test.go:272: non-running pods: metrics-server-9975d5f86-nxk7p dashboard-metrics-scraper-8d5bb5db8-hhrcd
helpers_test.go:274: ======> post-mortem[TestStartStop/group/old-k8s-version/serial/SecondStart]: describe non-running pods <======
helpers_test.go:277: (dbg) Run:  kubectl --context old-k8s-version-577369 describe pod metrics-server-9975d5f86-nxk7p dashboard-metrics-scraper-8d5bb5db8-hhrcd
helpers_test.go:277: (dbg) Non-zero exit: kubectl --context old-k8s-version-577369 describe pod metrics-server-9975d5f86-nxk7p dashboard-metrics-scraper-8d5bb5db8-hhrcd: exit status 1 (85.667689ms)

                                                
                                                
** stderr ** 
	Error from server (NotFound): pods "metrics-server-9975d5f86-nxk7p" not found
	Error from server (NotFound): pods "dashboard-metrics-scraper-8d5bb5db8-hhrcd" not found

                                                
                                                
** /stderr **
helpers_test.go:279: kubectl --context old-k8s-version-577369 describe pod metrics-server-9975d5f86-nxk7p dashboard-metrics-scraper-8d5bb5db8-hhrcd: exit status 1
--- FAIL: TestStartStop/group/old-k8s-version/serial/SecondStart (376.34s)
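
Note on reproducing the post-mortem above: the same triage the harness ran can be repeated by hand against the leftover profile (a minimal sketch; the profile and pod names are taken from this run and will differ elsewhere). The NotFound errors from the describe step are consistent with it querying the default namespace, while the kubelet log shows the listed pods live in kube-system and kubernetes-dashboard.

	# check the control plane state the harness queried
	out/minikube-linux-arm64 status --format='{{.APIServer}}' -p old-k8s-version-577369 -n old-k8s-version-577369
	# list pods that are not Running, across all namespaces
	kubectl --context old-k8s-version-577369 get po -A --field-selector='status.phase!=Running' \
	  -o=jsonpath='{.items[*].metadata.name}'
	# describe the stuck pods in their actual namespaces
	kubectl --context old-k8s-version-577369 -n kube-system describe pod metrics-server-9975d5f86-nxk7p
	kubectl --context old-k8s-version-577369 -n kubernetes-dashboard describe pod dashboard-metrics-scraper-8d5bb5db8-hhrcd
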

                                                
                                    

Test pass (317/343)

Order passed test Duration
3 TestDownloadOnly/v1.20.0/json-events 10.26
4 TestDownloadOnly/v1.20.0/preload-exists 0
8 TestDownloadOnly/v1.20.0/LogsDuration 0.36
9 TestDownloadOnly/v1.20.0/DeleteAll 0.35
10 TestDownloadOnly/v1.20.0/DeleteAlwaysSucceeds 0.22
12 TestDownloadOnly/v1.30.2/json-events 5.35
13 TestDownloadOnly/v1.30.2/preload-exists 0
17 TestDownloadOnly/v1.30.2/LogsDuration 0.07
18 TestDownloadOnly/v1.30.2/DeleteAll 0.26
19 TestDownloadOnly/v1.30.2/DeleteAlwaysSucceeds 0.18
21 TestBinaryMirror 0.54
22 TestOffline 58.83
25 TestAddons/PreSetup/EnablingAddonOnNonExistingCluster 0.07
26 TestAddons/PreSetup/DisablingAddonOnNonExistingCluster 0.07
27 TestAddons/Setup 230.19
29 TestAddons/parallel/Registry 15.57
31 TestAddons/parallel/InspektorGadget 11.73
32 TestAddons/parallel/MetricsServer 5.74
35 TestAddons/parallel/CSI 67.54
36 TestAddons/parallel/Headlamp 11.96
37 TestAddons/parallel/CloudSpanner 5.51
38 TestAddons/parallel/LocalPath 55.44
39 TestAddons/parallel/NvidiaDevicePlugin 5.45
40 TestAddons/parallel/Yakd 6.01
41 TestAddons/parallel/Volcano 40.45
44 TestAddons/serial/GCPAuth/Namespaces 0.17
45 TestAddons/StoppedEnableDisable 11.12
46 TestCertOptions 41.42
47 TestCertExpiration 249.66
48 TestDockerFlags 48.55
49 TestForceSystemdFlag 42.45
50 TestForceSystemdEnv 42.69
56 TestErrorSpam/setup 31.7
57 TestErrorSpam/start 0.7
58 TestErrorSpam/status 0.96
59 TestErrorSpam/pause 1.45
60 TestErrorSpam/unpause 1.37
61 TestErrorSpam/stop 10.98
64 TestFunctional/serial/CopySyncFile 0
65 TestFunctional/serial/StartWithProxy 48.73
66 TestFunctional/serial/AuditLog 0
67 TestFunctional/serial/SoftStart 29.11
68 TestFunctional/serial/KubeContext 0.06
69 TestFunctional/serial/KubectlGetPods 0.11
72 TestFunctional/serial/CacheCmd/cache/add_remote 3.34
73 TestFunctional/serial/CacheCmd/cache/add_local 1.06
74 TestFunctional/serial/CacheCmd/cache/CacheDelete 0.08
75 TestFunctional/serial/CacheCmd/cache/list 0.05
76 TestFunctional/serial/CacheCmd/cache/verify_cache_inside_node 0.33
77 TestFunctional/serial/CacheCmd/cache/cache_reload 1.64
78 TestFunctional/serial/CacheCmd/cache/delete 0.11
79 TestFunctional/serial/MinikubeKubectlCmd 0.15
80 TestFunctional/serial/MinikubeKubectlCmdDirectly 0.13
81 TestFunctional/serial/ExtraConfig 44.67
82 TestFunctional/serial/ComponentHealth 0.1
83 TestFunctional/serial/LogsCmd 1.17
84 TestFunctional/serial/LogsFileCmd 1.15
85 TestFunctional/serial/InvalidService 4.61
87 TestFunctional/parallel/ConfigCmd 0.46
88 TestFunctional/parallel/DashboardCmd 11.86
89 TestFunctional/parallel/DryRun 0.52
90 TestFunctional/parallel/InternationalLanguage 0.23
91 TestFunctional/parallel/StatusCmd 1.08
95 TestFunctional/parallel/ServiceCmdConnect 12.68
96 TestFunctional/parallel/AddonsCmd 0.2
97 TestFunctional/parallel/PersistentVolumeClaim 26.87
99 TestFunctional/parallel/SSHCmd 0.66
100 TestFunctional/parallel/CpCmd 2.31
102 TestFunctional/parallel/FileSync 0.34
103 TestFunctional/parallel/CertSync 2.08
107 TestFunctional/parallel/NodeLabels 0.11
109 TestFunctional/parallel/NonActiveRuntimeDisabled 0.35
111 TestFunctional/parallel/License 0.24
113 TestFunctional/parallel/TunnelCmd/serial/RunSecondTunnel 0.61
114 TestFunctional/parallel/TunnelCmd/serial/StartTunnel 0
116 TestFunctional/parallel/TunnelCmd/serial/WaitService/Setup 8.44
117 TestFunctional/parallel/TunnelCmd/serial/WaitService/IngressIP 0.11
118 TestFunctional/parallel/TunnelCmd/serial/AccessDirect 0
122 TestFunctional/parallel/TunnelCmd/serial/DeleteTunnel 0.11
123 TestFunctional/parallel/ServiceCmd/DeployApp 7.26
124 TestFunctional/parallel/ProfileCmd/profile_not_create 0.52
125 TestFunctional/parallel/ProfileCmd/profile_list 0.39
126 TestFunctional/parallel/ProfileCmd/profile_json_output 0.37
127 TestFunctional/parallel/MountCmd/any-port 8.58
128 TestFunctional/parallel/ServiceCmd/List 0.68
129 TestFunctional/parallel/ServiceCmd/JSONOutput 0.52
130 TestFunctional/parallel/ServiceCmd/HTTPS 0.5
131 TestFunctional/parallel/ServiceCmd/Format 0.53
132 TestFunctional/parallel/ServiceCmd/URL 0.41
133 TestFunctional/parallel/MountCmd/specific-port 2.07
134 TestFunctional/parallel/MountCmd/VerifyCleanup 2.7
135 TestFunctional/parallel/Version/short 0.07
136 TestFunctional/parallel/Version/components 0.99
137 TestFunctional/parallel/ImageCommands/ImageListShort 0.25
138 TestFunctional/parallel/ImageCommands/ImageListTable 0.26
139 TestFunctional/parallel/ImageCommands/ImageListJson 0.25
140 TestFunctional/parallel/ImageCommands/ImageListYaml 0.22
141 TestFunctional/parallel/ImageCommands/ImageBuild 2.43
142 TestFunctional/parallel/ImageCommands/Setup 1.93
143 TestFunctional/parallel/ImageCommands/ImageLoadDaemon 4.53
144 TestFunctional/parallel/UpdateContextCmd/no_changes 0.2
145 TestFunctional/parallel/UpdateContextCmd/no_minikube_cluster 0.2
146 TestFunctional/parallel/UpdateContextCmd/no_clusters 0.16
147 TestFunctional/parallel/DockerEnv/bash 1.33
148 TestFunctional/parallel/ImageCommands/ImageReloadDaemon 2.98
149 TestFunctional/parallel/ImageCommands/ImageTagAndLoadDaemon 5.43
150 TestFunctional/parallel/ImageCommands/ImageSaveToFile 0.9
151 TestFunctional/parallel/ImageCommands/ImageRemove 0.45
152 TestFunctional/parallel/ImageCommands/ImageLoadFromFile 1.28
153 TestFunctional/parallel/ImageCommands/ImageSaveDaemon 0.96
154 TestFunctional/delete_addon-resizer_images 0.08
155 TestFunctional/delete_my-image_image 0.02
156 TestFunctional/delete_minikube_cached_images 0.02
160 TestMultiControlPlane/serial/StartCluster 137.46
161 TestMultiControlPlane/serial/DeployApp 41.33
162 TestMultiControlPlane/serial/PingHostFromPods 1.73
163 TestMultiControlPlane/serial/AddWorkerNode 25.92
164 TestMultiControlPlane/serial/NodeLabels 0.12
165 TestMultiControlPlane/serial/HAppyAfterClusterStart 0.74
166 TestMultiControlPlane/serial/CopyFile 19.06
167 TestMultiControlPlane/serial/StopSecondaryNode 11.84
168 TestMultiControlPlane/serial/DegradedAfterControlPlaneNodeStop 0.54
169 TestMultiControlPlane/serial/RestartSecondaryNode 62.92
170 TestMultiControlPlane/serial/HAppyAfterSecondaryNodeRestart 0.77
171 TestMultiControlPlane/serial/RestartClusterKeepsNodes 215.66
172 TestMultiControlPlane/serial/DeleteSecondaryNode 12.56
173 TestMultiControlPlane/serial/DegradedAfterSecondaryNodeDelete 0.57
174 TestMultiControlPlane/serial/StopCluster 32.61
175 TestMultiControlPlane/serial/RestartCluster 86.11
176 TestMultiControlPlane/serial/DegradedAfterClusterRestart 0.58
177 TestMultiControlPlane/serial/AddSecondaryNode 44.78
178 TestMultiControlPlane/serial/HAppyAfterSecondaryNodeAdd 0.79
181 TestImageBuild/serial/Setup 34.97
182 TestImageBuild/serial/NormalBuild 1.8
183 TestImageBuild/serial/BuildWithBuildArg 0.9
184 TestImageBuild/serial/BuildWithDockerIgnore 0.71
185 TestImageBuild/serial/BuildWithSpecifiedDockerfile 0.67
189 TestJSONOutput/start/Command 47.67
190 TestJSONOutput/start/Audit 0
192 TestJSONOutput/start/parallel/DistinctCurrentSteps 0
193 TestJSONOutput/start/parallel/IncreasingCurrentSteps 0
195 TestJSONOutput/pause/Command 0.61
196 TestJSONOutput/pause/Audit 0
198 TestJSONOutput/pause/parallel/DistinctCurrentSteps 0
199 TestJSONOutput/pause/parallel/IncreasingCurrentSteps 0
201 TestJSONOutput/unpause/Command 0.52
202 TestJSONOutput/unpause/Audit 0
204 TestJSONOutput/unpause/parallel/DistinctCurrentSteps 0
205 TestJSONOutput/unpause/parallel/IncreasingCurrentSteps 0
207 TestJSONOutput/stop/Command 10.9
208 TestJSONOutput/stop/Audit 0
210 TestJSONOutput/stop/parallel/DistinctCurrentSteps 0
211 TestJSONOutput/stop/parallel/IncreasingCurrentSteps 0
212 TestErrorJSONOutput 0.21
214 TestKicCustomNetwork/create_custom_network 33.49
215 TestKicCustomNetwork/use_default_bridge_network 35.68
216 TestKicExistingNetwork 35.22
217 TestKicCustomSubnet 35.74
218 TestKicStaticIP 34.06
219 TestMainNoArgs 0.05
220 TestMinikubeProfile 68.85
223 TestMountStart/serial/StartWithMountFirst 10.6
224 TestMountStart/serial/VerifyMountFirst 0.27
225 TestMountStart/serial/StartWithMountSecond 10.52
226 TestMountStart/serial/VerifyMountSecond 0.25
227 TestMountStart/serial/DeleteFirst 1.45
228 TestMountStart/serial/VerifyMountPostDelete 0.24
229 TestMountStart/serial/Stop 1.19
230 TestMountStart/serial/RestartStopped 8.33
231 TestMountStart/serial/VerifyMountPostStop 0.26
234 TestMultiNode/serial/FreshStart2Nodes 79.71
235 TestMultiNode/serial/DeployApp2Nodes 44.36
236 TestMultiNode/serial/PingHostFrom2Pods 1.01
237 TestMultiNode/serial/AddNode 20.38
238 TestMultiNode/serial/MultiNodeLabels 0.11
239 TestMultiNode/serial/ProfileList 0.34
240 TestMultiNode/serial/CopyFile 9.99
241 TestMultiNode/serial/StopNode 2.25
242 TestMultiNode/serial/StartAfterStop 11.11
243 TestMultiNode/serial/RestartKeepsNodes 87.6
244 TestMultiNode/serial/DeleteNode 5.61
245 TestMultiNode/serial/StopMultiNode 21.51
246 TestMultiNode/serial/RestartMultiNode 32.03
247 TestMultiNode/serial/ValidateNameConflict 35.59
252 TestPreload 147.26
254 TestScheduledStopUnix 103.75
255 TestSkaffold 116.11
257 TestInsufficientStorage 10.73
258 TestRunningBinaryUpgrade 83.01
260 TestKubernetesUpgrade 366.86
261 TestMissingContainerUpgrade 119.33
263 TestNoKubernetes/serial/StartNoK8sWithVersion 0.11
264 TestNoKubernetes/serial/StartWithK8s 45.34
265 TestNoKubernetes/serial/StartWithStopK8s 17.22
277 TestNoKubernetes/serial/Start 11.15
278 TestNoKubernetes/serial/VerifyK8sNotRunning 0.31
279 TestNoKubernetes/serial/ProfileList 0.85
280 TestNoKubernetes/serial/Stop 1.25
281 TestNoKubernetes/serial/StartNoArgs 8.44
282 TestNoKubernetes/serial/VerifyK8sNotRunningSecond 0.32
283 TestStoppedBinaryUpgrade/Setup 0.64
284 TestStoppedBinaryUpgrade/Upgrade 110.04
285 TestStoppedBinaryUpgrade/MinikubeLogs 1.34
294 TestPause/serial/Start 88.64
295 TestPause/serial/SecondStartNoReconfiguration 34.65
296 TestPause/serial/Pause 0.61
297 TestPause/serial/VerifyStatus 0.31
298 TestPause/serial/Unpause 0.61
299 TestPause/serial/PauseAgain 0.92
300 TestPause/serial/DeletePaused 2.35
301 TestPause/serial/VerifyDeletedResources 14.05
302 TestNetworkPlugins/group/auto/Start 87.87
303 TestNetworkPlugins/group/auto/KubeletFlags 0.39
304 TestNetworkPlugins/group/auto/NetCatPod 13.35
305 TestNetworkPlugins/group/kindnet/Start 65.56
306 TestNetworkPlugins/group/auto/DNS 0.34
307 TestNetworkPlugins/group/auto/Localhost 0.21
308 TestNetworkPlugins/group/auto/HairPin 0.23
309 TestNetworkPlugins/group/calico/Start 80.96
310 TestNetworkPlugins/group/kindnet/ControllerPod 6.01
311 TestNetworkPlugins/group/kindnet/KubeletFlags 0.37
312 TestNetworkPlugins/group/kindnet/NetCatPod 9.33
313 TestNetworkPlugins/group/kindnet/DNS 0.2
314 TestNetworkPlugins/group/kindnet/Localhost 0.21
315 TestNetworkPlugins/group/kindnet/HairPin 0.19
316 TestNetworkPlugins/group/custom-flannel/Start 65.63
317 TestNetworkPlugins/group/calico/ControllerPod 6.01
318 TestNetworkPlugins/group/calico/KubeletFlags 0.38
319 TestNetworkPlugins/group/calico/NetCatPod 11.38
320 TestNetworkPlugins/group/calico/DNS 0.29
321 TestNetworkPlugins/group/calico/Localhost 0.25
322 TestNetworkPlugins/group/calico/HairPin 0.28
323 TestNetworkPlugins/group/false/Start 93.64
324 TestNetworkPlugins/group/custom-flannel/KubeletFlags 0.36
325 TestNetworkPlugins/group/custom-flannel/NetCatPod 11.33
326 TestNetworkPlugins/group/custom-flannel/DNS 0.26
327 TestNetworkPlugins/group/custom-flannel/Localhost 0.24
328 TestNetworkPlugins/group/custom-flannel/HairPin 0.2
329 TestNetworkPlugins/group/enable-default-cni/Start 49.62
330 TestNetworkPlugins/group/false/KubeletFlags 0.4
331 TestNetworkPlugins/group/false/NetCatPod 11.5
332 TestNetworkPlugins/group/enable-default-cni/KubeletFlags 0.3
333 TestNetworkPlugins/group/enable-default-cni/NetCatPod 12.33
334 TestNetworkPlugins/group/false/DNS 0.18
335 TestNetworkPlugins/group/false/Localhost 0.16
336 TestNetworkPlugins/group/false/HairPin 0.16
337 TestNetworkPlugins/group/enable-default-cni/DNS 0.32
338 TestNetworkPlugins/group/enable-default-cni/Localhost 0.27
339 TestNetworkPlugins/group/enable-default-cni/HairPin 0.21
340 TestNetworkPlugins/group/flannel/Start 66.56
341 TestNetworkPlugins/group/bridge/Start 56.59
342 TestNetworkPlugins/group/bridge/KubeletFlags 0.44
343 TestNetworkPlugins/group/bridge/NetCatPod 11.4
344 TestNetworkPlugins/group/flannel/ControllerPod 6.01
345 TestNetworkPlugins/group/flannel/KubeletFlags 0.29
346 TestNetworkPlugins/group/flannel/NetCatPod 11.27
347 TestNetworkPlugins/group/bridge/DNS 0.24
348 TestNetworkPlugins/group/bridge/Localhost 0.24
349 TestNetworkPlugins/group/bridge/HairPin 0.23
350 TestNetworkPlugins/group/flannel/DNS 0.27
351 TestNetworkPlugins/group/flannel/Localhost 0.24
352 TestNetworkPlugins/group/flannel/HairPin 0.36
353 TestNetworkPlugins/group/kubenet/Start 94.77
355 TestStartStop/group/old-k8s-version/serial/FirstStart 151.84
356 TestNetworkPlugins/group/kubenet/KubeletFlags 0.29
357 TestNetworkPlugins/group/kubenet/NetCatPod 11.32
358 TestNetworkPlugins/group/kubenet/DNS 0.18
359 TestNetworkPlugins/group/kubenet/Localhost 0.16
360 TestNetworkPlugins/group/kubenet/HairPin 0.16
362 TestStartStop/group/embed-certs/serial/FirstStart 50.26
363 TestStartStop/group/old-k8s-version/serial/DeployApp 8.74
364 TestStartStop/group/old-k8s-version/serial/EnableAddonWhileActive 1.89
365 TestStartStop/group/old-k8s-version/serial/Stop 11.27
366 TestStartStop/group/embed-certs/serial/DeployApp 8.36
367 TestStartStop/group/embed-certs/serial/EnableAddonWhileActive 1.56
368 TestStartStop/group/old-k8s-version/serial/EnableAddonAfterStop 0.26
370 TestStartStop/group/embed-certs/serial/Stop 11.36
371 TestStartStop/group/embed-certs/serial/EnableAddonAfterStop 0.29
372 TestStartStop/group/embed-certs/serial/SecondStart 293.43
373 TestStartStop/group/embed-certs/serial/UserAppExistsAfterStop 6.01
374 TestStartStop/group/embed-certs/serial/AddonExistsAfterStop 5.1
375 TestStartStop/group/embed-certs/serial/VerifyKubernetesImages 0.27
376 TestStartStop/group/embed-certs/serial/Pause 2.91
378 TestStartStop/group/no-preload/serial/FirstStart 99
379 TestStartStop/group/old-k8s-version/serial/UserAppExistsAfterStop 6.01
380 TestStartStop/group/old-k8s-version/serial/AddonExistsAfterStop 5.12
381 TestStartStop/group/old-k8s-version/serial/VerifyKubernetesImages 0.25
382 TestStartStop/group/old-k8s-version/serial/Pause 2.91
384 TestStartStop/group/default-k8s-diff-port/serial/FirstStart 85.84
385 TestStartStop/group/no-preload/serial/DeployApp 8.44
386 TestStartStop/group/no-preload/serial/EnableAddonWhileActive 1.13
387 TestStartStop/group/no-preload/serial/Stop 10.87
388 TestStartStop/group/no-preload/serial/EnableAddonAfterStop 0.2
389 TestStartStop/group/no-preload/serial/SecondStart 266.48
390 TestStartStop/group/default-k8s-diff-port/serial/DeployApp 8.42
391 TestStartStop/group/default-k8s-diff-port/serial/EnableAddonWhileActive 1.16
392 TestStartStop/group/default-k8s-diff-port/serial/Stop 10.87
393 TestStartStop/group/default-k8s-diff-port/serial/EnableAddonAfterStop 0.19
394 TestStartStop/group/default-k8s-diff-port/serial/SecondStart 267.47
395 TestStartStop/group/no-preload/serial/UserAppExistsAfterStop 6.01
396 TestStartStop/group/no-preload/serial/AddonExistsAfterStop 5.11
397 TestStartStop/group/no-preload/serial/VerifyKubernetesImages 0.26
398 TestStartStop/group/no-preload/serial/Pause 3.17
400 TestStartStop/group/newest-cni/serial/FirstStart 47.31
401 TestStartStop/group/default-k8s-diff-port/serial/UserAppExistsAfterStop 6.01
402 TestStartStop/group/default-k8s-diff-port/serial/AddonExistsAfterStop 5.24
403 TestStartStop/group/newest-cni/serial/DeployApp 0
404 TestStartStop/group/newest-cni/serial/EnableAddonWhileActive 1.15
405 TestStartStop/group/newest-cni/serial/Stop 5.95
406 TestStartStop/group/default-k8s-diff-port/serial/VerifyKubernetesImages 0.26
407 TestStartStop/group/default-k8s-diff-port/serial/Pause 3.38
408 TestStartStop/group/newest-cni/serial/EnableAddonAfterStop 0.47
409 TestStartStop/group/newest-cni/serial/SecondStart 18.32
410 TestStartStop/group/newest-cni/serial/UserAppExistsAfterStop 0
411 TestStartStop/group/newest-cni/serial/AddonExistsAfterStop 0
412 TestStartStop/group/newest-cni/serial/VerifyKubernetesImages 0.27
413 TestStartStop/group/newest-cni/serial/Pause 2.61

TestDownloadOnly/v1.20.0/json-events (10.26s)

=== RUN   TestDownloadOnly/v1.20.0/json-events
aaa_download_only_test.go:81: (dbg) Run:  out/minikube-linux-arm64 start -o=json --download-only -p download-only-475314 --force --alsologtostderr --kubernetes-version=v1.20.0 --container-runtime=docker --driver=docker  --container-runtime=docker
aaa_download_only_test.go:81: (dbg) Done: out/minikube-linux-arm64 start -o=json --download-only -p download-only-475314 --force --alsologtostderr --kubernetes-version=v1.20.0 --container-runtime=docker --driver=docker  --container-runtime=docker: (10.258343284s)
--- PASS: TestDownloadOnly/v1.20.0/json-events (10.26s)
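For reference, the -o=json flag used above makes minikube print its progress as line-delimited JSON events on stdout. Below is a minimal Go sketch (not part of the test suite) of consuming that stream; the flags and profile name match this run, and the "type" field read from each event is an assumption about the payload, not a documented schema.

// json_events_sketch.go -- a minimal sketch of reading "minikube start -o=json" events.
package main

import (
	"bufio"
	"encoding/json"
	"fmt"
	"log"
	"os/exec"
)

func main() {
	cmd := exec.Command("out/minikube-linux-arm64", "start", "-o=json", "--download-only",
		"-p", "download-only-475314", "--force", "--alsologtostderr",
		"--kubernetes-version=v1.20.0", "--container-runtime=docker", "--driver=docker")
	stdout, err := cmd.StdoutPipe()
	if err != nil {
		log.Fatal(err)
	}
	if err := cmd.Start(); err != nil {
		log.Fatal(err)
	}
	sc := bufio.NewScanner(stdout)
	sc.Buffer(make([]byte, 0, 1024*1024), 1024*1024) // individual events can be long
	for sc.Scan() {
		var ev map[string]any
		if json.Unmarshal(sc.Bytes(), &ev) != nil {
			continue // ignore any non-JSON lines
		}
		fmt.Println("event:", ev["type"]) // assumed field name, for illustration only
	}
	if err := cmd.Wait(); err != nil {
		log.Fatalf("start --download-only failed: %v", err)
	}
}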

TestDownloadOnly/v1.20.0/preload-exists (0s)

=== RUN   TestDownloadOnly/v1.20.0/preload-exists
--- PASS: TestDownloadOnly/v1.20.0/preload-exists (0.00s)

TestDownloadOnly/v1.20.0/LogsDuration (0.36s)

=== RUN   TestDownloadOnly/v1.20.0/LogsDuration
aaa_download_only_test.go:184: (dbg) Run:  out/minikube-linux-arm64 logs -p download-only-475314
aaa_download_only_test.go:184: (dbg) Non-zero exit: out/minikube-linux-arm64 logs -p download-only-475314: exit status 85 (362.733015ms)
-- stdout --
	
	==> Audit <==
	|---------|--------------------------------|----------------------|---------|---------|---------------------|----------|
	| Command |              Args              |       Profile        |  User   | Version |     Start Time      | End Time |
	|---------|--------------------------------|----------------------|---------|---------|---------------------|----------|
	| start   | -o=json --download-only        | download-only-475314 | jenkins | v1.33.1 | 20 Jun 24 17:01 UTC |          |
	|         | -p download-only-475314        |                      |         |         |                     |          |
	|         | --force --alsologtostderr      |                      |         |         |                     |          |
	|         | --kubernetes-version=v1.20.0   |                      |         |         |                     |          |
	|         | --container-runtime=docker     |                      |         |         |                     |          |
	|         | --driver=docker                |                      |         |         |                     |          |
	|         | --container-runtime=docker     |                      |         |         |                     |          |
	|---------|--------------------------------|----------------------|---------|---------|---------------------|----------|
	
	
	==> Last Start <==
	Log file created at: 2024/06/20 17:01:28
	Running on machine: ip-172-31-30-239
	Binary: Built with gc go1.22.4 for linux/arm64
	Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg
	I0620 17:01:28.339144    7790 out.go:291] Setting OutFile to fd 1 ...
	I0620 17:01:28.339299    7790 out.go:338] TERM=,COLORTERM=, which probably does not support color
	I0620 17:01:28.339312    7790 out.go:304] Setting ErrFile to fd 2...
	I0620 17:01:28.339317    7790 out.go:338] TERM=,COLORTERM=, which probably does not support color
	I0620 17:01:28.339578    7790 root.go:338] Updating PATH: /home/jenkins/minikube-integration/19106-2452/.minikube/bin
	W0620 17:01:28.339761    7790 root.go:314] Error reading config file at /home/jenkins/minikube-integration/19106-2452/.minikube/config/config.json: open /home/jenkins/minikube-integration/19106-2452/.minikube/config/config.json: no such file or directory
	I0620 17:01:28.340218    7790 out.go:298] Setting JSON to true
	I0620 17:01:28.341041    7790 start.go:129] hostinfo: {"hostname":"ip-172-31-30-239","uptime":2639,"bootTime":1718900249,"procs":148,"os":"linux","platform":"ubuntu","platformFamily":"debian","platformVersion":"20.04","kernelVersion":"5.15.0-1063-aws","kernelArch":"aarch64","virtualizationSystem":"","virtualizationRole":"","hostId":"92f46a7d-c249-4c12-924a-77f64874c910"}
	I0620 17:01:28.341109    7790 start.go:139] virtualization:  
	I0620 17:01:28.344537    7790 out.go:97] [download-only-475314] minikube v1.33.1 on Ubuntu 20.04 (arm64)
	W0620 17:01:28.344788    7790 preload.go:294] Failed to list preload files: open /home/jenkins/minikube-integration/19106-2452/.minikube/cache/preloaded-tarball: no such file or directory
	I0620 17:01:28.344852    7790 notify.go:220] Checking for updates...
	I0620 17:01:28.346950    7790 out.go:169] MINIKUBE_LOCATION=19106
	I0620 17:01:28.349035    7790 out.go:169] MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
	I0620 17:01:28.350881    7790 out.go:169] KUBECONFIG=/home/jenkins/minikube-integration/19106-2452/kubeconfig
	I0620 17:01:28.353146    7790 out.go:169] MINIKUBE_HOME=/home/jenkins/minikube-integration/19106-2452/.minikube
	I0620 17:01:28.355265    7790 out.go:169] MINIKUBE_BIN=out/minikube-linux-arm64
	W0620 17:01:28.359472    7790 out.go:267] minikube skips various validations when --force is supplied; this may lead to unexpected behavior
	I0620 17:01:28.359742    7790 driver.go:392] Setting default libvirt URI to qemu:///system
	I0620 17:01:28.392682    7790 docker.go:122] docker version: linux-26.1.4:Docker Engine - Community
	I0620 17:01:28.392758    7790 cli_runner.go:164] Run: docker system info --format "{{json .}}"
	I0620 17:01:28.709808    7790 info.go:266] docker info: {ID:6ZPO:QZND:VNGE:LUKL:4Y3K:XELL:AAX4:2GTK:E6LM:MPRN:3ZXR:TTMR Containers:0 ContainersRunning:0 ContainersPaused:0 ContainersStopped:0 Images:1 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:true BridgeNfIP6Tables:true Debug:false NFd:29 OomKillDisable:true NGoroutines:52 SystemTime:2024-06-20 17:01:28.700379372 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1063-aws OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Architecture:aar
ch64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:2 MemTotal:8214900736 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ip-172-31-30-239 Labels:[] ExperimentalBuild:false ServerVersion:26.1.4 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:d2d58213f83a351ca8f528a95fbd145f5654e957 Expected:d2d58213f83a351ca8f528a95fbd145f5654e957} RuncCommit:{ID:v1.1.12-0-g51d5e94 Expected:v1.1.12-0-g51d5e94} InitCommit:{ID:de40ad0 Expected:de40ad0} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:<nil> ServerErro
rs:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.14.1] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.27.1]] Warnings:<nil>}}
	I0620 17:01:28.709914    7790 docker.go:295] overlay module found
	I0620 17:01:28.712255    7790 out.go:97] Using the docker driver based on user configuration
	I0620 17:01:28.712286    7790 start.go:297] selected driver: docker
	I0620 17:01:28.712294    7790 start.go:901] validating driver "docker" against <nil>
	I0620 17:01:28.712416    7790 cli_runner.go:164] Run: docker system info --format "{{json .}}"
	I0620 17:01:28.762956    7790 info.go:266] docker info: {ID:6ZPO:QZND:VNGE:LUKL:4Y3K:XELL:AAX4:2GTK:E6LM:MPRN:3ZXR:TTMR Containers:0 ContainersRunning:0 ContainersPaused:0 ContainersStopped:0 Images:1 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:true BridgeNfIP6Tables:true Debug:false NFd:29 OomKillDisable:true NGoroutines:52 SystemTime:2024-06-20 17:01:28.754457779 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1063-aws OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Architecture:aar
ch64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:2 MemTotal:8214900736 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ip-172-31-30-239 Labels:[] ExperimentalBuild:false ServerVersion:26.1.4 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:d2d58213f83a351ca8f528a95fbd145f5654e957 Expected:d2d58213f83a351ca8f528a95fbd145f5654e957} RuncCommit:{ID:v1.1.12-0-g51d5e94 Expected:v1.1.12-0-g51d5e94} InitCommit:{ID:de40ad0 Expected:de40ad0} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:<nil> ServerErro
rs:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.14.1] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.27.1]] Warnings:<nil>}}
	I0620 17:01:28.763132    7790 start_flags.go:310] no existing cluster config was found, will generate one from the flags 
	I0620 17:01:28.763403    7790 start_flags.go:393] Using suggested 2200MB memory alloc based on sys=7834MB, container=7834MB
	I0620 17:01:28.763566    7790 start_flags.go:929] Wait components to verify : map[apiserver:true system_pods:true]
	I0620 17:01:28.765963    7790 out.go:169] Using Docker driver with root privileges
	I0620 17:01:28.767788    7790 cni.go:84] Creating CNI manager for ""
	I0620 17:01:28.767818    7790 cni.go:162] CNI unnecessary in this configuration, recommending no CNI
	I0620 17:01:28.767897    7790 start.go:340] cluster config:
	{Name:download-only-475314 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1718753665-19106@sha256:735aacbd61d487240dc39ba6e4d70dd6ae1ad6181ca2ba092d372605e48ee636 Memory:2200 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.20.0 ClusterName:download-only-475314 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local Co
ntainerRuntime:docker CRISocket: NetworkPlugin: FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.20.0 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
	I0620 17:01:28.770217    7790 out.go:97] Starting "download-only-475314" primary control-plane node in "download-only-475314" cluster
	I0620 17:01:28.770238    7790 cache.go:121] Beginning downloading kic base image for docker with docker
	I0620 17:01:28.772289    7790 out.go:97] Pulling base image v0.0.44-1718753665-19106 ...
	I0620 17:01:28.772313    7790 preload.go:132] Checking if preload exists for k8s version v1.20.0 and runtime docker
	I0620 17:01:28.772410    7790 image.go:79] Checking for gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1718753665-19106@sha256:735aacbd61d487240dc39ba6e4d70dd6ae1ad6181ca2ba092d372605e48ee636 in local docker daemon
	I0620 17:01:28.786878    7790 cache.go:149] Downloading gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1718753665-19106@sha256:735aacbd61d487240dc39ba6e4d70dd6ae1ad6181ca2ba092d372605e48ee636 to local cache
	I0620 17:01:28.787134    7790 image.go:63] Checking for gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1718753665-19106@sha256:735aacbd61d487240dc39ba6e4d70dd6ae1ad6181ca2ba092d372605e48ee636 in local cache directory
	I0620 17:01:28.787243    7790 image.go:118] Writing gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1718753665-19106@sha256:735aacbd61d487240dc39ba6e4d70dd6ae1ad6181ca2ba092d372605e48ee636 to local cache
	I0620 17:01:28.838945    7790 preload.go:119] Found remote preload: https://storage.googleapis.com/minikube-preloaded-volume-tarballs/v18/v1.20.0/preloaded-images-k8s-v18-v1.20.0-docker-overlay2-arm64.tar.lz4
	I0620 17:01:28.839042    7790 cache.go:56] Caching tarball of preloaded images
	I0620 17:01:28.839264    7790 preload.go:132] Checking if preload exists for k8s version v1.20.0 and runtime docker
	I0620 17:01:28.842117    7790 out.go:97] Downloading Kubernetes v1.20.0 preload ...
	I0620 17:01:28.842154    7790 preload.go:237] getting checksum for preloaded-images-k8s-v18-v1.20.0-docker-overlay2-arm64.tar.lz4 ...
	I0620 17:01:28.928730    7790 download.go:107] Downloading: https://storage.googleapis.com/minikube-preloaded-volume-tarballs/v18/v1.20.0/preloaded-images-k8s-v18-v1.20.0-docker-overlay2-arm64.tar.lz4?checksum=md5:1a3e8f9b29e6affec63d76d0d3000942 -> /home/jenkins/minikube-integration/19106-2452/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.20.0-docker-overlay2-arm64.tar.lz4
	I0620 17:01:34.187591    7790 preload.go:248] saving checksum for preloaded-images-k8s-v18-v1.20.0-docker-overlay2-arm64.tar.lz4 ...
	I0620 17:01:34.187776    7790 preload.go:255] verifying checksum of /home/jenkins/minikube-integration/19106-2452/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.20.0-docker-overlay2-arm64.tar.lz4 ...
	I0620 17:01:35.177519    7790 cache.go:59] Finished verifying existence of preloaded tar for v1.20.0 on docker
	I0620 17:01:35.177925    7790 profile.go:143] Saving config to /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/download-only-475314/config.json ...
	I0620 17:01:35.177961    7790 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/download-only-475314/config.json: {Name:mk104759fb1ab8df174d4ab93adffc7342dfe6c5 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
	I0620 17:01:35.178122    7790 preload.go:132] Checking if preload exists for k8s version v1.20.0 and runtime docker
	I0620 17:01:35.178290    7790 download.go:107] Downloading: https://dl.k8s.io/release/v1.20.0/bin/linux/arm64/kubectl?checksum=file:https://dl.k8s.io/release/v1.20.0/bin/linux/arm64/kubectl.sha256 -> /home/jenkins/minikube-integration/19106-2452/.minikube/cache/linux/arm64/v1.20.0/kubectl
	
	
	* The control-plane node download-only-475314 host does not exist
	  To start a cluster, run: "minikube start -p download-only-475314"
-- /stdout --
aaa_download_only_test.go:185: minikube logs failed with error: exit status 85
--- PASS: TestDownloadOnly/v1.20.0/LogsDuration (0.36s)
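The exit status 85 above is expected here: the download-only profile never created a host, so "minikube logs" cannot attach to one, and the test only cares that the audit/log dump was produced. A minimal Go sketch of treating one specific non-zero exit code as expected when shelling out (profile name taken from this run):

// exit_code_sketch.go -- distinguish an expected exit code from a real failure.
package main

import (
	"errors"
	"fmt"
	"log"
	"os/exec"
)

func main() {
	out, err := exec.Command("out/minikube-linux-arm64", "logs", "-p", "download-only-475314").CombinedOutput()
	var ee *exec.ExitError
	switch {
	case err == nil:
		fmt.Println("logs unexpectedly succeeded")
	case errors.As(err, &ee) && ee.ExitCode() == 85:
		// Expected: no host exists for this profile, but the captured dump is still useful.
		fmt.Printf("got expected exit status 85 (%d bytes of output)\n", len(out))
	default:
		log.Fatalf("minikube logs failed in an unexpected way: %v", err)
	}
}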

TestDownloadOnly/v1.20.0/DeleteAll (0.35s)

=== RUN   TestDownloadOnly/v1.20.0/DeleteAll
aaa_download_only_test.go:197: (dbg) Run:  out/minikube-linux-arm64 delete --all
--- PASS: TestDownloadOnly/v1.20.0/DeleteAll (0.35s)

TestDownloadOnly/v1.20.0/DeleteAlwaysSucceeds (0.22s)

=== RUN   TestDownloadOnly/v1.20.0/DeleteAlwaysSucceeds
aaa_download_only_test.go:208: (dbg) Run:  out/minikube-linux-arm64 delete -p download-only-475314
--- PASS: TestDownloadOnly/v1.20.0/DeleteAlwaysSucceeds (0.22s)

TestDownloadOnly/v1.30.2/json-events (5.35s)

=== RUN   TestDownloadOnly/v1.30.2/json-events
aaa_download_only_test.go:81: (dbg) Run:  out/minikube-linux-arm64 start -o=json --download-only -p download-only-648762 --force --alsologtostderr --kubernetes-version=v1.30.2 --container-runtime=docker --driver=docker  --container-runtime=docker
aaa_download_only_test.go:81: (dbg) Done: out/minikube-linux-arm64 start -o=json --download-only -p download-only-648762 --force --alsologtostderr --kubernetes-version=v1.30.2 --container-runtime=docker --driver=docker  --container-runtime=docker: (5.348373529s)
--- PASS: TestDownloadOnly/v1.30.2/json-events (5.35s)

TestDownloadOnly/v1.30.2/preload-exists (0s)

=== RUN   TestDownloadOnly/v1.30.2/preload-exists
--- PASS: TestDownloadOnly/v1.30.2/preload-exists (0.00s)

TestDownloadOnly/v1.30.2/LogsDuration (0.07s)

=== RUN   TestDownloadOnly/v1.30.2/LogsDuration
aaa_download_only_test.go:184: (dbg) Run:  out/minikube-linux-arm64 logs -p download-only-648762
aaa_download_only_test.go:184: (dbg) Non-zero exit: out/minikube-linux-arm64 logs -p download-only-648762: exit status 85 (67.074219ms)
-- stdout --
	
	==> Audit <==
	|---------|--------------------------------|----------------------|---------|---------|---------------------|---------------------|
	| Command |              Args              |       Profile        |  User   | Version |     Start Time      |      End Time       |
	|---------|--------------------------------|----------------------|---------|---------|---------------------|---------------------|
	| start   | -o=json --download-only        | download-only-475314 | jenkins | v1.33.1 | 20 Jun 24 17:01 UTC |                     |
	|         | -p download-only-475314        |                      |         |         |                     |                     |
	|         | --force --alsologtostderr      |                      |         |         |                     |                     |
	|         | --kubernetes-version=v1.20.0   |                      |         |         |                     |                     |
	|         | --container-runtime=docker     |                      |         |         |                     |                     |
	|         | --driver=docker                |                      |         |         |                     |                     |
	|         | --container-runtime=docker     |                      |         |         |                     |                     |
	| delete  | --all                          | minikube             | jenkins | v1.33.1 | 20 Jun 24 17:01 UTC | 20 Jun 24 17:01 UTC |
	| delete  | -p download-only-475314        | download-only-475314 | jenkins | v1.33.1 | 20 Jun 24 17:01 UTC | 20 Jun 24 17:01 UTC |
	| start   | -o=json --download-only        | download-only-648762 | jenkins | v1.33.1 | 20 Jun 24 17:01 UTC |                     |
	|         | -p download-only-648762        |                      |         |         |                     |                     |
	|         | --force --alsologtostderr      |                      |         |         |                     |                     |
	|         | --kubernetes-version=v1.30.2   |                      |         |         |                     |                     |
	|         | --container-runtime=docker     |                      |         |         |                     |                     |
	|         | --driver=docker                |                      |         |         |                     |                     |
	|         | --container-runtime=docker     |                      |         |         |                     |                     |
	|---------|--------------------------------|----------------------|---------|---------|---------------------|---------------------|
	
	
	==> Last Start <==
	Log file created at: 2024/06/20 17:01:39
	Running on machine: ip-172-31-30-239
	Binary: Built with gc go1.22.4 for linux/arm64
	Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg
	I0620 17:01:39.529315    7963 out.go:291] Setting OutFile to fd 1 ...
	I0620 17:01:39.529722    7963 out.go:338] TERM=,COLORTERM=, which probably does not support color
	I0620 17:01:39.529736    7963 out.go:304] Setting ErrFile to fd 2...
	I0620 17:01:39.529743    7963 out.go:338] TERM=,COLORTERM=, which probably does not support color
	I0620 17:01:39.530016    7963 root.go:338] Updating PATH: /home/jenkins/minikube-integration/19106-2452/.minikube/bin
	I0620 17:01:39.530439    7963 out.go:298] Setting JSON to true
	I0620 17:01:39.531287    7963 start.go:129] hostinfo: {"hostname":"ip-172-31-30-239","uptime":2650,"bootTime":1718900249,"procs":146,"os":"linux","platform":"ubuntu","platformFamily":"debian","platformVersion":"20.04","kernelVersion":"5.15.0-1063-aws","kernelArch":"aarch64","virtualizationSystem":"","virtualizationRole":"","hostId":"92f46a7d-c249-4c12-924a-77f64874c910"}
	I0620 17:01:39.531358    7963 start.go:139] virtualization:  
	I0620 17:01:39.551853    7963 out.go:97] [download-only-648762] minikube v1.33.1 on Ubuntu 20.04 (arm64)
	I0620 17:01:39.552108    7963 notify.go:220] Checking for updates...
	I0620 17:01:39.580387    7963 out.go:169] MINIKUBE_LOCATION=19106
	I0620 17:01:39.602929    7963 out.go:169] MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
	I0620 17:01:39.633106    7963 out.go:169] KUBECONFIG=/home/jenkins/minikube-integration/19106-2452/kubeconfig
	I0620 17:01:39.660201    7963 out.go:169] MINIKUBE_HOME=/home/jenkins/minikube-integration/19106-2452/.minikube
	I0620 17:01:39.680950    7963 out.go:169] MINIKUBE_BIN=out/minikube-linux-arm64
	W0620 17:01:39.745229    7963 out.go:267] minikube skips various validations when --force is supplied; this may lead to unexpected behavior
	I0620 17:01:39.745542    7963 driver.go:392] Setting default libvirt URI to qemu:///system
	I0620 17:01:39.765179    7963 docker.go:122] docker version: linux-26.1.4:Docker Engine - Community
	I0620 17:01:39.765275    7963 cli_runner.go:164] Run: docker system info --format "{{json .}}"
	I0620 17:01:39.823619    7963 info.go:266] docker info: {ID:6ZPO:QZND:VNGE:LUKL:4Y3K:XELL:AAX4:2GTK:E6LM:MPRN:3ZXR:TTMR Containers:0 ContainersRunning:0 ContainersPaused:0 ContainersStopped:0 Images:1 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:true BridgeNfIP6Tables:true Debug:false NFd:29 OomKillDisable:true NGoroutines:45 SystemTime:2024-06-20 17:01:39.814339922 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1063-aws OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Architecture:aar
ch64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:2 MemTotal:8214900736 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ip-172-31-30-239 Labels:[] ExperimentalBuild:false ServerVersion:26.1.4 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:d2d58213f83a351ca8f528a95fbd145f5654e957 Expected:d2d58213f83a351ca8f528a95fbd145f5654e957} RuncCommit:{ID:v1.1.12-0-g51d5e94 Expected:v1.1.12-0-g51d5e94} InitCommit:{ID:de40ad0 Expected:de40ad0} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:<nil> ServerErro
rs:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.14.1] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.27.1]] Warnings:<nil>}}
	I0620 17:01:39.823768    7963 docker.go:295] overlay module found
	I0620 17:01:39.826216    7963 out.go:97] Using the docker driver based on user configuration
	I0620 17:01:39.826257    7963 start.go:297] selected driver: docker
	I0620 17:01:39.826263    7963 start.go:901] validating driver "docker" against <nil>
	I0620 17:01:39.826359    7963 cli_runner.go:164] Run: docker system info --format "{{json .}}"
	I0620 17:01:39.888665    7963 info.go:266] docker info: {ID:6ZPO:QZND:VNGE:LUKL:4Y3K:XELL:AAX4:2GTK:E6LM:MPRN:3ZXR:TTMR Containers:0 ContainersRunning:0 ContainersPaused:0 ContainersStopped:0 Images:1 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:true BridgeNfIP6Tables:true Debug:false NFd:29 OomKillDisable:true NGoroutines:45 SystemTime:2024-06-20 17:01:39.87978216 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1063-aws OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Architecture:aarc
h64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:2 MemTotal:8214900736 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ip-172-31-30-239 Labels:[] ExperimentalBuild:false ServerVersion:26.1.4 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:d2d58213f83a351ca8f528a95fbd145f5654e957 Expected:d2d58213f83a351ca8f528a95fbd145f5654e957} RuncCommit:{ID:v1.1.12-0-g51d5e94 Expected:v1.1.12-0-g51d5e94} InitCommit:{ID:de40ad0 Expected:de40ad0} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:<nil> ServerError
s:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.14.1] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.27.1]] Warnings:<nil>}}
	I0620 17:01:39.888823    7963 start_flags.go:310] no existing cluster config was found, will generate one from the flags 
	I0620 17:01:39.889103    7963 start_flags.go:393] Using suggested 2200MB memory alloc based on sys=7834MB, container=7834MB
	I0620 17:01:39.889260    7963 start_flags.go:929] Wait components to verify : map[apiserver:true system_pods:true]
	I0620 17:01:39.891609    7963 out.go:169] Using Docker driver with root privileges
	I0620 17:01:39.893292    7963 cni.go:84] Creating CNI manager for ""
	I0620 17:01:39.893320    7963 cni.go:158] "docker" driver + "docker" container runtime found on kubernetes v1.24+, recommending bridge
	I0620 17:01:39.893338    7963 start_flags.go:319] Found "bridge CNI" CNI - setting NetworkPlugin=cni
	I0620 17:01:39.893428    7963 start.go:340] cluster config:
	{Name:download-only-648762 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1718753665-19106@sha256:735aacbd61d487240dc39ba6e4d70dd6ae1ad6181ca2ba092d372605e48ee636 Memory:2200 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.30.2 ClusterName:download-only-648762 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local Co
ntainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.30.2 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
	I0620 17:01:39.895268    7963 out.go:97] Starting "download-only-648762" primary control-plane node in "download-only-648762" cluster
	I0620 17:01:39.895285    7963 cache.go:121] Beginning downloading kic base image for docker with docker
	I0620 17:01:39.897257    7963 out.go:97] Pulling base image v0.0.44-1718753665-19106 ...
	I0620 17:01:39.897279    7963 preload.go:132] Checking if preload exists for k8s version v1.30.2 and runtime docker
	I0620 17:01:39.897438    7963 image.go:79] Checking for gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1718753665-19106@sha256:735aacbd61d487240dc39ba6e4d70dd6ae1ad6181ca2ba092d372605e48ee636 in local docker daemon
	I0620 17:01:39.912404    7963 cache.go:149] Downloading gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1718753665-19106@sha256:735aacbd61d487240dc39ba6e4d70dd6ae1ad6181ca2ba092d372605e48ee636 to local cache
	I0620 17:01:39.912548    7963 image.go:63] Checking for gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1718753665-19106@sha256:735aacbd61d487240dc39ba6e4d70dd6ae1ad6181ca2ba092d372605e48ee636 in local cache directory
	I0620 17:01:39.912566    7963 image.go:66] Found gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1718753665-19106@sha256:735aacbd61d487240dc39ba6e4d70dd6ae1ad6181ca2ba092d372605e48ee636 in local cache directory, skipping pull
	I0620 17:01:39.912571    7963 image.go:105] gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1718753665-19106@sha256:735aacbd61d487240dc39ba6e4d70dd6ae1ad6181ca2ba092d372605e48ee636 exists in cache, skipping pull
	I0620 17:01:39.912579    7963 cache.go:152] successfully saved gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1718753665-19106@sha256:735aacbd61d487240dc39ba6e4d70dd6ae1ad6181ca2ba092d372605e48ee636 as a tarball
	I0620 17:01:39.957874    7963 preload.go:119] Found remote preload: https://storage.googleapis.com/minikube-preloaded-volume-tarballs/v18/v1.30.2/preloaded-images-k8s-v18-v1.30.2-docker-overlay2-arm64.tar.lz4
	I0620 17:01:39.957906    7963 cache.go:56] Caching tarball of preloaded images
	I0620 17:01:39.958050    7963 preload.go:132] Checking if preload exists for k8s version v1.30.2 and runtime docker
	I0620 17:01:39.960555    7963 out.go:97] Downloading Kubernetes v1.30.2 preload ...
	I0620 17:01:39.960574    7963 preload.go:237] getting checksum for preloaded-images-k8s-v18-v1.30.2-docker-overlay2-arm64.tar.lz4 ...
	I0620 17:01:40.041844    7963 download.go:107] Downloading: https://storage.googleapis.com/minikube-preloaded-volume-tarballs/v18/v1.30.2/preloaded-images-k8s-v18-v1.30.2-docker-overlay2-arm64.tar.lz4?checksum=md5:3bd37d965c85173ac77cdcc664938efd -> /home/jenkins/minikube-integration/19106-2452/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.30.2-docker-overlay2-arm64.tar.lz4
	I0620 17:01:43.399472    7963 preload.go:248] saving checksum for preloaded-images-k8s-v18-v1.30.2-docker-overlay2-arm64.tar.lz4 ...
	I0620 17:01:43.399638    7963 preload.go:255] verifying checksum of /home/jenkins/minikube-integration/19106-2452/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.30.2-docker-overlay2-arm64.tar.lz4 ...
	I0620 17:01:44.177069    7963 cache.go:59] Finished verifying existence of preloaded tar for v1.30.2 on docker
	I0620 17:01:44.177424    7963 profile.go:143] Saving config to /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/download-only-648762/config.json ...
	I0620 17:01:44.177455    7963 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/download-only-648762/config.json: {Name:mk565bb5e1331cf016834ede42c815983e3bba51 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
	I0620 17:01:44.177642    7963 preload.go:132] Checking if preload exists for k8s version v1.30.2 and runtime docker
	I0620 17:01:44.177789    7963 download.go:107] Downloading: https://dl.k8s.io/release/v1.30.2/bin/linux/arm64/kubectl?checksum=file:https://dl.k8s.io/release/v1.30.2/bin/linux/arm64/kubectl.sha256 -> /home/jenkins/minikube-integration/19106-2452/.minikube/cache/linux/arm64/v1.30.2/kubectl
	
	
	* The control-plane node download-only-648762 host does not exist
	  To start a cluster, run: "minikube start -p download-only-648762"
-- /stdout --
aaa_download_only_test.go:185: minikube logs failed with error: exit status 85
--- PASS: TestDownloadOnly/v1.30.2/LogsDuration (0.07s)

TestDownloadOnly/v1.30.2/DeleteAll (0.26s)

=== RUN   TestDownloadOnly/v1.30.2/DeleteAll
aaa_download_only_test.go:197: (dbg) Run:  out/minikube-linux-arm64 delete --all
--- PASS: TestDownloadOnly/v1.30.2/DeleteAll (0.26s)

TestDownloadOnly/v1.30.2/DeleteAlwaysSucceeds (0.18s)

=== RUN   TestDownloadOnly/v1.30.2/DeleteAlwaysSucceeds
aaa_download_only_test.go:208: (dbg) Run:  out/minikube-linux-arm64 delete -p download-only-648762
--- PASS: TestDownloadOnly/v1.30.2/DeleteAlwaysSucceeds (0.18s)

TestBinaryMirror (0.54s)

=== RUN   TestBinaryMirror
aaa_download_only_test.go:314: (dbg) Run:  out/minikube-linux-arm64 start --download-only -p binary-mirror-090394 --alsologtostderr --binary-mirror http://127.0.0.1:40791 --driver=docker  --container-runtime=docker
helpers_test.go:175: Cleaning up "binary-mirror-090394" profile ...
helpers_test.go:178: (dbg) Run:  out/minikube-linux-arm64 delete -p binary-mirror-090394
--- PASS: TestBinaryMirror (0.54s)
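The --binary-mirror flag above points minikube's kubectl/kubeadm/kubelet downloads at a local HTTP endpoint instead of dl.k8s.io. A minimal sketch, assuming the binaries are already laid out on local disk, of the kind of plain file server such a mirror could be (the directory path is hypothetical; the test stands up its own temporary server):

// binary_mirror_sketch.go -- serve pre-downloaded Kubernetes binaries over local HTTP.
package main

import (
	"log"
	"net/http"
)

func main() {
	fs := http.FileServer(http.Dir("/var/cache/k8s-binaries")) // hypothetical layout
	log.Println("binary mirror listening on http://127.0.0.1:40791")
	log.Fatal(http.ListenAndServe("127.0.0.1:40791", fs))
}

minikube would then be started as in the command above, with --binary-mirror http://127.0.0.1:40791, so the binary downloads never leave the machine.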

TestOffline (58.83s)

=== RUN   TestOffline
=== PAUSE TestOffline
=== CONT  TestOffline
aab_offline_test.go:55: (dbg) Run:  out/minikube-linux-arm64 start -p offline-docker-921453 --alsologtostderr -v=1 --memory=2048 --wait=true --driver=docker  --container-runtime=docker
aab_offline_test.go:55: (dbg) Done: out/minikube-linux-arm64 start -p offline-docker-921453 --alsologtostderr -v=1 --memory=2048 --wait=true --driver=docker  --container-runtime=docker: (56.642569955s)
helpers_test.go:175: Cleaning up "offline-docker-921453" profile ...
helpers_test.go:178: (dbg) Run:  out/minikube-linux-arm64 delete -p offline-docker-921453
helpers_test.go:178: (dbg) Done: out/minikube-linux-arm64 delete -p offline-docker-921453: (2.190390707s)
--- PASS: TestOffline (58.83s)

TestAddons/PreSetup/EnablingAddonOnNonExistingCluster (0.07s)

=== RUN   TestAddons/PreSetup/EnablingAddonOnNonExistingCluster
=== PAUSE TestAddons/PreSetup/EnablingAddonOnNonExistingCluster
=== CONT  TestAddons/PreSetup/EnablingAddonOnNonExistingCluster
addons_test.go:1029: (dbg) Run:  out/minikube-linux-arm64 addons enable dashboard -p addons-705802
addons_test.go:1029: (dbg) Non-zero exit: out/minikube-linux-arm64 addons enable dashboard -p addons-705802: exit status 85 (68.0646ms)
-- stdout --
	* Profile "addons-705802" not found. Run "minikube profile list" to view all profiles.
	  To start a cluster, run: "minikube start -p addons-705802"
-- /stdout --
--- PASS: TestAddons/PreSetup/EnablingAddonOnNonExistingCluster (0.07s)

TestAddons/PreSetup/DisablingAddonOnNonExistingCluster (0.07s)

=== RUN   TestAddons/PreSetup/DisablingAddonOnNonExistingCluster
=== PAUSE TestAddons/PreSetup/DisablingAddonOnNonExistingCluster
=== CONT  TestAddons/PreSetup/DisablingAddonOnNonExistingCluster
addons_test.go:1040: (dbg) Run:  out/minikube-linux-arm64 addons disable dashboard -p addons-705802
addons_test.go:1040: (dbg) Non-zero exit: out/minikube-linux-arm64 addons disable dashboard -p addons-705802: exit status 85 (65.689313ms)
-- stdout --
	* Profile "addons-705802" not found. Run "minikube profile list" to view all profiles.
	  To start a cluster, run: "minikube start -p addons-705802"
-- /stdout --
--- PASS: TestAddons/PreSetup/DisablingAddonOnNonExistingCluster (0.07s)

TestAddons/Setup (230.19s)

=== RUN   TestAddons/Setup
addons_test.go:110: (dbg) Run:  out/minikube-linux-arm64 start -p addons-705802 --wait=true --memory=4000 --alsologtostderr --addons=registry --addons=metrics-server --addons=volumesnapshots --addons=csi-hostpath-driver --addons=gcp-auth --addons=cloud-spanner --addons=inspektor-gadget --addons=storage-provisioner-rancher --addons=nvidia-device-plugin --addons=yakd --addons=volcano --driver=docker  --container-runtime=docker --addons=ingress --addons=ingress-dns
addons_test.go:110: (dbg) Done: out/minikube-linux-arm64 start -p addons-705802 --wait=true --memory=4000 --alsologtostderr --addons=registry --addons=metrics-server --addons=volumesnapshots --addons=csi-hostpath-driver --addons=gcp-auth --addons=cloud-spanner --addons=inspektor-gadget --addons=storage-provisioner-rancher --addons=nvidia-device-plugin --addons=yakd --addons=volcano --driver=docker  --container-runtime=docker --addons=ingress --addons=ingress-dns: (3m50.186036349s)
--- PASS: TestAddons/Setup (230.19s)

TestAddons/parallel/Registry (15.57s)

=== RUN   TestAddons/parallel/Registry
=== PAUSE TestAddons/parallel/Registry
=== CONT  TestAddons/parallel/Registry
addons_test.go:332: registry stabilized in 34.187315ms
addons_test.go:334: (dbg) TestAddons/parallel/Registry: waiting 6m0s for pods matching "actual-registry=true" in namespace "kube-system" ...
helpers_test.go:344: "registry-xkjrc" [e8fb038a-058a-4a6e-9936-dd674eeb2189] Running
addons_test.go:334: (dbg) TestAddons/parallel/Registry: actual-registry=true healthy within 6.004969791s
addons_test.go:337: (dbg) TestAddons/parallel/Registry: waiting 10m0s for pods matching "registry-proxy=true" in namespace "kube-system" ...
helpers_test.go:344: "registry-proxy-prpxx" [b1d2e1a6-ebd8-4490-94d7-be9a7e2a1920] Running
addons_test.go:337: (dbg) TestAddons/parallel/Registry: registry-proxy=true healthy within 5.013718708s
addons_test.go:342: (dbg) Run:  kubectl --context addons-705802 delete po -l run=registry-test --now
addons_test.go:347: (dbg) Run:  kubectl --context addons-705802 run --rm registry-test --restart=Never --image=gcr.io/k8s-minikube/busybox -it -- sh -c "wget --spider -S http://registry.kube-system.svc.cluster.local"
addons_test.go:347: (dbg) Done: kubectl --context addons-705802 run --rm registry-test --restart=Never --image=gcr.io/k8s-minikube/busybox -it -- sh -c "wget --spider -S http://registry.kube-system.svc.cluster.local": (3.546050827s)
addons_test.go:361: (dbg) Run:  out/minikube-linux-arm64 -p addons-705802 ip
2024/06/20 17:05:51 [DEBUG] GET http://192.168.49.2:5000
addons_test.go:390: (dbg) Run:  out/minikube-linux-arm64 -p addons-705802 addons disable registry --alsologtostderr -v=1
--- PASS: TestAddons/parallel/Registry (15.57s)
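The DEBUG line above is the external half of this check: after the in-cluster wget probe, the registry addon is fetched through the node IP reported by "minikube ip" on port 5000. A minimal Go sketch of that probe; 192.168.49.2 is the IP from this run, so substitute your own profile's IP:

// registry_probe_sketch.go -- HTTP probe of the registry addon via the node IP.
package main

import (
	"fmt"
	"log"
	"net/http"
	"time"
)

func main() {
	client := &http.Client{Timeout: 5 * time.Second}
	resp, err := client.Get("http://192.168.49.2:5000/")
	if err != nil {
		log.Fatalf("registry not reachable: %v", err)
	}
	defer resp.Body.Close()
	fmt.Println("registry responded:", resp.Status)
}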

TestAddons/parallel/InspektorGadget (11.73s)

=== RUN   TestAddons/parallel/InspektorGadget
=== PAUSE TestAddons/parallel/InspektorGadget
=== CONT  TestAddons/parallel/InspektorGadget
addons_test.go:840: (dbg) TestAddons/parallel/InspektorGadget: waiting 8m0s for pods matching "k8s-app=gadget" in namespace "gadget" ...
helpers_test.go:344: "gadget-qwnpj" [6c9407ca-97fe-49bc-84df-132a8f316e80] Running / Ready:ContainersNotReady (containers with unready status: [gadget]) / ContainersReady:ContainersNotReady (containers with unready status: [gadget])
addons_test.go:840: (dbg) TestAddons/parallel/InspektorGadget: k8s-app=gadget healthy within 6.003848266s
addons_test.go:843: (dbg) Run:  out/minikube-linux-arm64 addons disable inspektor-gadget -p addons-705802
addons_test.go:843: (dbg) Done: out/minikube-linux-arm64 addons disable inspektor-gadget -p addons-705802: (5.720822429s)
--- PASS: TestAddons/parallel/InspektorGadget (11.73s)
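The "waiting ... for pods matching" steps that recur throughout these addon tests poll until the labelled pods report Ready. A minimal sketch of the same wait expressed with "kubectl wait" instead of the test helpers; context, namespace, label and timeout are the ones from this run:

// pod_wait_sketch.go -- wait for labelled pods to become Ready via kubectl.
package main

import (
	"log"
	"os"
	"os/exec"
)

func main() {
	cmd := exec.Command("kubectl", "--context", "addons-705802",
		"wait", "--for=condition=Ready", "pod",
		"-l", "k8s-app=gadget", "-n", "gadget", "--timeout=8m")
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	if err := cmd.Run(); err != nil {
		log.Fatalf("gadget pods never became Ready: %v", err)
	}
}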

TestAddons/parallel/MetricsServer (5.74s)

=== RUN   TestAddons/parallel/MetricsServer
=== PAUSE TestAddons/parallel/MetricsServer
=== CONT  TestAddons/parallel/MetricsServer
addons_test.go:409: metrics-server stabilized in 2.665392ms
addons_test.go:411: (dbg) TestAddons/parallel/MetricsServer: waiting 6m0s for pods matching "k8s-app=metrics-server" in namespace "kube-system" ...
helpers_test.go:344: "metrics-server-c59844bb4-q78st" [c7a25cfe-a4af-4cb4-abb1-0099ff026057] Running
addons_test.go:411: (dbg) TestAddons/parallel/MetricsServer: k8s-app=metrics-server healthy within 5.004801477s
addons_test.go:417: (dbg) Run:  kubectl --context addons-705802 top pods -n kube-system
addons_test.go:434: (dbg) Run:  out/minikube-linux-arm64 -p addons-705802 addons disable metrics-server --alsologtostderr -v=1
--- PASS: TestAddons/parallel/MetricsServer (5.74s)
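The closing assertion here is simply that "kubectl top pods" returns rows once metrics-server is healthy. A minimal Go sketch of that check against the same context and namespace:

// metrics_check_sketch.go -- verify metrics-server is serving pod metrics.
package main

import (
	"fmt"
	"log"
	"os/exec"
	"strings"
)

func main() {
	out, err := exec.Command("kubectl", "--context", "addons-705802",
		"top", "pods", "-n", "kube-system", "--no-headers").CombinedOutput()
	if err != nil {
		log.Fatalf("kubectl top failed: %v\n%s", err, out)
	}
	rows := strings.Split(strings.TrimSpace(string(out)), "\n")
	if len(rows) == 0 || rows[0] == "" {
		log.Fatal("metrics-server returned no pod metrics")
	}
	fmt.Printf("metrics available for %d kube-system pods\n", len(rows))
}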

TestAddons/parallel/CSI (67.54s)

=== RUN   TestAddons/parallel/CSI
=== PAUSE TestAddons/parallel/CSI
=== CONT  TestAddons/parallel/CSI
addons_test.go:563: csi-hostpath-driver pods stabilized in 7.167239ms
addons_test.go:566: (dbg) Run:  kubectl --context addons-705802 create -f testdata/csi-hostpath-driver/pvc.yaml
addons_test.go:571: (dbg) TestAddons/parallel/CSI: waiting 6m0s for pvc "hpvc" in namespace "default" ...
helpers_test.go:394: (dbg) Run:  kubectl --context addons-705802 get pvc hpvc -o jsonpath={.status.phase} -n default
helpers_test.go:394: (dbg) Run:  kubectl --context addons-705802 get pvc hpvc -o jsonpath={.status.phase} -n default
helpers_test.go:394: (dbg) Run:  kubectl --context addons-705802 get pvc hpvc -o jsonpath={.status.phase} -n default
helpers_test.go:394: (dbg) Run:  kubectl --context addons-705802 get pvc hpvc -o jsonpath={.status.phase} -n default
helpers_test.go:394: (dbg) Run:  kubectl --context addons-705802 get pvc hpvc -o jsonpath={.status.phase} -n default
helpers_test.go:394: (dbg) Run:  kubectl --context addons-705802 get pvc hpvc -o jsonpath={.status.phase} -n default
helpers_test.go:394: (dbg) Run:  kubectl --context addons-705802 get pvc hpvc -o jsonpath={.status.phase} -n default
helpers_test.go:394: (dbg) Run:  kubectl --context addons-705802 get pvc hpvc -o jsonpath={.status.phase} -n default
helpers_test.go:394: (dbg) Run:  kubectl --context addons-705802 get pvc hpvc -o jsonpath={.status.phase} -n default
helpers_test.go:394: (dbg) Run:  kubectl --context addons-705802 get pvc hpvc -o jsonpath={.status.phase} -n default
helpers_test.go:394: (dbg) Run:  kubectl --context addons-705802 get pvc hpvc -o jsonpath={.status.phase} -n default
helpers_test.go:394: (dbg) Run:  kubectl --context addons-705802 get pvc hpvc -o jsonpath={.status.phase} -n default
helpers_test.go:394: (dbg) Run:  kubectl --context addons-705802 get pvc hpvc -o jsonpath={.status.phase} -n default
helpers_test.go:394: (dbg) Run:  kubectl --context addons-705802 get pvc hpvc -o jsonpath={.status.phase} -n default
helpers_test.go:394: (dbg) Run:  kubectl --context addons-705802 get pvc hpvc -o jsonpath={.status.phase} -n default
helpers_test.go:394: (dbg) Run:  kubectl --context addons-705802 get pvc hpvc -o jsonpath={.status.phase} -n default
helpers_test.go:394: (dbg) Run:  kubectl --context addons-705802 get pvc hpvc -o jsonpath={.status.phase} -n default
helpers_test.go:394: (dbg) Run:  kubectl --context addons-705802 get pvc hpvc -o jsonpath={.status.phase} -n default
helpers_test.go:394: (dbg) Run:  kubectl --context addons-705802 get pvc hpvc -o jsonpath={.status.phase} -n default
helpers_test.go:394: (dbg) Run:  kubectl --context addons-705802 get pvc hpvc -o jsonpath={.status.phase} -n default
helpers_test.go:394: (dbg) Run:  kubectl --context addons-705802 get pvc hpvc -o jsonpath={.status.phase} -n default
addons_test.go:576: (dbg) Run:  kubectl --context addons-705802 create -f testdata/csi-hostpath-driver/pv-pod.yaml
addons_test.go:581: (dbg) TestAddons/parallel/CSI: waiting 6m0s for pods matching "app=task-pv-pod" in namespace "default" ...
helpers_test.go:344: "task-pv-pod" [906d69cd-992b-4e18-bdd0-a6e32d01153e] Pending
helpers_test.go:344: "task-pv-pod" [906d69cd-992b-4e18-bdd0-a6e32d01153e] Pending / Ready:ContainersNotReady (containers with unready status: [task-pv-container]) / ContainersReady:ContainersNotReady (containers with unready status: [task-pv-container])
helpers_test.go:344: "task-pv-pod" [906d69cd-992b-4e18-bdd0-a6e32d01153e] Running
addons_test.go:581: (dbg) TestAddons/parallel/CSI: app=task-pv-pod healthy within 8.003569068s
addons_test.go:586: (dbg) Run:  kubectl --context addons-705802 create -f testdata/csi-hostpath-driver/snapshot.yaml
addons_test.go:591: (dbg) TestAddons/parallel/CSI: waiting 6m0s for volume snapshot "new-snapshot-demo" in namespace "default" ...
helpers_test.go:419: (dbg) Run:  kubectl --context addons-705802 get volumesnapshot new-snapshot-demo -o jsonpath={.status.readyToUse} -n default
helpers_test.go:419: (dbg) Run:  kubectl --context addons-705802 get volumesnapshot new-snapshot-demo -o jsonpath={.status.readyToUse} -n default
addons_test.go:596: (dbg) Run:  kubectl --context addons-705802 delete pod task-pv-pod
addons_test.go:596: (dbg) Done: kubectl --context addons-705802 delete pod task-pv-pod: (1.450730086s)
addons_test.go:602: (dbg) Run:  kubectl --context addons-705802 delete pvc hpvc
addons_test.go:608: (dbg) Run:  kubectl --context addons-705802 create -f testdata/csi-hostpath-driver/pvc-restore.yaml
addons_test.go:613: (dbg) TestAddons/parallel/CSI: waiting 6m0s for pvc "hpvc-restore" in namespace "default" ...
helpers_test.go:394: (dbg) Run:  kubectl --context addons-705802 get pvc hpvc-restore -o jsonpath={.status.phase} -n default
helpers_test.go:394: (dbg) Run:  kubectl --context addons-705802 get pvc hpvc-restore -o jsonpath={.status.phase} -n default
helpers_test.go:394: (dbg) Run:  kubectl --context addons-705802 get pvc hpvc-restore -o jsonpath={.status.phase} -n default
helpers_test.go:394: (dbg) Run:  kubectl --context addons-705802 get pvc hpvc-restore -o jsonpath={.status.phase} -n default
helpers_test.go:394: (dbg) Run:  kubectl --context addons-705802 get pvc hpvc-restore -o jsonpath={.status.phase} -n default
helpers_test.go:394: (dbg) Run:  kubectl --context addons-705802 get pvc hpvc-restore -o jsonpath={.status.phase} -n default
helpers_test.go:394: (dbg) Run:  kubectl --context addons-705802 get pvc hpvc-restore -o jsonpath={.status.phase} -n default
helpers_test.go:394: (dbg) Run:  kubectl --context addons-705802 get pvc hpvc-restore -o jsonpath={.status.phase} -n default
helpers_test.go:394: (dbg) Run:  kubectl --context addons-705802 get pvc hpvc-restore -o jsonpath={.status.phase} -n default
helpers_test.go:394: (dbg) Run:  kubectl --context addons-705802 get pvc hpvc-restore -o jsonpath={.status.phase} -n default
helpers_test.go:394: (dbg) Run:  kubectl --context addons-705802 get pvc hpvc-restore -o jsonpath={.status.phase} -n default
helpers_test.go:394: (dbg) Run:  kubectl --context addons-705802 get pvc hpvc-restore -o jsonpath={.status.phase} -n default
helpers_test.go:394: (dbg) Run:  kubectl --context addons-705802 get pvc hpvc-restore -o jsonpath={.status.phase} -n default
helpers_test.go:394: (dbg) Run:  kubectl --context addons-705802 get pvc hpvc-restore -o jsonpath={.status.phase} -n default
helpers_test.go:394: (dbg) Run:  kubectl --context addons-705802 get pvc hpvc-restore -o jsonpath={.status.phase} -n default
helpers_test.go:394: (dbg) Run:  kubectl --context addons-705802 get pvc hpvc-restore -o jsonpath={.status.phase} -n default
helpers_test.go:394: (dbg) Run:  kubectl --context addons-705802 get pvc hpvc-restore -o jsonpath={.status.phase} -n default
helpers_test.go:394: (dbg) Run:  kubectl --context addons-705802 get pvc hpvc-restore -o jsonpath={.status.phase} -n default
helpers_test.go:394: (dbg) Run:  kubectl --context addons-705802 get pvc hpvc-restore -o jsonpath={.status.phase} -n default
helpers_test.go:394: (dbg) Run:  kubectl --context addons-705802 get pvc hpvc-restore -o jsonpath={.status.phase} -n default
addons_test.go:618: (dbg) Run:  kubectl --context addons-705802 create -f testdata/csi-hostpath-driver/pv-pod-restore.yaml
addons_test.go:623: (dbg) TestAddons/parallel/CSI: waiting 6m0s for pods matching "app=task-pv-pod-restore" in namespace "default" ...
helpers_test.go:344: "task-pv-pod-restore" [59d28e1b-61a8-4e0a-b6c0-0248a808139f] Pending
helpers_test.go:344: "task-pv-pod-restore" [59d28e1b-61a8-4e0a-b6c0-0248a808139f] Pending / Ready:ContainersNotReady (containers with unready status: [task-pv-container]) / ContainersReady:ContainersNotReady (containers with unready status: [task-pv-container])
helpers_test.go:344: "task-pv-pod-restore" [59d28e1b-61a8-4e0a-b6c0-0248a808139f] Running
addons_test.go:623: (dbg) TestAddons/parallel/CSI: app=task-pv-pod-restore healthy within 8.005569587s
addons_test.go:628: (dbg) Run:  kubectl --context addons-705802 delete pod task-pv-pod-restore
addons_test.go:632: (dbg) Run:  kubectl --context addons-705802 delete pvc hpvc-restore
addons_test.go:636: (dbg) Run:  kubectl --context addons-705802 delete volumesnapshot new-snapshot-demo
addons_test.go:640: (dbg) Run:  out/minikube-linux-arm64 -p addons-705802 addons disable csi-hostpath-driver --alsologtostderr -v=1
addons_test.go:640: (dbg) Done: out/minikube-linux-arm64 -p addons-705802 addons disable csi-hostpath-driver --alsologtostderr -v=1: (6.735217881s)
addons_test.go:644: (dbg) Run:  out/minikube-linux-arm64 -p addons-705802 addons disable volumesnapshots --alsologtostderr -v=1
--- PASS: TestAddons/parallel/CSI (67.54s)
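
For reference, the sequence exercised above can be reproduced by hand. This is a rough sketch rather than the test itself: the profile/context name and the manifest paths (testdata/csi-hostpath-driver/*) are the ones from this run, and both addons must be enabled first.

    # enable the snapshot CRDs/controller and the CSI hostpath driver
    out/minikube-linux-arm64 -p addons-705802 addons enable volumesnapshots
    out/minikube-linux-arm64 -p addons-705802 addons enable csi-hostpath-driver

    # provision a PVC and a pod that mounts it
    kubectl --context addons-705802 create -f testdata/csi-hostpath-driver/pvc.yaml
    kubectl --context addons-705802 create -f testdata/csi-hostpath-driver/pv-pod.yaml
    kubectl --context addons-705802 wait --for=condition=Ready pod -l app=task-pv-pod --timeout=6m

    # snapshot the volume, then drop the original pod and PVC
    kubectl --context addons-705802 create -f testdata/csi-hostpath-driver/snapshot.yaml
    kubectl --context addons-705802 delete pod task-pv-pod
    kubectl --context addons-705802 delete pvc hpvc

    # restore the snapshot into a new PVC and consume it from a new pod
    kubectl --context addons-705802 create -f testdata/csi-hostpath-driver/pvc-restore.yaml
    kubectl --context addons-705802 create -f testdata/csi-hostpath-driver/pv-pod-restore.yaml
    kubectl --context addons-705802 wait --for=condition=Ready pod -l app=task-pv-pod-restore --timeout=6m

    # clean up
    kubectl --context addons-705802 delete pod task-pv-pod-restore
    kubectl --context addons-705802 delete pvc hpvc-restore
    kubectl --context addons-705802 delete volumesnapshot new-snapshot-demo
    out/minikube-linux-arm64 -p addons-705802 addons disable csi-hostpath-driver --alsologtostderr -v=1
    out/minikube-linux-arm64 -p addons-705802 addons disable volumesnapshots --alsologtostderr -v=1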

                                                
                                    
x
+
TestAddons/parallel/Headlamp (11.96s)

                                                
                                                
=== RUN   TestAddons/parallel/Headlamp
=== PAUSE TestAddons/parallel/Headlamp

                                                
                                                

                                                
                                                
=== CONT  TestAddons/parallel/Headlamp
addons_test.go:826: (dbg) Run:  out/minikube-linux-arm64 addons enable headlamp -p addons-705802 --alsologtostderr -v=1
addons_test.go:831: (dbg) TestAddons/parallel/Headlamp: waiting 8m0s for pods matching "app.kubernetes.io/name=headlamp" in namespace "headlamp" ...
helpers_test.go:344: "headlamp-7fc69f7444-l88s9" [197acad2-cc45-4a99-9bc1-8ea83fa596e5] Pending
helpers_test.go:344: "headlamp-7fc69f7444-l88s9" [197acad2-cc45-4a99-9bc1-8ea83fa596e5] Pending / Ready:ContainersNotReady (containers with unready status: [headlamp]) / ContainersReady:ContainersNotReady (containers with unready status: [headlamp])
helpers_test.go:344: "headlamp-7fc69f7444-l88s9" [197acad2-cc45-4a99-9bc1-8ea83fa596e5] Running
addons_test.go:831: (dbg) TestAddons/parallel/Headlamp: app.kubernetes.io/name=headlamp healthy within 11.003031182s
--- PASS: TestAddons/parallel/Headlamp (11.96s)
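
The simpler addon checks in this run all follow the same pattern: enable the addon, then wait for its labelled pod. A minimal equivalent for Headlamp, using the names from this run:

    out/minikube-linux-arm64 addons enable headlamp -p addons-705802 --alsologtostderr -v=1
    kubectl --context addons-705802 -n headlamp wait --for=condition=Ready \
        pod -l app.kubernetes.io/name=headlamp --timeout=8m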

                                                
                                    
x
+
TestAddons/parallel/CloudSpanner (5.51s)

                                                
                                                
=== RUN   TestAddons/parallel/CloudSpanner
=== PAUSE TestAddons/parallel/CloudSpanner

                                                
                                                

                                                
                                                
=== CONT  TestAddons/parallel/CloudSpanner
addons_test.go:859: (dbg) TestAddons/parallel/CloudSpanner: waiting 6m0s for pods matching "app=cloud-spanner-emulator" in namespace "default" ...
helpers_test.go:344: "cloud-spanner-emulator-6fcd4f6f98-6gdxz" [1727c663-b4bf-483f-96ba-ab009f6be08a] Running
addons_test.go:859: (dbg) TestAddons/parallel/CloudSpanner: app=cloud-spanner-emulator healthy within 5.004044976s
addons_test.go:862: (dbg) Run:  out/minikube-linux-arm64 addons disable cloud-spanner -p addons-705802
--- PASS: TestAddons/parallel/CloudSpanner (5.51s)

                                                
                                    
x
+
TestAddons/parallel/LocalPath (55.44s)

                                                
                                                
=== RUN   TestAddons/parallel/LocalPath
=== PAUSE TestAddons/parallel/LocalPath

                                                
                                                

                                                
                                                
=== CONT  TestAddons/parallel/LocalPath
addons_test.go:974: (dbg) Run:  kubectl --context addons-705802 apply -f testdata/storage-provisioner-rancher/pvc.yaml
addons_test.go:980: (dbg) Run:  kubectl --context addons-705802 apply -f testdata/storage-provisioner-rancher/pod.yaml
addons_test.go:984: (dbg) TestAddons/parallel/LocalPath: waiting 5m0s for pvc "test-pvc" in namespace "default" ...
helpers_test.go:394: (dbg) Run:  kubectl --context addons-705802 get pvc test-pvc -o jsonpath={.status.phase} -n default
helpers_test.go:394: (dbg) Run:  kubectl --context addons-705802 get pvc test-pvc -o jsonpath={.status.phase} -n default
helpers_test.go:394: (dbg) Run:  kubectl --context addons-705802 get pvc test-pvc -o jsonpath={.status.phase} -n default
helpers_test.go:394: (dbg) Run:  kubectl --context addons-705802 get pvc test-pvc -o jsonpath={.status.phase} -n default
helpers_test.go:394: (dbg) Run:  kubectl --context addons-705802 get pvc test-pvc -o jsonpath={.status.phase} -n default
helpers_test.go:394: (dbg) Run:  kubectl --context addons-705802 get pvc test-pvc -o jsonpath={.status.phase} -n default
helpers_test.go:394: (dbg) Run:  kubectl --context addons-705802 get pvc test-pvc -o jsonpath={.status.phase} -n default
addons_test.go:987: (dbg) TestAddons/parallel/LocalPath: waiting 3m0s for pods matching "run=test-local-path" in namespace "default" ...
helpers_test.go:344: "test-local-path" [2fcae0f7-e17b-4706-97e9-bf0e94361f90] Pending / Ready:ContainersNotReady (containers with unready status: [busybox]) / ContainersReady:ContainersNotReady (containers with unready status: [busybox])
helpers_test.go:344: "test-local-path" [2fcae0f7-e17b-4706-97e9-bf0e94361f90] Pending / Initialized:PodCompleted / Ready:PodCompleted / ContainersReady:PodCompleted
helpers_test.go:344: "test-local-path" [2fcae0f7-e17b-4706-97e9-bf0e94361f90] Succeeded / Initialized:PodCompleted / Ready:PodCompleted / ContainersReady:PodCompleted
addons_test.go:987: (dbg) TestAddons/parallel/LocalPath: run=test-local-path healthy within 5.003607683s
addons_test.go:992: (dbg) Run:  kubectl --context addons-705802 get pvc test-pvc -o=json
addons_test.go:1001: (dbg) Run:  out/minikube-linux-arm64 -p addons-705802 ssh "cat /opt/local-path-provisioner/pvc-d4a82456-6a8b-4314-80b4-9dc88708fba6_default_test-pvc/file1"
addons_test.go:1013: (dbg) Run:  kubectl --context addons-705802 delete pod test-local-path
addons_test.go:1017: (dbg) Run:  kubectl --context addons-705802 delete pvc test-pvc
addons_test.go:1021: (dbg) Run:  out/minikube-linux-arm64 -p addons-705802 addons disable storage-provisioner-rancher --alsologtostderr -v=1
addons_test.go:1021: (dbg) Done: out/minikube-linux-arm64 -p addons-705802 addons disable storage-provisioner-rancher --alsologtostderr -v=1: (43.167198361s)
--- PASS: TestAddons/parallel/LocalPath (55.44s)
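
A hand-run sketch of the local-path flow above. The on-disk layout /opt/local-path-provisioner/<pv-name>_<namespace>_<pvc-name> is inferred from the path printed by this run, and the manifests are the test's own testdata files.

    # enable Rancher's local-path provisioner
    out/minikube-linux-arm64 -p addons-705802 addons enable storage-provisioner-rancher

    # create the PVC and a busybox pod that writes a file into it
    kubectl --context addons-705802 apply -f testdata/storage-provisioner-rancher/pvc.yaml
    kubectl --context addons-705802 apply -f testdata/storage-provisioner-rancher/pod.yaml

    # after the pod completes, the data sits on the node under /opt/local-path-provisioner
    PV=$(kubectl --context addons-705802 get pvc test-pvc -o jsonpath='{.spec.volumeName}')
    out/minikube-linux-arm64 -p addons-705802 ssh "cat /opt/local-path-provisioner/${PV}_default_test-pvc/file1"

    # clean up
    kubectl --context addons-705802 delete pod test-local-path
    kubectl --context addons-705802 delete pvc test-pvc
    out/minikube-linux-arm64 -p addons-705802 addons disable storage-provisioner-rancher --alsologtostderr -v=1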

                                                
                                    
x
+
TestAddons/parallel/NvidiaDevicePlugin (5.45s)

                                                
                                                
=== RUN   TestAddons/parallel/NvidiaDevicePlugin
=== PAUSE TestAddons/parallel/NvidiaDevicePlugin

                                                
                                                

                                                
                                                
=== CONT  TestAddons/parallel/NvidiaDevicePlugin
addons_test.go:1053: (dbg) TestAddons/parallel/NvidiaDevicePlugin: waiting 6m0s for pods matching "name=nvidia-device-plugin-ds" in namespace "kube-system" ...
helpers_test.go:344: "nvidia-device-plugin-daemonset-kxmqr" [3f82c240-bb27-4a72-a3f6-7af8eb39a770] Running
addons_test.go:1053: (dbg) TestAddons/parallel/NvidiaDevicePlugin: name=nvidia-device-plugin-ds healthy within 5.004381732s
addons_test.go:1056: (dbg) Run:  out/minikube-linux-arm64 addons disable nvidia-device-plugin -p addons-705802
--- PASS: TestAddons/parallel/NvidiaDevicePlugin (5.45s)

                                                
                                    
x
+
TestAddons/parallel/Yakd (6.01s)

                                                
                                                
=== RUN   TestAddons/parallel/Yakd
=== PAUSE TestAddons/parallel/Yakd

                                                
                                                

                                                
                                                
=== CONT  TestAddons/parallel/Yakd
addons_test.go:1064: (dbg) TestAddons/parallel/Yakd: waiting 2m0s for pods matching "app.kubernetes.io/name=yakd-dashboard" in namespace "yakd-dashboard" ...
helpers_test.go:344: "yakd-dashboard-5ddbf7d777-qvpwn" [eedc09d3-976b-4853-bf9c-2418b8e03c38] Running
addons_test.go:1064: (dbg) TestAddons/parallel/Yakd: app.kubernetes.io/name=yakd-dashboard healthy within 6.004168302s
--- PASS: TestAddons/parallel/Yakd (6.01s)

                                                
                                    
x
+
TestAddons/parallel/Volcano (40.45s)

                                                
                                                
=== RUN   TestAddons/parallel/Volcano
=== PAUSE TestAddons/parallel/Volcano

                                                
                                                

                                                
                                                
=== CONT  TestAddons/parallel/Volcano
addons_test.go:889: volcano-scheduler stabilized in 7.177229ms
addons_test.go:897: volcano-admission stabilized in 8.015606ms
addons_test.go:905: volcano-controller stabilized in 8.381548ms
addons_test.go:911: (dbg) TestAddons/parallel/Volcano: waiting 6m0s for pods matching "app=volcano-scheduler" in namespace "volcano-system" ...
helpers_test.go:344: "volcano-scheduler-765f888978-n99dg" [4fde3635-9dfc-49f3-b81f-b5e79fd0f36c] Running
addons_test.go:911: (dbg) TestAddons/parallel/Volcano: app=volcano-scheduler healthy within 6.003587825s
addons_test.go:915: (dbg) TestAddons/parallel/Volcano: waiting 6m0s for pods matching "app=volcano-admission" in namespace "volcano-system" ...
helpers_test.go:344: "volcano-admission-7b497cf95b-tcbt4" [5118c061-009b-4439-84ee-970d86b73c27] Running
addons_test.go:915: (dbg) TestAddons/parallel/Volcano: app=volcano-admission healthy within 5.00445187s
addons_test.go:919: (dbg) TestAddons/parallel/Volcano: waiting 6m0s for pods matching "app=volcano-controller" in namespace "volcano-system" ...
helpers_test.go:344: "volcano-controller-86c5446455-p2mlp" [dc2dab41-07d4-46f3-be65-8850866fde97] Running
addons_test.go:919: (dbg) TestAddons/parallel/Volcano: app=volcano-controller healthy within 5.003866424s
addons_test.go:924: (dbg) Run:  kubectl --context addons-705802 delete -n volcano-system job volcano-admission-init
addons_test.go:930: (dbg) Run:  kubectl --context addons-705802 create -f testdata/vcjob.yaml
addons_test.go:938: (dbg) Run:  kubectl --context addons-705802 get vcjob -n my-volcano
addons_test.go:956: (dbg) TestAddons/parallel/Volcano: waiting 3m0s for pods matching "volcano.sh/job-name=test-job" in namespace "my-volcano" ...
helpers_test.go:344: "test-job-nginx-0" [fde4182f-31ae-4417-a3f2-3e8189396cac] Pending
helpers_test.go:344: "test-job-nginx-0" [fde4182f-31ae-4417-a3f2-3e8189396cac] Pending / Ready:ContainersNotReady (containers with unready status: [nginx]) / ContainersReady:ContainersNotReady (containers with unready status: [nginx])
helpers_test.go:344: "test-job-nginx-0" [fde4182f-31ae-4417-a3f2-3e8189396cac] Running
addons_test.go:956: (dbg) TestAddons/parallel/Volcano: volcano.sh/job-name=test-job healthy within 14.003310843s
addons_test.go:960: (dbg) Run:  out/minikube-linux-arm64 -p addons-705802 addons disable volcano --alsologtostderr -v=1
addons_test.go:960: (dbg) Done: out/minikube-linux-arm64 -p addons-705802 addons disable volcano --alsologtostderr -v=1: (9.809044954s)
--- PASS: TestAddons/parallel/Volcano (40.45s)
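
Roughly equivalent manual steps for the Volcano check, assuming the volcano addon is available in this minikube build; the pod labels and the sample job manifest (testdata/vcjob.yaml) are the ones used above.

    # the addon installs the scheduler, admission webhook and controller into volcano-system
    out/minikube-linux-arm64 -p addons-705802 addons enable volcano
    kubectl --context addons-705802 -n volcano-system wait --for=condition=Ready pod -l app=volcano-scheduler --timeout=6m
    kubectl --context addons-705802 -n volcano-system wait --for=condition=Ready pod -l app=volcano-admission --timeout=6m
    kubectl --context addons-705802 -n volcano-system wait --for=condition=Ready pod -l app=volcano-controller --timeout=6m

    # submit the sample Volcano job and check it was admitted
    kubectl --context addons-705802 create -f testdata/vcjob.yaml
    kubectl --context addons-705802 get vcjob -n my-volcano

    out/minikube-linux-arm64 -p addons-705802 addons disable volcano --alsologtostderr -v=1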

                                                
                                    
x
+
TestAddons/serial/GCPAuth/Namespaces (0.17s)

                                                
                                                
=== RUN   TestAddons/serial/GCPAuth/Namespaces
addons_test.go:652: (dbg) Run:  kubectl --context addons-705802 create ns new-namespace
addons_test.go:666: (dbg) Run:  kubectl --context addons-705802 get secret gcp-auth -n new-namespace
--- PASS: TestAddons/serial/GCPAuth/Namespaces (0.17s)

                                                
                                    
x
+
TestAddons/StoppedEnableDisable (11.12s)

                                                
                                                
=== RUN   TestAddons/StoppedEnableDisable
addons_test.go:174: (dbg) Run:  out/minikube-linux-arm64 stop -p addons-705802
addons_test.go:174: (dbg) Done: out/minikube-linux-arm64 stop -p addons-705802: (10.86026375s)
addons_test.go:178: (dbg) Run:  out/minikube-linux-arm64 addons enable dashboard -p addons-705802
addons_test.go:182: (dbg) Run:  out/minikube-linux-arm64 addons disable dashboard -p addons-705802
addons_test.go:187: (dbg) Run:  out/minikube-linux-arm64 addons disable gvisor -p addons-705802
--- PASS: TestAddons/StoppedEnableDisable (11.12s)

                                                
                                    
x
+
TestCertOptions (41.42s)

                                                
                                                
=== RUN   TestCertOptions
=== PAUSE TestCertOptions

                                                
                                                

                                                
                                                
=== CONT  TestCertOptions
cert_options_test.go:49: (dbg) Run:  out/minikube-linux-arm64 start -p cert-options-406193 --memory=2048 --apiserver-ips=127.0.0.1 --apiserver-ips=192.168.15.15 --apiserver-names=localhost --apiserver-names=www.google.com --apiserver-port=8555 --driver=docker  --container-runtime=docker
E0620 17:45:36.958155    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/addons-705802/client.crt: no such file or directory
cert_options_test.go:49: (dbg) Done: out/minikube-linux-arm64 start -p cert-options-406193 --memory=2048 --apiserver-ips=127.0.0.1 --apiserver-ips=192.168.15.15 --apiserver-names=localhost --apiserver-names=www.google.com --apiserver-port=8555 --driver=docker  --container-runtime=docker: (38.649897938s)
cert_options_test.go:60: (dbg) Run:  out/minikube-linux-arm64 -p cert-options-406193 ssh "openssl x509 -text -noout -in /var/lib/minikube/certs/apiserver.crt"
cert_options_test.go:88: (dbg) Run:  kubectl --context cert-options-406193 config view
cert_options_test.go:100: (dbg) Run:  out/minikube-linux-arm64 ssh -p cert-options-406193 -- "sudo cat /etc/kubernetes/admin.conf"
helpers_test.go:175: Cleaning up "cert-options-406193" profile ...
helpers_test.go:178: (dbg) Run:  out/minikube-linux-arm64 delete -p cert-options-406193
helpers_test.go:178: (dbg) Done: out/minikube-linux-arm64 delete -p cert-options-406193: (2.116597135s)
--- PASS: TestCertOptions (41.42s)
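
The flags above add extra SANs and a non-default port to the apiserver certificate. A sketch of how to verify the result by hand, using this run's profile name:

    # start with extra apiserver IPs/names and a custom apiserver port
    out/minikube-linux-arm64 start -p cert-options-406193 --memory=2048 \
        --apiserver-ips=127.0.0.1 --apiserver-ips=192.168.15.15 \
        --apiserver-names=localhost --apiserver-names=www.google.com \
        --apiserver-port=8555 --driver=docker --container-runtime=docker

    # the extra SANs should show up in the serving certificate ...
    out/minikube-linux-arm64 -p cert-options-406193 ssh "openssl x509 -text -noout -in /var/lib/minikube/certs/apiserver.crt"
    # ... and the custom port in the admin kubeconfig inside the node
    out/minikube-linux-arm64 ssh -p cert-options-406193 -- "sudo cat /etc/kubernetes/admin.conf"

    out/minikube-linux-arm64 delete -p cert-options-406193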

                                                
                                    
x
+
TestCertExpiration (249.66s)

                                                
                                                
=== RUN   TestCertExpiration
=== PAUSE TestCertExpiration

                                                
                                                

                                                
                                                
=== CONT  TestCertExpiration
cert_options_test.go:123: (dbg) Run:  out/minikube-linux-arm64 start -p cert-expiration-278504 --memory=2048 --cert-expiration=3m --driver=docker  --container-runtime=docker
cert_options_test.go:123: (dbg) Done: out/minikube-linux-arm64 start -p cert-expiration-278504 --memory=2048 --cert-expiration=3m --driver=docker  --container-runtime=docker: (45.413969385s)
cert_options_test.go:131: (dbg) Run:  out/minikube-linux-arm64 start -p cert-expiration-278504 --memory=2048 --cert-expiration=8760h --driver=docker  --container-runtime=docker
E0620 17:48:57.044402    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/skaffold-187542/client.crt: no such file or directory
cert_options_test.go:131: (dbg) Done: out/minikube-linux-arm64 start -p cert-expiration-278504 --memory=2048 --cert-expiration=8760h --driver=docker  --container-runtime=docker: (21.936542348s)
helpers_test.go:175: Cleaning up "cert-expiration-278504" profile ...
helpers_test.go:178: (dbg) Run:  out/minikube-linux-arm64 delete -p cert-expiration-278504
helpers_test.go:178: (dbg) Done: out/minikube-linux-arm64 delete -p cert-expiration-278504: (2.308544306s)
--- PASS: TestCertExpiration (249.66s)
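
The certificate-expiration check starts a cluster with very short-lived certs and then re-runs start with a longer --cert-expiration once the originals are near (or past) expiry; the second start is expected to regenerate them. Sketch, using this run's profile name:

    out/minikube-linux-arm64 start -p cert-expiration-278504 --memory=2048 --cert-expiration=3m \
        --driver=docker --container-runtime=docker
    # ... wait out the 3-minute window, then extend the expiration to one year
    out/minikube-linux-arm64 start -p cert-expiration-278504 --memory=2048 --cert-expiration=8760h \
        --driver=docker --container-runtime=docker
    out/minikube-linux-arm64 delete -p cert-expiration-278504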

                                                
                                    
x
+
TestDockerFlags (48.55s)

                                                
                                                
=== RUN   TestDockerFlags
=== PAUSE TestDockerFlags

                                                
                                                

                                                
                                                
=== CONT  TestDockerFlags
docker_test.go:51: (dbg) Run:  out/minikube-linux-arm64 start -p docker-flags-766020 --cache-images=false --memory=2048 --install-addons=false --wait=false --docker-env=FOO=BAR --docker-env=BAZ=BAT --docker-opt=debug --docker-opt=icc=true --alsologtostderr -v=5 --driver=docker  --container-runtime=docker
docker_test.go:51: (dbg) Done: out/minikube-linux-arm64 start -p docker-flags-766020 --cache-images=false --memory=2048 --install-addons=false --wait=false --docker-env=FOO=BAR --docker-env=BAZ=BAT --docker-opt=debug --docker-opt=icc=true --alsologtostderr -v=5 --driver=docker  --container-runtime=docker: (45.579461997s)
docker_test.go:56: (dbg) Run:  out/minikube-linux-arm64 -p docker-flags-766020 ssh "sudo systemctl show docker --property=Environment --no-pager"
docker_test.go:67: (dbg) Run:  out/minikube-linux-arm64 -p docker-flags-766020 ssh "sudo systemctl show docker --property=ExecStart --no-pager"
helpers_test.go:175: Cleaning up "docker-flags-766020" profile ...
helpers_test.go:178: (dbg) Run:  out/minikube-linux-arm64 delete -p docker-flags-766020
helpers_test.go:178: (dbg) Done: out/minikube-linux-arm64 delete -p docker-flags-766020: (2.209847405s)
--- PASS: TestDockerFlags (48.55s)
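
--docker-env and --docker-opt are forwarded to the dockerd running inside the node, and the test reads them back from the docker systemd unit. Manual sketch; the expected values in the comments are assumptions based on what the test asserts:

    out/minikube-linux-arm64 start -p docker-flags-766020 --memory=2048 \
        --docker-env=FOO=BAR --docker-env=BAZ=BAT \
        --docker-opt=debug --docker-opt=icc=true \
        --driver=docker --container-runtime=docker

    # FOO=BAR and BAZ=BAT should appear in the unit's Environment ...
    out/minikube-linux-arm64 -p docker-flags-766020 ssh "sudo systemctl show docker --property=Environment --no-pager"
    # ... and the --docker-opt values as extra dockerd arguments in ExecStart
    out/minikube-linux-arm64 -p docker-flags-766020 ssh "sudo systemctl show docker --property=ExecStart --no-pager"

    out/minikube-linux-arm64 delete -p docker-flags-766020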

                                                
                                    
x
+
TestForceSystemdFlag (42.45s)

                                                
                                                
=== RUN   TestForceSystemdFlag
=== PAUSE TestForceSystemdFlag

                                                
                                                

                                                
                                                
=== CONT  TestForceSystemdFlag
docker_test.go:91: (dbg) Run:  out/minikube-linux-arm64 start -p force-systemd-flag-424797 --memory=2048 --force-systemd --alsologtostderr -v=5 --driver=docker  --container-runtime=docker
docker_test.go:91: (dbg) Done: out/minikube-linux-arm64 start -p force-systemd-flag-424797 --memory=2048 --force-systemd --alsologtostderr -v=5 --driver=docker  --container-runtime=docker: (39.956152927s)
docker_test.go:110: (dbg) Run:  out/minikube-linux-arm64 -p force-systemd-flag-424797 ssh "docker info --format {{.CgroupDriver}}"
helpers_test.go:175: Cleaning up "force-systemd-flag-424797" profile ...
helpers_test.go:178: (dbg) Run:  out/minikube-linux-arm64 delete -p force-systemd-flag-424797
helpers_test.go:178: (dbg) Done: out/minikube-linux-arm64 delete -p force-systemd-flag-424797: (2.068673289s)
--- PASS: TestForceSystemdFlag (42.45s)
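
--force-systemd switches the container runtime's cgroup driver to systemd; the check is a single docker info query inside the node. Sketch:

    out/minikube-linux-arm64 start -p force-systemd-flag-424797 --memory=2048 --force-systemd \
        --driver=docker --container-runtime=docker
    # expected output: systemd
    out/minikube-linux-arm64 -p force-systemd-flag-424797 ssh "docker info --format {{.CgroupDriver}}"
    out/minikube-linux-arm64 delete -p force-systemd-flag-424797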

                                                
                                    
x
+
TestForceSystemdEnv (42.69s)

                                                
                                                
=== RUN   TestForceSystemdEnv
=== PAUSE TestForceSystemdEnv

                                                
                                                

                                                
                                                
=== CONT  TestForceSystemdEnv
docker_test.go:155: (dbg) Run:  out/minikube-linux-arm64 start -p force-systemd-env-537035 --memory=2048 --alsologtostderr -v=5 --driver=docker  --container-runtime=docker
docker_test.go:155: (dbg) Done: out/minikube-linux-arm64 start -p force-systemd-env-537035 --memory=2048 --alsologtostderr -v=5 --driver=docker  --container-runtime=docker: (40.061161821s)
docker_test.go:110: (dbg) Run:  out/minikube-linux-arm64 -p force-systemd-env-537035 ssh "docker info --format {{.CgroupDriver}}"
helpers_test.go:175: Cleaning up "force-systemd-env-537035" profile ...
helpers_test.go:178: (dbg) Run:  out/minikube-linux-arm64 delete -p force-systemd-env-537035
helpers_test.go:178: (dbg) Done: out/minikube-linux-arm64 delete -p force-systemd-env-537035: (2.175143533s)
--- PASS: TestForceSystemdEnv (42.69s)
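
Here the systemd cgroup driver is requested through the environment rather than a flag. The variable name MINIKUBE_FORCE_SYSTEMD is an assumption based on the environment listing printed by minikube start later in this report; the verification step is the same docker info query.

    # assumed environment equivalent of --force-systemd
    MINIKUBE_FORCE_SYSTEMD=true out/minikube-linux-arm64 start -p force-systemd-env-537035 --memory=2048 \
        --driver=docker --container-runtime=docker
    out/minikube-linux-arm64 -p force-systemd-env-537035 ssh "docker info --format {{.CgroupDriver}}"   # expect: systemd
    out/minikube-linux-arm64 delete -p force-systemd-env-537035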

                                                
                                    
x
+
TestErrorSpam/setup (31.7s)

                                                
                                                
=== RUN   TestErrorSpam/setup
error_spam_test.go:81: (dbg) Run:  out/minikube-linux-arm64 start -p nospam-928938 -n=1 --memory=2250 --wait=false --log_dir=/tmp/nospam-928938 --driver=docker  --container-runtime=docker
error_spam_test.go:81: (dbg) Done: out/minikube-linux-arm64 start -p nospam-928938 -n=1 --memory=2250 --wait=false --log_dir=/tmp/nospam-928938 --driver=docker  --container-runtime=docker: (31.691862938s)
--- PASS: TestErrorSpam/setup (31.70s)

                                                
                                    
x
+
TestErrorSpam/start (0.7s)

                                                
                                                
=== RUN   TestErrorSpam/start
error_spam_test.go:216: Cleaning up 1 logfile(s) ...
error_spam_test.go:159: (dbg) Run:  out/minikube-linux-arm64 -p nospam-928938 --log_dir /tmp/nospam-928938 start --dry-run
error_spam_test.go:159: (dbg) Run:  out/minikube-linux-arm64 -p nospam-928938 --log_dir /tmp/nospam-928938 start --dry-run
error_spam_test.go:182: (dbg) Run:  out/minikube-linux-arm64 -p nospam-928938 --log_dir /tmp/nospam-928938 start --dry-run
--- PASS: TestErrorSpam/start (0.70s)

                                                
                                    
x
+
TestErrorSpam/status (0.96s)

                                                
                                                
=== RUN   TestErrorSpam/status
error_spam_test.go:216: Cleaning up 0 logfile(s) ...
error_spam_test.go:159: (dbg) Run:  out/minikube-linux-arm64 -p nospam-928938 --log_dir /tmp/nospam-928938 status
error_spam_test.go:159: (dbg) Run:  out/minikube-linux-arm64 -p nospam-928938 --log_dir /tmp/nospam-928938 status
error_spam_test.go:182: (dbg) Run:  out/minikube-linux-arm64 -p nospam-928938 --log_dir /tmp/nospam-928938 status
--- PASS: TestErrorSpam/status (0.96s)

                                                
                                    
x
+
TestErrorSpam/pause (1.45s)

                                                
                                                
=== RUN   TestErrorSpam/pause
error_spam_test.go:216: Cleaning up 0 logfile(s) ...
error_spam_test.go:159: (dbg) Run:  out/minikube-linux-arm64 -p nospam-928938 --log_dir /tmp/nospam-928938 pause
error_spam_test.go:159: (dbg) Run:  out/minikube-linux-arm64 -p nospam-928938 --log_dir /tmp/nospam-928938 pause
error_spam_test.go:182: (dbg) Run:  out/minikube-linux-arm64 -p nospam-928938 --log_dir /tmp/nospam-928938 pause
--- PASS: TestErrorSpam/pause (1.45s)

                                                
                                    
x
+
TestErrorSpam/unpause (1.37s)

                                                
                                                
=== RUN   TestErrorSpam/unpause
error_spam_test.go:216: Cleaning up 0 logfile(s) ...
error_spam_test.go:159: (dbg) Run:  out/minikube-linux-arm64 -p nospam-928938 --log_dir /tmp/nospam-928938 unpause
error_spam_test.go:159: (dbg) Run:  out/minikube-linux-arm64 -p nospam-928938 --log_dir /tmp/nospam-928938 unpause
error_spam_test.go:182: (dbg) Run:  out/minikube-linux-arm64 -p nospam-928938 --log_dir /tmp/nospam-928938 unpause
--- PASS: TestErrorSpam/unpause (1.37s)

                                                
                                    
x
+
TestErrorSpam/stop (10.98s)

                                                
                                                
=== RUN   TestErrorSpam/stop
error_spam_test.go:216: Cleaning up 0 logfile(s) ...
error_spam_test.go:159: (dbg) Run:  out/minikube-linux-arm64 -p nospam-928938 --log_dir /tmp/nospam-928938 stop
error_spam_test.go:159: (dbg) Done: out/minikube-linux-arm64 -p nospam-928938 --log_dir /tmp/nospam-928938 stop: (10.786689651s)
error_spam_test.go:159: (dbg) Run:  out/minikube-linux-arm64 -p nospam-928938 --log_dir /tmp/nospam-928938 stop
error_spam_test.go:182: (dbg) Run:  out/minikube-linux-arm64 -p nospam-928938 --log_dir /tmp/nospam-928938 stop
--- PASS: TestErrorSpam/stop (10.98s)

                                                
                                    
x
+
TestFunctional/serial/CopySyncFile (0s)

                                                
                                                
=== RUN   TestFunctional/serial/CopySyncFile
functional_test.go:1851: local sync path: /home/jenkins/minikube-integration/19106-2452/.minikube/files/etc/test/nested/copy/7784/hosts
--- PASS: TestFunctional/serial/CopySyncFile (0.00s)

                                                
                                    
x
+
TestFunctional/serial/StartWithProxy (48.73s)

                                                
                                                
=== RUN   TestFunctional/serial/StartWithProxy
functional_test.go:2230: (dbg) Run:  out/minikube-linux-arm64 start -p functional-493344 --memory=4000 --apiserver-port=8441 --wait=all --driver=docker  --container-runtime=docker
functional_test.go:2230: (dbg) Done: out/minikube-linux-arm64 start -p functional-493344 --memory=4000 --apiserver-port=8441 --wait=all --driver=docker  --container-runtime=docker: (48.727244124s)
--- PASS: TestFunctional/serial/StartWithProxy (48.73s)

                                                
                                    
x
+
TestFunctional/serial/AuditLog (0s)

                                                
                                                
=== RUN   TestFunctional/serial/AuditLog
--- PASS: TestFunctional/serial/AuditLog (0.00s)

                                                
                                    
x
+
TestFunctional/serial/SoftStart (29.11s)

                                                
                                                
=== RUN   TestFunctional/serial/SoftStart
functional_test.go:655: (dbg) Run:  out/minikube-linux-arm64 start -p functional-493344 --alsologtostderr -v=8
functional_test.go:655: (dbg) Done: out/minikube-linux-arm64 start -p functional-493344 --alsologtostderr -v=8: (29.111045124s)
functional_test.go:659: soft start took 29.112905083s for "functional-493344" cluster.
--- PASS: TestFunctional/serial/SoftStart (29.11s)

                                                
                                    
x
+
TestFunctional/serial/KubeContext (0.06s)

                                                
                                                
=== RUN   TestFunctional/serial/KubeContext
functional_test.go:677: (dbg) Run:  kubectl config current-context
--- PASS: TestFunctional/serial/KubeContext (0.06s)

                                                
                                    
x
+
TestFunctional/serial/KubectlGetPods (0.11s)

                                                
                                                
=== RUN   TestFunctional/serial/KubectlGetPods
functional_test.go:692: (dbg) Run:  kubectl --context functional-493344 get po -A
--- PASS: TestFunctional/serial/KubectlGetPods (0.11s)

                                                
                                    
x
+
TestFunctional/serial/CacheCmd/cache/add_remote (3.34s)

                                                
                                                
=== RUN   TestFunctional/serial/CacheCmd/cache/add_remote
functional_test.go:1045: (dbg) Run:  out/minikube-linux-arm64 -p functional-493344 cache add registry.k8s.io/pause:3.1
functional_test.go:1045: (dbg) Done: out/minikube-linux-arm64 -p functional-493344 cache add registry.k8s.io/pause:3.1: (1.115730524s)
functional_test.go:1045: (dbg) Run:  out/minikube-linux-arm64 -p functional-493344 cache add registry.k8s.io/pause:3.3
functional_test.go:1045: (dbg) Done: out/minikube-linux-arm64 -p functional-493344 cache add registry.k8s.io/pause:3.3: (1.171310363s)
functional_test.go:1045: (dbg) Run:  out/minikube-linux-arm64 -p functional-493344 cache add registry.k8s.io/pause:latest
functional_test.go:1045: (dbg) Done: out/minikube-linux-arm64 -p functional-493344 cache add registry.k8s.io/pause:latest: (1.047965392s)
--- PASS: TestFunctional/serial/CacheCmd/cache/add_remote (3.34s)

                                                
                                    
x
+
TestFunctional/serial/CacheCmd/cache/add_local (1.06s)

                                                
                                                
=== RUN   TestFunctional/serial/CacheCmd/cache/add_local
functional_test.go:1073: (dbg) Run:  docker build -t minikube-local-cache-test:functional-493344 /tmp/TestFunctionalserialCacheCmdcacheadd_local4149453780/001
functional_test.go:1085: (dbg) Run:  out/minikube-linux-arm64 -p functional-493344 cache add minikube-local-cache-test:functional-493344
E0620 17:10:36.958160    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/addons-705802/client.crt: no such file or directory
E0620 17:10:36.965580    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/addons-705802/client.crt: no such file or directory
E0620 17:10:36.975784    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/addons-705802/client.crt: no such file or directory
E0620 17:10:36.996076    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/addons-705802/client.crt: no such file or directory
E0620 17:10:37.036354    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/addons-705802/client.crt: no such file or directory
functional_test.go:1090: (dbg) Run:  out/minikube-linux-arm64 -p functional-493344 cache delete minikube-local-cache-test:functional-493344
E0620 17:10:37.116532    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/addons-705802/client.crt: no such file or directory
functional_test.go:1079: (dbg) Run:  docker rmi minikube-local-cache-test:functional-493344
--- PASS: TestFunctional/serial/CacheCmd/cache/add_local (1.06s)

                                                
                                    
x
+
TestFunctional/serial/CacheCmd/cache/CacheDelete (0.08s)

                                                
                                                
=== RUN   TestFunctional/serial/CacheCmd/cache/CacheDelete
functional_test.go:1098: (dbg) Run:  out/minikube-linux-arm64 cache delete registry.k8s.io/pause:3.3
--- PASS: TestFunctional/serial/CacheCmd/cache/CacheDelete (0.08s)

                                                
                                    
x
+
TestFunctional/serial/CacheCmd/cache/list (0.05s)

                                                
                                                
=== RUN   TestFunctional/serial/CacheCmd/cache/list
functional_test.go:1106: (dbg) Run:  out/minikube-linux-arm64 cache list
E0620 17:10:37.277184    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/addons-705802/client.crt: no such file or directory
--- PASS: TestFunctional/serial/CacheCmd/cache/list (0.05s)

                                                
                                    
x
+
TestFunctional/serial/CacheCmd/cache/verify_cache_inside_node (0.33s)

                                                
                                                
=== RUN   TestFunctional/serial/CacheCmd/cache/verify_cache_inside_node
functional_test.go:1120: (dbg) Run:  out/minikube-linux-arm64 -p functional-493344 ssh sudo crictl images
E0620 17:10:37.597583    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/addons-705802/client.crt: no such file or directory
--- PASS: TestFunctional/serial/CacheCmd/cache/verify_cache_inside_node (0.33s)

                                                
                                    
x
+
TestFunctional/serial/CacheCmd/cache/cache_reload (1.64s)

                                                
                                                
=== RUN   TestFunctional/serial/CacheCmd/cache/cache_reload
functional_test.go:1143: (dbg) Run:  out/minikube-linux-arm64 -p functional-493344 ssh sudo docker rmi registry.k8s.io/pause:latest
functional_test.go:1149: (dbg) Run:  out/minikube-linux-arm64 -p functional-493344 ssh sudo crictl inspecti registry.k8s.io/pause:latest
E0620 17:10:38.237764    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/addons-705802/client.crt: no such file or directory
functional_test.go:1149: (dbg) Non-zero exit: out/minikube-linux-arm64 -p functional-493344 ssh sudo crictl inspecti registry.k8s.io/pause:latest: exit status 1 (317.875918ms)

                                                
                                                
-- stdout --
	FATA[0000] no such image "registry.k8s.io/pause:latest" present 

                                                
                                                
-- /stdout --
** stderr ** 
	ssh: Process exited with status 1

                                                
                                                
** /stderr **
functional_test.go:1154: (dbg) Run:  out/minikube-linux-arm64 -p functional-493344 cache reload
functional_test.go:1159: (dbg) Run:  out/minikube-linux-arm64 -p functional-493344 ssh sudo crictl inspecti registry.k8s.io/pause:latest
--- PASS: TestFunctional/serial/CacheCmd/cache/cache_reload (1.64s)
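
Images added with "cache add" live in minikube's local cache and are loaded into the node; "cache reload" pushes them back if they disappear from the node's runtime. A manual sketch of the round trip above:

    # cache an image, then remove it from the node's docker
    out/minikube-linux-arm64 -p functional-493344 cache add registry.k8s.io/pause:latest
    out/minikube-linux-arm64 -p functional-493344 ssh sudo docker rmi registry.k8s.io/pause:latest
    # inspecti now fails: the image is gone from the node
    out/minikube-linux-arm64 -p functional-493344 ssh sudo crictl inspecti registry.k8s.io/pause:latest
    # reload re-pushes everything in the cache, so the inspect succeeds again
    out/minikube-linux-arm64 -p functional-493344 cache reload
    out/minikube-linux-arm64 -p functional-493344 ssh sudo crictl inspecti registry.k8s.io/pause:latest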

                                                
                                    
x
+
TestFunctional/serial/CacheCmd/cache/delete (0.11s)

                                                
                                                
=== RUN   TestFunctional/serial/CacheCmd/cache/delete
functional_test.go:1168: (dbg) Run:  out/minikube-linux-arm64 cache delete registry.k8s.io/pause:3.1
functional_test.go:1168: (dbg) Run:  out/minikube-linux-arm64 cache delete registry.k8s.io/pause:latest
--- PASS: TestFunctional/serial/CacheCmd/cache/delete (0.11s)

                                                
                                    
x
+
TestFunctional/serial/MinikubeKubectlCmd (0.15s)

                                                
                                                
=== RUN   TestFunctional/serial/MinikubeKubectlCmd
functional_test.go:712: (dbg) Run:  out/minikube-linux-arm64 -p functional-493344 kubectl -- --context functional-493344 get pods
E0620 17:10:39.518858    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/addons-705802/client.crt: no such file or directory
--- PASS: TestFunctional/serial/MinikubeKubectlCmd (0.15s)

                                                
                                    
x
+
TestFunctional/serial/MinikubeKubectlCmdDirectly (0.13s)

                                                
                                                
=== RUN   TestFunctional/serial/MinikubeKubectlCmdDirectly
functional_test.go:737: (dbg) Run:  out/kubectl --context functional-493344 get pods
--- PASS: TestFunctional/serial/MinikubeKubectlCmdDirectly (0.13s)

                                                
                                    
x
+
TestFunctional/serial/ExtraConfig (44.67s)

                                                
                                                
=== RUN   TestFunctional/serial/ExtraConfig
functional_test.go:753: (dbg) Run:  out/minikube-linux-arm64 start -p functional-493344 --extra-config=apiserver.enable-admission-plugins=NamespaceAutoProvision --wait=all
E0620 17:10:42.079696    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/addons-705802/client.crt: no such file or directory
E0620 17:10:47.200763    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/addons-705802/client.crt: no such file or directory
E0620 17:10:57.441693    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/addons-705802/client.crt: no such file or directory
E0620 17:11:17.922105    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/addons-705802/client.crt: no such file or directory
functional_test.go:753: (dbg) Done: out/minikube-linux-arm64 start -p functional-493344 --extra-config=apiserver.enable-admission-plugins=NamespaceAutoProvision --wait=all: (44.66890638s)
functional_test.go:757: restart took 44.669138196s for "functional-493344" cluster.
--- PASS: TestFunctional/serial/ExtraConfig (44.67s)
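
--extra-config passes a flag straight through to a specific control-plane component; re-running start on an existing profile restarts the cluster with the new option, which is why this step takes roughly 45s. Sketch:

    # add an admission plugin to the apiserver and wait for everything to come back up
    out/minikube-linux-arm64 start -p functional-493344 \
        --extra-config=apiserver.enable-admission-plugins=NamespaceAutoProvision --wait=all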

                                                
                                    
x
+
TestFunctional/serial/ComponentHealth (0.1s)

                                                
                                                
=== RUN   TestFunctional/serial/ComponentHealth
functional_test.go:806: (dbg) Run:  kubectl --context functional-493344 get po -l tier=control-plane -n kube-system -o=json
functional_test.go:821: etcd phase: Running
functional_test.go:831: etcd status: Ready
functional_test.go:821: kube-apiserver phase: Running
functional_test.go:831: kube-apiserver status: Ready
functional_test.go:821: kube-controller-manager phase: Running
functional_test.go:831: kube-controller-manager status: Ready
functional_test.go:821: kube-scheduler phase: Running
functional_test.go:831: kube-scheduler status: Ready
--- PASS: TestFunctional/serial/ComponentHealth (0.10s)

                                                
                                    
x
+
TestFunctional/serial/LogsCmd (1.17s)

                                                
                                                
=== RUN   TestFunctional/serial/LogsCmd
functional_test.go:1232: (dbg) Run:  out/minikube-linux-arm64 -p functional-493344 logs
functional_test.go:1232: (dbg) Done: out/minikube-linux-arm64 -p functional-493344 logs: (1.167895605s)
--- PASS: TestFunctional/serial/LogsCmd (1.17s)

                                                
                                    
x
+
TestFunctional/serial/LogsFileCmd (1.15s)

                                                
                                                
=== RUN   TestFunctional/serial/LogsFileCmd
functional_test.go:1246: (dbg) Run:  out/minikube-linux-arm64 -p functional-493344 logs --file /tmp/TestFunctionalserialLogsFileCmd570115642/001/logs.txt
functional_test.go:1246: (dbg) Done: out/minikube-linux-arm64 -p functional-493344 logs --file /tmp/TestFunctionalserialLogsFileCmd570115642/001/logs.txt: (1.14735394s)
--- PASS: TestFunctional/serial/LogsFileCmd (1.15s)

                                                
                                    
x
+
TestFunctional/serial/InvalidService (4.61s)

                                                
                                                
=== RUN   TestFunctional/serial/InvalidService
functional_test.go:2317: (dbg) Run:  kubectl --context functional-493344 apply -f testdata/invalidsvc.yaml
functional_test.go:2331: (dbg) Run:  out/minikube-linux-arm64 service invalid-svc -p functional-493344
functional_test.go:2331: (dbg) Non-zero exit: out/minikube-linux-arm64 service invalid-svc -p functional-493344: exit status 115 (649.044879ms)

                                                
                                                
-- stdout --
	|-----------|-------------|-------------|---------------------------|
	| NAMESPACE |    NAME     | TARGET PORT |            URL            |
	|-----------|-------------|-------------|---------------------------|
	| default   | invalid-svc |          80 | http://192.168.49.2:32385 |
	|-----------|-------------|-------------|---------------------------|
	
	

                                                
                                                
-- /stdout --
** stderr ** 
	X Exiting due to SVC_UNREACHABLE: service not available: no running pod for service invalid-svc found
	* 
	╭─────────────────────────────────────────────────────────────────────────────────────────────╮
	│                                                                                             │
	│    * If the above advice does not help, please let us know:                                 │
	│      https://github.com/kubernetes/minikube/issues/new/choose                               │
	│                                                                                             │
	│    * Please run `minikube logs --file=logs.txt` and attach logs.txt to the GitHub issue.    │
	│    * Please also attach the following file to the GitHub issue:                             │
	│    * - /tmp/minikube_service_96b204199e3191fa1740d4430b018a3c8028d52d_0.log                 │
	│                                                                                             │
	╰─────────────────────────────────────────────────────────────────────────────────────────────╯

                                                
                                                
** /stderr **
functional_test.go:2323: (dbg) Run:  kubectl --context functional-493344 delete -f testdata/invalidsvc.yaml
--- PASS: TestFunctional/serial/InvalidService (4.61s)

                                                
                                    
x
+
TestFunctional/parallel/ConfigCmd (0.46s)

                                                
                                                
=== RUN   TestFunctional/parallel/ConfigCmd
=== PAUSE TestFunctional/parallel/ConfigCmd

                                                
                                                

                                                
                                                
=== CONT  TestFunctional/parallel/ConfigCmd
functional_test.go:1195: (dbg) Run:  out/minikube-linux-arm64 -p functional-493344 config unset cpus
functional_test.go:1195: (dbg) Run:  out/minikube-linux-arm64 -p functional-493344 config get cpus
functional_test.go:1195: (dbg) Non-zero exit: out/minikube-linux-arm64 -p functional-493344 config get cpus: exit status 14 (83.246433ms)

                                                
                                                
** stderr ** 
	Error: specified key could not be found in config

                                                
                                                
** /stderr **
functional_test.go:1195: (dbg) Run:  out/minikube-linux-arm64 -p functional-493344 config set cpus 2
functional_test.go:1195: (dbg) Run:  out/minikube-linux-arm64 -p functional-493344 config get cpus
functional_test.go:1195: (dbg) Run:  out/minikube-linux-arm64 -p functional-493344 config unset cpus
functional_test.go:1195: (dbg) Run:  out/minikube-linux-arm64 -p functional-493344 config get cpus
functional_test.go:1195: (dbg) Non-zero exit: out/minikube-linux-arm64 -p functional-493344 config get cpus: exit status 14 (93.085693ms)

                                                
                                                
** stderr ** 
	Error: specified key could not be found in config

                                                
                                                
** /stderr **
--- PASS: TestFunctional/parallel/ConfigCmd (0.46s)
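
"config get" on an unset key exits with status 14, which is what the two non-zero exits above are checking. The full round trip looks like this:

    out/minikube-linux-arm64 -p functional-493344 config unset cpus
    out/minikube-linux-arm64 -p functional-493344 config get cpus    # exit 14: key not found
    out/minikube-linux-arm64 -p functional-493344 config set cpus 2
    out/minikube-linux-arm64 -p functional-493344 config get cpus    # prints 2
    out/minikube-linux-arm64 -p functional-493344 config unset cpus
    out/minikube-linux-arm64 -p functional-493344 config get cpus    # exit 14 again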

                                                
                                    
x
+
TestFunctional/parallel/DashboardCmd (11.86s)

                                                
                                                
=== RUN   TestFunctional/parallel/DashboardCmd
=== PAUSE TestFunctional/parallel/DashboardCmd

                                                
                                                

                                                
                                                
=== CONT  TestFunctional/parallel/DashboardCmd
functional_test.go:901: (dbg) daemon: [out/minikube-linux-arm64 dashboard --url --port 36195 -p functional-493344 --alsologtostderr -v=1]
functional_test.go:906: (dbg) stopping [out/minikube-linux-arm64 dashboard --url --port 36195 -p functional-493344 --alsologtostderr -v=1] ...
helpers_test.go:508: unable to kill pid 46082: os: process already finished
--- PASS: TestFunctional/parallel/DashboardCmd (11.86s)

                                                
                                    
x
+
TestFunctional/parallel/DryRun (0.52s)

                                                
                                                
=== RUN   TestFunctional/parallel/DryRun
=== PAUSE TestFunctional/parallel/DryRun

                                                
                                                

                                                
                                                
=== CONT  TestFunctional/parallel/DryRun
functional_test.go:970: (dbg) Run:  out/minikube-linux-arm64 start -p functional-493344 --dry-run --memory 250MB --alsologtostderr --driver=docker  --container-runtime=docker
functional_test.go:970: (dbg) Non-zero exit: out/minikube-linux-arm64 start -p functional-493344 --dry-run --memory 250MB --alsologtostderr --driver=docker  --container-runtime=docker: exit status 23 (263.715539ms)

                                                
                                                
-- stdout --
	* [functional-493344] minikube v1.33.1 on Ubuntu 20.04 (arm64)
	  - MINIKUBE_LOCATION=19106
	  - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
	  - KUBECONFIG=/home/jenkins/minikube-integration/19106-2452/kubeconfig
	  - MINIKUBE_HOME=/home/jenkins/minikube-integration/19106-2452/.minikube
	  - MINIKUBE_BIN=out/minikube-linux-arm64
	  - MINIKUBE_FORCE_SYSTEMD=
	* Using the docker driver based on existing profile
	
	

                                                
                                                
-- /stdout --
** stderr ** 
	I0620 17:12:05.901624   45787 out.go:291] Setting OutFile to fd 1 ...
	I0620 17:12:05.901818   45787 out.go:338] TERM=,COLORTERM=, which probably does not support color
	I0620 17:12:05.901845   45787 out.go:304] Setting ErrFile to fd 2...
	I0620 17:12:05.901862   45787 out.go:338] TERM=,COLORTERM=, which probably does not support color
	I0620 17:12:05.902174   45787 root.go:338] Updating PATH: /home/jenkins/minikube-integration/19106-2452/.minikube/bin
	I0620 17:12:05.902602   45787 out.go:298] Setting JSON to false
	I0620 17:12:05.903657   45787 start.go:129] hostinfo: {"hostname":"ip-172-31-30-239","uptime":3277,"bootTime":1718900249,"procs":229,"os":"linux","platform":"ubuntu","platformFamily":"debian","platformVersion":"20.04","kernelVersion":"5.15.0-1063-aws","kernelArch":"aarch64","virtualizationSystem":"","virtualizationRole":"","hostId":"92f46a7d-c249-4c12-924a-77f64874c910"}
	I0620 17:12:05.903761   45787 start.go:139] virtualization:  
	I0620 17:12:05.909585   45787 out.go:177] * [functional-493344] minikube v1.33.1 on Ubuntu 20.04 (arm64)
	I0620 17:12:05.915781   45787 notify.go:220] Checking for updates...
	I0620 17:12:05.921551   45787 out.go:177]   - MINIKUBE_LOCATION=19106
	I0620 17:12:05.924336   45787 out.go:177]   - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
	I0620 17:12:05.927763   45787 out.go:177]   - KUBECONFIG=/home/jenkins/minikube-integration/19106-2452/kubeconfig
	I0620 17:12:05.930407   45787 out.go:177]   - MINIKUBE_HOME=/home/jenkins/minikube-integration/19106-2452/.minikube
	I0620 17:12:05.932406   45787 out.go:177]   - MINIKUBE_BIN=out/minikube-linux-arm64
	I0620 17:12:05.935223   45787 out.go:177]   - MINIKUBE_FORCE_SYSTEMD=
	I0620 17:12:05.938226   45787 config.go:182] Loaded profile config "functional-493344": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.30.2
	I0620 17:12:05.938833   45787 driver.go:392] Setting default libvirt URI to qemu:///system
	I0620 17:12:05.982484   45787 docker.go:122] docker version: linux-26.1.4:Docker Engine - Community
	I0620 17:12:05.982683   45787 cli_runner.go:164] Run: docker system info --format "{{json .}}"
	I0620 17:12:06.071711   45787 info.go:266] docker info: {ID:6ZPO:QZND:VNGE:LUKL:4Y3K:XELL:AAX4:2GTK:E6LM:MPRN:3ZXR:TTMR Containers:1 ContainersRunning:1 ContainersPaused:0 ContainersStopped:0 Images:2 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:true BridgeNfIP6Tables:true Debug:false NFd:32 OomKillDisable:true NGoroutines:53 SystemTime:2024-06-20 17:12:06.060430532 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1063-aws OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Architecture:aar
ch64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:2 MemTotal:8214900736 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ip-172-31-30-239 Labels:[] ExperimentalBuild:false ServerVersion:26.1.4 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:d2d58213f83a351ca8f528a95fbd145f5654e957 Expected:d2d58213f83a351ca8f528a95fbd145f5654e957} RuncCommit:{ID:v1.1.12-0-g51d5e94 Expected:v1.1.12-0-g51d5e94} InitCommit:{ID:de40ad0 Expected:de40ad0} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:<nil> ServerErro
rs:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.14.1] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.27.1]] Warnings:<nil>}}
	I0620 17:12:06.071823   45787 docker.go:295] overlay module found
	I0620 17:12:06.074590   45787 out.go:177] * Using the docker driver based on existing profile
	I0620 17:12:06.076582   45787 start.go:297] selected driver: docker
	I0620 17:12:06.076608   45787 start.go:901] validating driver "docker" against &{Name:functional-493344 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1718753665-19106@sha256:735aacbd61d487240dc39ba6e4d70dd6ae1ad6181ca2ba092d372605e48ee636 Memory:4000 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8441 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.30.2 ClusterName:functional-493344 Namespace:default APIServerHAVIP: APIServerNa
me:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[{Component:apiserver Key:enable-admission-plugins Value:NamespaceAutoProvision}] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.49.2 Port:8441 KubernetesVersion:v1.30.2 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[default-storageclass:true storage-provisioner:true] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker Mou
ntIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
	I0620 17:12:06.076717   45787 start.go:912] status for docker: {Installed:true Healthy:true Running:false NeedsImprovement:false Error:<nil> Reason: Fix: Doc: Version:}
	I0620 17:12:06.079472   45787 out.go:177] 
	W0620 17:12:06.087751   45787 out.go:239] X Exiting due to RSRC_INSUFFICIENT_REQ_MEMORY: Requested memory allocation 250MiB is less than the usable minimum of 1800MB
	X Exiting due to RSRC_INSUFFICIENT_REQ_MEMORY: Requested memory allocation 250MiB is less than the usable minimum of 1800MB
	I0620 17:12:06.089066   45787 out.go:177] 

                                                
                                                
** /stderr **
functional_test.go:987: (dbg) Run:  out/minikube-linux-arm64 start -p functional-493344 --dry-run --alsologtostderr -v=1 --driver=docker  --container-runtime=docker
--- PASS: TestFunctional/parallel/DryRun (0.52s)
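The dry-run deliberately requests only 250MB, below the 1800MB usable minimum, and expects minikube to refuse without modifying the existing profile. For reference, a minimal sketch of the same check (not the harness code), using only the command shown above:

	// Minimal sketch: repeat the failing dry-run and assert that it exits
	// non-zero (status 23 in the localized run below) with an
	// RSRC_INSUFFICIENT_REQ_MEMORY message, leaving the profile untouched.
	package main

	import (
		"fmt"
		"os/exec"
		"strings"
	)

	func main() {
		cmd := exec.Command("out/minikube-linux-arm64", "start", "-p", "functional-493344",
			"--dry-run", "--memory", "250MB", "--driver=docker", "--container-runtime=docker")
		out, err := cmd.CombinedOutput()
		if err != nil && strings.Contains(string(out), "RSRC_INSUFFICIENT_REQ_MEMORY") {
			fmt.Println("dry-run rejected the 250MB request, as expected")
			return
		}
		fmt.Printf("unexpected result: %v\n%s\n", err, out)
	}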

                                                
                                    
x
+
TestFunctional/parallel/InternationalLanguage (0.23s)

                                                
                                                
=== RUN   TestFunctional/parallel/InternationalLanguage
=== PAUSE TestFunctional/parallel/InternationalLanguage

                                                
                                                

                                                
                                                
=== CONT  TestFunctional/parallel/InternationalLanguage
functional_test.go:1016: (dbg) Run:  out/minikube-linux-arm64 start -p functional-493344 --dry-run --memory 250MB --alsologtostderr --driver=docker  --container-runtime=docker
functional_test.go:1016: (dbg) Non-zero exit: out/minikube-linux-arm64 start -p functional-493344 --dry-run --memory 250MB --alsologtostderr --driver=docker  --container-runtime=docker: exit status 23 (230.60217ms)

                                                
                                                
-- stdout --
	* [functional-493344] minikube v1.33.1 sur Ubuntu 20.04 (arm64)
	  - MINIKUBE_LOCATION=19106
	  - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
	  - KUBECONFIG=/home/jenkins/minikube-integration/19106-2452/kubeconfig
	  - MINIKUBE_HOME=/home/jenkins/minikube-integration/19106-2452/.minikube
	  - MINIKUBE_BIN=out/minikube-linux-arm64
	  - MINIKUBE_FORCE_SYSTEMD=
	* Utilisation du pilote docker basé sur le profil existant
	
	

                                                
                                                
-- /stdout --
** stderr ** 
	I0620 17:12:05.663583   45742 out.go:291] Setting OutFile to fd 1 ...
	I0620 17:12:05.663803   45742 out.go:338] TERM=,COLORTERM=, which probably does not support color
	I0620 17:12:05.663815   45742 out.go:304] Setting ErrFile to fd 2...
	I0620 17:12:05.663821   45742 out.go:338] TERM=,COLORTERM=, which probably does not support color
	I0620 17:12:05.664662   45742 root.go:338] Updating PATH: /home/jenkins/minikube-integration/19106-2452/.minikube/bin
	I0620 17:12:05.665110   45742 out.go:298] Setting JSON to false
	I0620 17:12:05.666139   45742 start.go:129] hostinfo: {"hostname":"ip-172-31-30-239","uptime":3277,"bootTime":1718900249,"procs":229,"os":"linux","platform":"ubuntu","platformFamily":"debian","platformVersion":"20.04","kernelVersion":"5.15.0-1063-aws","kernelArch":"aarch64","virtualizationSystem":"","virtualizationRole":"","hostId":"92f46a7d-c249-4c12-924a-77f64874c910"}
	I0620 17:12:05.666236   45742 start.go:139] virtualization:  
	I0620 17:12:05.668562   45742 out.go:177] * [functional-493344] minikube v1.33.1 sur Ubuntu 20.04 (arm64)
	I0620 17:12:05.671072   45742 out.go:177]   - MINIKUBE_LOCATION=19106
	I0620 17:12:05.671143   45742 notify.go:220] Checking for updates...
	I0620 17:12:05.674791   45742 out.go:177]   - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
	I0620 17:12:05.676571   45742 out.go:177]   - KUBECONFIG=/home/jenkins/minikube-integration/19106-2452/kubeconfig
	I0620 17:12:05.678277   45742 out.go:177]   - MINIKUBE_HOME=/home/jenkins/minikube-integration/19106-2452/.minikube
	I0620 17:12:05.680504   45742 out.go:177]   - MINIKUBE_BIN=out/minikube-linux-arm64
	I0620 17:12:05.683278   45742 out.go:177]   - MINIKUBE_FORCE_SYSTEMD=
	I0620 17:12:05.685632   45742 config.go:182] Loaded profile config "functional-493344": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.30.2
	I0620 17:12:05.686245   45742 driver.go:392] Setting default libvirt URI to qemu:///system
	I0620 17:12:05.710389   45742 docker.go:122] docker version: linux-26.1.4:Docker Engine - Community
	I0620 17:12:05.710518   45742 cli_runner.go:164] Run: docker system info --format "{{json .}}"
	I0620 17:12:05.819634   45742 info.go:266] docker info: {ID:6ZPO:QZND:VNGE:LUKL:4Y3K:XELL:AAX4:2GTK:E6LM:MPRN:3ZXR:TTMR Containers:1 ContainersRunning:1 ContainersPaused:0 ContainersStopped:0 Images:2 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:true BridgeNfIP6Tables:true Debug:false NFd:32 OomKillDisable:true NGoroutines:53 SystemTime:2024-06-20 17:12:05.807134202 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1063-aws OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Architecture:aar
ch64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:2 MemTotal:8214900736 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ip-172-31-30-239 Labels:[] ExperimentalBuild:false ServerVersion:26.1.4 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:d2d58213f83a351ca8f528a95fbd145f5654e957 Expected:d2d58213f83a351ca8f528a95fbd145f5654e957} RuncCommit:{ID:v1.1.12-0-g51d5e94 Expected:v1.1.12-0-g51d5e94} InitCommit:{ID:de40ad0 Expected:de40ad0} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:<nil> ServerErro
rs:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.14.1] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.27.1]] Warnings:<nil>}}
	I0620 17:12:05.819737   45742 docker.go:295] overlay module found
	I0620 17:12:05.822441   45742 out.go:177] * Utilisation du pilote docker basé sur le profil existant
	I0620 17:12:05.824341   45742 start.go:297] selected driver: docker
	I0620 17:12:05.824359   45742 start.go:901] validating driver "docker" against &{Name:functional-493344 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1718753665-19106@sha256:735aacbd61d487240dc39ba6e4d70dd6ae1ad6181ca2ba092d372605e48ee636 Memory:4000 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8441 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.30.2 ClusterName:functional-493344 Namespace:default APIServerHAVIP: APIServerNa
me:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[{Component:apiserver Key:enable-admission-plugins Value:NamespaceAutoProvision}] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.49.2 Port:8441 KubernetesVersion:v1.30.2 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[default-storageclass:true storage-provisioner:true] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker Mou
ntIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
	I0620 17:12:05.824480   45742 start.go:912] status for docker: {Installed:true Healthy:true Running:false NeedsImprovement:false Error:<nil> Reason: Fix: Doc: Version:}
	I0620 17:12:05.826620   45742 out.go:177] 
	W0620 17:12:05.829663   45742 out.go:239] X Fermeture en raison de RSRC_INSUFFICIENT_REQ_MEMORY : L'allocation de mémoire demandée 250 Mio est inférieure au minimum utilisable de 1800 Mo
	X Fermeture en raison de RSRC_INSUFFICIENT_REQ_MEMORY : L'allocation de mémoire demandée 250 Mio est inférieure au minimum utilisable de 1800 Mo
	I0620 17:12:05.831555   45742 out.go:177] 

                                                
                                                
** /stderr **
--- PASS: TestFunctional/parallel/InternationalLanguage (0.23s)
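The localized variant runs the identical failing dry-run and checks that the banner and the RSRC_INSUFFICIENT_REQ_MEMORY error come back in French ("Utilisation du pilote docker basé sur le profil existant", i.e. "Using the docker driver based on the existing profile"). A sketch of the same probe; the idea that the translation is selected via the standard LC_ALL/LANG environment variables is an assumption on my part, not something the log states:

	// Sketch (not the harness code): re-run the dry-run with a French locale
	// and confirm the localized banner appears. The LC_ALL setting below is an
	// assumed mechanism for selecting the translation.
	package main

	import (
		"fmt"
		"os"
		"os/exec"
		"strings"
	)

	func main() {
		cmd := exec.Command("out/minikube-linux-arm64", "start", "-p", "functional-493344",
			"--dry-run", "--memory", "250MB", "--driver=docker", "--container-runtime=docker")
		cmd.Env = append(os.Environ(), "LC_ALL=fr_FR.UTF-8") // assumed locale switch
		out, _ := cmd.CombinedOutput()                       // non-zero exit (status 23) is expected
		if strings.Contains(string(out), "Utilisation du pilote docker") {
			fmt.Println("localized output detected")
			return
		}
		fmt.Println("no French banner found")
	}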

                                                
                                    
x
+
TestFunctional/parallel/StatusCmd (1.08s)

                                                
                                                
=== RUN   TestFunctional/parallel/StatusCmd
=== PAUSE TestFunctional/parallel/StatusCmd

                                                
                                                

                                                
                                                
=== CONT  TestFunctional/parallel/StatusCmd
functional_test.go:850: (dbg) Run:  out/minikube-linux-arm64 -p functional-493344 status
functional_test.go:856: (dbg) Run:  out/minikube-linux-arm64 -p functional-493344 status -f host:{{.Host}},kublet:{{.Kubelet}},apiserver:{{.APIServer}},kubeconfig:{{.Kubeconfig}}
functional_test.go:868: (dbg) Run:  out/minikube-linux-arm64 -p functional-493344 status -o json
--- PASS: TestFunctional/parallel/StatusCmd (1.08s)
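The status command supports both a Go-template format (-f) and JSON output (-o json), as exercised above. A small sketch that decodes the JSON form into a generic map and reads the same fields the template references (Host, Kubelet, APIServer, Kubeconfig); the field names are taken from the command line above rather than from a published schema:

	// Sketch: decode `status -o json` and print the fields the template form uses.
	package main

	import (
		"encoding/json"
		"fmt"
		"os/exec"
	)

	func main() {
		out, err := exec.Command("out/minikube-linux-arm64", "-p", "functional-493344",
			"status", "-o", "json").Output()
		if err != nil {
			// status encodes cluster state in its exit code, so the JSON on stdout
			// may still be present and usable even when err is non-nil.
			fmt.Println("non-zero exit:", err)
		}
		var st map[string]any
		if err := json.Unmarshal(out, &st); err != nil {
			panic(err)
		}
		fmt.Println(st["Host"], st["Kubelet"], st["APIServer"], st["Kubeconfig"])
	}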

                                                
                                    
x
+
TestFunctional/parallel/ServiceCmdConnect (12.68s)

                                                
                                                
=== RUN   TestFunctional/parallel/ServiceCmdConnect
=== PAUSE TestFunctional/parallel/ServiceCmdConnect

                                                
                                                

                                                
                                                
=== CONT  TestFunctional/parallel/ServiceCmdConnect
functional_test.go:1623: (dbg) Run:  kubectl --context functional-493344 create deployment hello-node-connect --image=registry.k8s.io/echoserver-arm:1.8
functional_test.go:1631: (dbg) Run:  kubectl --context functional-493344 expose deployment hello-node-connect --type=NodePort --port=8080
functional_test.go:1636: (dbg) TestFunctional/parallel/ServiceCmdConnect: waiting 10m0s for pods matching "app=hello-node-connect" in namespace "default" ...
helpers_test.go:344: "hello-node-connect-6f49f58cd5-r8n5j" [55ca83f3-522d-4bf2-8b75-5a501b32413e] Pending / Ready:ContainersNotReady (containers with unready status: [echoserver-arm]) / ContainersReady:ContainersNotReady (containers with unready status: [echoserver-arm])
helpers_test.go:344: "hello-node-connect-6f49f58cd5-r8n5j" [55ca83f3-522d-4bf2-8b75-5a501b32413e] Running
functional_test.go:1636: (dbg) TestFunctional/parallel/ServiceCmdConnect: app=hello-node-connect healthy within 12.003471608s
functional_test.go:1645: (dbg) Run:  out/minikube-linux-arm64 -p functional-493344 service hello-node-connect --url
functional_test.go:1651: found endpoint for hello-node-connect: http://192.168.49.2:30963
functional_test.go:1671: http://192.168.49.2:30963: success! body:

                                                
                                                

                                                
                                                
Hostname: hello-node-connect-6f49f58cd5-r8n5j

                                                
                                                
Pod Information:
	-no pod information available-

                                                
                                                
Server values:
	server_version=nginx: 1.13.3 - lua: 10008

                                                
                                                
Request Information:
	client_address=10.244.0.1
	method=GET
	real path=/
	query=
	request_version=1.1
	request_uri=http://192.168.49.2:8080/

                                                
                                                
Request Headers:
	accept-encoding=gzip
	host=192.168.49.2:30963
	user-agent=Go-http-client/1.1

                                                
                                                
Request Body:
	-no body in request-

                                                
                                                
--- PASS: TestFunctional/parallel/ServiceCmdConnect (12.68s)
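The connectivity check boils down to: create a deployment, expose it as a NodePort service, ask minikube for the URL, and fetch it. A sketch of the last two steps, using the names from the log above:

	// Sketch: resolve the NodePort URL for hello-node-connect via `service --url`
	// and issue a plain HTTP GET, mirroring the check above.
	package main

	import (
		"fmt"
		"io"
		"net/http"
		"os/exec"
		"strings"
	)

	func main() {
		out, err := exec.Command("out/minikube-linux-arm64", "-p", "functional-493344",
			"service", "hello-node-connect", "--url").Output()
		if err != nil {
			panic(err)
		}
		url := strings.TrimSpace(string(out))
		resp, err := http.Get(url)
		if err != nil {
			panic(err)
		}
		defer resp.Body.Close()
		body, _ := io.ReadAll(resp.Body)
		fmt.Printf("GET %s -> %d\n%s", url, resp.StatusCode, body)
	}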

                                                
                                    
x
+
TestFunctional/parallel/AddonsCmd (0.2s)

                                                
                                                
=== RUN   TestFunctional/parallel/AddonsCmd
=== PAUSE TestFunctional/parallel/AddonsCmd

                                                
                                                

                                                
                                                
=== CONT  TestFunctional/parallel/AddonsCmd
functional_test.go:1686: (dbg) Run:  out/minikube-linux-arm64 -p functional-493344 addons list
functional_test.go:1698: (dbg) Run:  out/minikube-linux-arm64 -p functional-493344 addons list -o json
--- PASS: TestFunctional/parallel/AddonsCmd (0.20s)

                                                
                                    
x
+
TestFunctional/parallel/PersistentVolumeClaim (26.87s)

                                                
                                                
=== RUN   TestFunctional/parallel/PersistentVolumeClaim
=== PAUSE TestFunctional/parallel/PersistentVolumeClaim

                                                
                                                

                                                
                                                
=== CONT  TestFunctional/parallel/PersistentVolumeClaim
functional_test_pvc_test.go:44: (dbg) TestFunctional/parallel/PersistentVolumeClaim: waiting 4m0s for pods matching "integration-test=storage-provisioner" in namespace "kube-system" ...
helpers_test.go:344: "storage-provisioner" [04bc6e80-9490-4299-a182-95f7d1ea85b8] Running
functional_test_pvc_test.go:44: (dbg) TestFunctional/parallel/PersistentVolumeClaim: integration-test=storage-provisioner healthy within 5.004384623s
functional_test_pvc_test.go:49: (dbg) Run:  kubectl --context functional-493344 get storageclass -o=json
functional_test_pvc_test.go:69: (dbg) Run:  kubectl --context functional-493344 apply -f testdata/storage-provisioner/pvc.yaml
functional_test_pvc_test.go:76: (dbg) Run:  kubectl --context functional-493344 get pvc myclaim -o=json
functional_test_pvc_test.go:125: (dbg) Run:  kubectl --context functional-493344 apply -f testdata/storage-provisioner/pod.yaml
functional_test_pvc_test.go:130: (dbg) TestFunctional/parallel/PersistentVolumeClaim: waiting 3m0s for pods matching "test=storage-provisioner" in namespace "default" ...
helpers_test.go:344: "sp-pod" [21e703c8-f29a-49b9-9fa0-8ae03c0006b7] Pending
helpers_test.go:344: "sp-pod" [21e703c8-f29a-49b9-9fa0-8ae03c0006b7] Pending / Ready:ContainersNotReady (containers with unready status: [myfrontend]) / ContainersReady:ContainersNotReady (containers with unready status: [myfrontend])
helpers_test.go:344: "sp-pod" [21e703c8-f29a-49b9-9fa0-8ae03c0006b7] Running
functional_test_pvc_test.go:130: (dbg) TestFunctional/parallel/PersistentVolumeClaim: test=storage-provisioner healthy within 12.003878678s
functional_test_pvc_test.go:100: (dbg) Run:  kubectl --context functional-493344 exec sp-pod -- touch /tmp/mount/foo
functional_test_pvc_test.go:106: (dbg) Run:  kubectl --context functional-493344 delete -f testdata/storage-provisioner/pod.yaml
functional_test_pvc_test.go:125: (dbg) Run:  kubectl --context functional-493344 apply -f testdata/storage-provisioner/pod.yaml
functional_test_pvc_test.go:130: (dbg) TestFunctional/parallel/PersistentVolumeClaim: waiting 3m0s for pods matching "test=storage-provisioner" in namespace "default" ...
helpers_test.go:344: "sp-pod" [18479964-816e-482c-8a25-8865ff445dd3] Pending
helpers_test.go:344: "sp-pod" [18479964-816e-482c-8a25-8865ff445dd3] Pending / Ready:ContainersNotReady (containers with unready status: [myfrontend]) / ContainersReady:ContainersNotReady (containers with unready status: [myfrontend])
helpers_test.go:344: "sp-pod" [18479964-816e-482c-8a25-8865ff445dd3] Running
functional_test_pvc_test.go:130: (dbg) TestFunctional/parallel/PersistentVolumeClaim: test=storage-provisioner healthy within 8.011496935s
functional_test_pvc_test.go:114: (dbg) Run:  kubectl --context functional-493344 exec sp-pod -- ls /tmp/mount
--- PASS: TestFunctional/parallel/PersistentVolumeClaim (26.87s)
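What this test actually proves is that data written into the PVC-backed volume survives deleting and recreating the pod. A sketch of that write/delete/recreate/read sequence with the same kubectl invocations; the wait for the recreated pod to become Ready is elided:

	// Sketch of the persistence check: write a marker file through the pod,
	// delete the pod, recreate it from the same manifest, and confirm the file
	// survived on the claim.
	package main

	import (
		"fmt"
		"os/exec"
	)

	func kc(args ...string) string {
		full := append([]string{"--context", "functional-493344"}, args...)
		out, err := exec.Command("kubectl", full...).CombinedOutput()
		if err != nil {
			fmt.Printf("kubectl %v: %v\n", args, err)
		}
		return string(out)
	}

	func main() {
		kc("exec", "sp-pod", "--", "touch", "/tmp/mount/foo")
		kc("delete", "-f", "testdata/storage-provisioner/pod.yaml")
		kc("apply", "-f", "testdata/storage-provisioner/pod.yaml")
		// ...wait here for sp-pod to be Running/Ready again...
		fmt.Print(kc("exec", "sp-pod", "--", "ls", "/tmp/mount"))
	}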

                                                
                                    
x
+
TestFunctional/parallel/SSHCmd (0.66s)

                                                
                                                
=== RUN   TestFunctional/parallel/SSHCmd
=== PAUSE TestFunctional/parallel/SSHCmd

                                                
                                                

                                                
                                                
=== CONT  TestFunctional/parallel/SSHCmd
functional_test.go:1721: (dbg) Run:  out/minikube-linux-arm64 -p functional-493344 ssh "echo hello"
functional_test.go:1738: (dbg) Run:  out/minikube-linux-arm64 -p functional-493344 ssh "cat /etc/hostname"
--- PASS: TestFunctional/parallel/SSHCmd (0.66s)

                                                
                                    
x
+
TestFunctional/parallel/CpCmd (2.31s)

                                                
                                                
=== RUN   TestFunctional/parallel/CpCmd
=== PAUSE TestFunctional/parallel/CpCmd

                                                
                                                

                                                
                                                
=== CONT  TestFunctional/parallel/CpCmd
helpers_test.go:556: (dbg) Run:  out/minikube-linux-arm64 -p functional-493344 cp testdata/cp-test.txt /home/docker/cp-test.txt
helpers_test.go:534: (dbg) Run:  out/minikube-linux-arm64 -p functional-493344 ssh -n functional-493344 "sudo cat /home/docker/cp-test.txt"
helpers_test.go:556: (dbg) Run:  out/minikube-linux-arm64 -p functional-493344 cp functional-493344:/home/docker/cp-test.txt /tmp/TestFunctionalparallelCpCmd1241716154/001/cp-test.txt
helpers_test.go:534: (dbg) Run:  out/minikube-linux-arm64 -p functional-493344 ssh -n functional-493344 "sudo cat /home/docker/cp-test.txt"
helpers_test.go:556: (dbg) Run:  out/minikube-linux-arm64 -p functional-493344 cp testdata/cp-test.txt /tmp/does/not/exist/cp-test.txt
helpers_test.go:534: (dbg) Run:  out/minikube-linux-arm64 -p functional-493344 ssh -n functional-493344 "sudo cat /tmp/does/not/exist/cp-test.txt"
--- PASS: TestFunctional/parallel/CpCmd (2.31s)
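The cp checks are round-trips: copy a file into the node, read it back over ssh, and compare. A sketch of one such round-trip with the paths used above:

	// Sketch of the cp round-trip: copy a local file into the node, read it
	// back over ssh, and compare the contents.
	package main

	import (
		"fmt"
		"os"
		"os/exec"
	)

	func main() {
		want, err := os.ReadFile("testdata/cp-test.txt")
		if err != nil {
			panic(err)
		}
		if out, err := exec.Command("out/minikube-linux-arm64", "-p", "functional-493344",
			"cp", "testdata/cp-test.txt", "/home/docker/cp-test.txt").CombinedOutput(); err != nil {
			panic(fmt.Sprintf("cp failed: %v: %s", err, out))
		}
		got, err := exec.Command("out/minikube-linux-arm64", "-p", "functional-493344",
			"ssh", "sudo cat /home/docker/cp-test.txt").Output()
		if err != nil {
			panic(err)
		}
		fmt.Println("round-trip matches:", string(got) == string(want))
	}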

                                                
                                    
x
+
TestFunctional/parallel/FileSync (0.34s)

                                                
                                                
=== RUN   TestFunctional/parallel/FileSync
=== PAUSE TestFunctional/parallel/FileSync

                                                
                                                

                                                
                                                
=== CONT  TestFunctional/parallel/FileSync
functional_test.go:1925: Checking for existence of /etc/test/nested/copy/7784/hosts within VM
functional_test.go:1927: (dbg) Run:  out/minikube-linux-arm64 -p functional-493344 ssh "sudo cat /etc/test/nested/copy/7784/hosts"
functional_test.go:1932: file sync test content: Test file for checking file sync process
--- PASS: TestFunctional/parallel/FileSync (0.34s)

                                                
                                    
x
+
TestFunctional/parallel/CertSync (2.08s)

                                                
                                                
=== RUN   TestFunctional/parallel/CertSync
=== PAUSE TestFunctional/parallel/CertSync

                                                
                                                

                                                
                                                
=== CONT  TestFunctional/parallel/CertSync
functional_test.go:1968: Checking for existence of /etc/ssl/certs/7784.pem within VM
functional_test.go:1969: (dbg) Run:  out/minikube-linux-arm64 -p functional-493344 ssh "sudo cat /etc/ssl/certs/7784.pem"
functional_test.go:1968: Checking for existence of /usr/share/ca-certificates/7784.pem within VM
functional_test.go:1969: (dbg) Run:  out/minikube-linux-arm64 -p functional-493344 ssh "sudo cat /usr/share/ca-certificates/7784.pem"
functional_test.go:1968: Checking for existence of /etc/ssl/certs/51391683.0 within VM
functional_test.go:1969: (dbg) Run:  out/minikube-linux-arm64 -p functional-493344 ssh "sudo cat /etc/ssl/certs/51391683.0"
functional_test.go:1995: Checking for existence of /etc/ssl/certs/77842.pem within VM
functional_test.go:1996: (dbg) Run:  out/minikube-linux-arm64 -p functional-493344 ssh "sudo cat /etc/ssl/certs/77842.pem"
functional_test.go:1995: Checking for existence of /usr/share/ca-certificates/77842.pem within VM
functional_test.go:1996: (dbg) Run:  out/minikube-linux-arm64 -p functional-493344 ssh "sudo cat /usr/share/ca-certificates/77842.pem"
functional_test.go:1995: Checking for existence of /etc/ssl/certs/3ec20f2e.0 within VM
functional_test.go:1996: (dbg) Run:  out/minikube-linux-arm64 -p functional-493344 ssh "sudo cat /etc/ssl/certs/3ec20f2e.0"
--- PASS: TestFunctional/parallel/CertSync (2.08s)

                                                
                                    
x
+
TestFunctional/parallel/NodeLabels (0.11s)

                                                
                                                
=== RUN   TestFunctional/parallel/NodeLabels
=== PAUSE TestFunctional/parallel/NodeLabels

                                                
                                                

                                                
                                                
=== CONT  TestFunctional/parallel/NodeLabels
functional_test.go:218: (dbg) Run:  kubectl --context functional-493344 get nodes --output=go-template "--template='{{range $k, $v := (index .items 0).metadata.labels}}{{$k}} {{end}}'"
--- PASS: TestFunctional/parallel/NodeLabels (0.11s)

                                                
                                    
x
+
TestFunctional/parallel/NonActiveRuntimeDisabled (0.35s)

                                                
                                                
=== RUN   TestFunctional/parallel/NonActiveRuntimeDisabled
=== PAUSE TestFunctional/parallel/NonActiveRuntimeDisabled

                                                
                                                

                                                
                                                
=== CONT  TestFunctional/parallel/NonActiveRuntimeDisabled
functional_test.go:2023: (dbg) Run:  out/minikube-linux-arm64 -p functional-493344 ssh "sudo systemctl is-active crio"
functional_test.go:2023: (dbg) Non-zero exit: out/minikube-linux-arm64 -p functional-493344 ssh "sudo systemctl is-active crio": exit status 1 (346.220078ms)

                                                
                                                
-- stdout --
	inactive

                                                
                                                
-- /stdout --
** stderr ** 
	ssh: Process exited with status 3

                                                
                                                
** /stderr **
--- PASS: TestFunctional/parallel/NonActiveRuntimeDisabled (0.35s)
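The non-zero exit here is expected rather than a failure: on a docker-runtime cluster `systemctl is-active crio` prints "inactive" and exits non-zero (status 3 in the run above), which is exactly what the test wants to see. A sketch that encodes that expectation:

	// Sketch: confirm crio is not the active runtime by looking at both the
	// output and the exit status of `systemctl is-active crio` inside the node.
	package main

	import (
		"fmt"
		"os/exec"
		"strings"
	)

	func main() {
		out, err := exec.Command("out/minikube-linux-arm64", "-p", "functional-493344",
			"ssh", "sudo systemctl is-active crio").CombinedOutput()
		state := strings.TrimSpace(string(out))
		if err != nil && strings.Contains(state, "inactive") {
			fmt.Println("crio is inactive, as expected on a docker-runtime cluster")
			return
		}
		fmt.Println("unexpected state:", state)
	}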

                                                
                                    
x
+
TestFunctional/parallel/License (0.24s)

                                                
                                                
=== RUN   TestFunctional/parallel/License
=== PAUSE TestFunctional/parallel/License

                                                
                                                

                                                
                                                
=== CONT  TestFunctional/parallel/License
functional_test.go:2284: (dbg) Run:  out/minikube-linux-arm64 license
--- PASS: TestFunctional/parallel/License (0.24s)

                                                
                                    
x
+
TestFunctional/parallel/TunnelCmd/serial/RunSecondTunnel (0.61s)

                                                
                                                
=== RUN   TestFunctional/parallel/TunnelCmd/serial/RunSecondTunnel
functional_test_tunnel_test.go:154: (dbg) daemon: [out/minikube-linux-arm64 -p functional-493344 tunnel --alsologtostderr]
functional_test_tunnel_test.go:154: (dbg) daemon: [out/minikube-linux-arm64 -p functional-493344 tunnel --alsologtostderr]
functional_test_tunnel_test.go:194: (dbg) stopping [out/minikube-linux-arm64 -p functional-493344 tunnel --alsologtostderr] ...
helpers_test.go:508: unable to kill pid 43249: os: process already finished
helpers_test.go:502: unable to terminate pid 43080: os: process already finished
functional_test_tunnel_test.go:194: (dbg) stopping [out/minikube-linux-arm64 -p functional-493344 tunnel --alsologtostderr] ...
helpers_test.go:490: unable to find parent, assuming dead: process does not exist
--- PASS: TestFunctional/parallel/TunnelCmd/serial/RunSecondTunnel (0.61s)

                                                
                                    
x
+
TestFunctional/parallel/TunnelCmd/serial/StartTunnel (0s)

                                                
                                                
=== RUN   TestFunctional/parallel/TunnelCmd/serial/StartTunnel
functional_test_tunnel_test.go:129: (dbg) daemon: [out/minikube-linux-arm64 -p functional-493344 tunnel --alsologtostderr]
--- PASS: TestFunctional/parallel/TunnelCmd/serial/StartTunnel (0.00s)

                                                
                                    
x
+
TestFunctional/parallel/TunnelCmd/serial/WaitService/Setup (8.44s)

                                                
                                                
=== RUN   TestFunctional/parallel/TunnelCmd/serial/WaitService/Setup
functional_test_tunnel_test.go:212: (dbg) Run:  kubectl --context functional-493344 apply -f testdata/testsvc.yaml
functional_test_tunnel_test.go:216: (dbg) TestFunctional/parallel/TunnelCmd/serial/WaitService/Setup: waiting 4m0s for pods matching "run=nginx-svc" in namespace "default" ...
helpers_test.go:344: "nginx-svc" [03ed8a5a-dc72-4b92-94af-d5554cd266eb] Pending / Ready:ContainersNotReady (containers with unready status: [nginx]) / ContainersReady:ContainersNotReady (containers with unready status: [nginx])
helpers_test.go:344: "nginx-svc" [03ed8a5a-dc72-4b92-94af-d5554cd266eb] Running
functional_test_tunnel_test.go:216: (dbg) TestFunctional/parallel/TunnelCmd/serial/WaitService/Setup: run=nginx-svc healthy within 8.003808679s
--- PASS: TestFunctional/parallel/TunnelCmd/serial/WaitService/Setup (8.44s)

                                                
                                    
x
+
TestFunctional/parallel/TunnelCmd/serial/WaitService/IngressIP (0.11s)

                                                
                                                
=== RUN   TestFunctional/parallel/TunnelCmd/serial/WaitService/IngressIP
functional_test_tunnel_test.go:234: (dbg) Run:  kubectl --context functional-493344 get svc nginx-svc -o jsonpath={.status.loadBalancer.ingress[0].ip}
--- PASS: TestFunctional/parallel/TunnelCmd/serial/WaitService/IngressIP (0.11s)

                                                
                                    
x
+
TestFunctional/parallel/TunnelCmd/serial/AccessDirect (0s)

                                                
                                                
=== RUN   TestFunctional/parallel/TunnelCmd/serial/AccessDirect
functional_test_tunnel_test.go:299: tunnel at http://10.97.137.131 is working!
--- PASS: TestFunctional/parallel/TunnelCmd/serial/AccessDirect (0.00s)
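With `minikube tunnel` running, the LoadBalancer service receives a reachable ingress IP (10.97.137.131 above). A sketch that reads that IP with the same jsonpath query and fetches the page directly:

	// Sketch: with `minikube tunnel` already running, read the LoadBalancer
	// ingress IP assigned to nginx-svc and fetch the page from it.
	package main

	import (
		"fmt"
		"io"
		"net/http"
		"os/exec"
		"strings"
	)

	func main() {
		out, err := exec.Command("kubectl", "--context", "functional-493344",
			"get", "svc", "nginx-svc",
			"-o", "jsonpath={.status.loadBalancer.ingress[0].ip}").Output()
		if err != nil {
			panic(err)
		}
		ip := strings.TrimSpace(string(out))
		resp, err := http.Get("http://" + ip)
		if err != nil {
			panic(err)
		}
		defer resp.Body.Close()
		body, _ := io.ReadAll(resp.Body)
		fmt.Printf("GET http://%s -> %d (%d bytes)\n", ip, resp.StatusCode, len(body))
	}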

                                                
                                    
x
+
TestFunctional/parallel/TunnelCmd/serial/DeleteTunnel (0.11s)

                                                
                                                
=== RUN   TestFunctional/parallel/TunnelCmd/serial/DeleteTunnel
functional_test_tunnel_test.go:434: (dbg) stopping [out/minikube-linux-arm64 -p functional-493344 tunnel --alsologtostderr] ...
--- PASS: TestFunctional/parallel/TunnelCmd/serial/DeleteTunnel (0.11s)

                                                
                                    
x
+
TestFunctional/parallel/ServiceCmd/DeployApp (7.26s)

                                                
                                                
=== RUN   TestFunctional/parallel/ServiceCmd/DeployApp
functional_test.go:1433: (dbg) Run:  kubectl --context functional-493344 create deployment hello-node --image=registry.k8s.io/echoserver-arm:1.8
functional_test.go:1441: (dbg) Run:  kubectl --context functional-493344 expose deployment hello-node --type=NodePort --port=8080
functional_test.go:1446: (dbg) TestFunctional/parallel/ServiceCmd/DeployApp: waiting 10m0s for pods matching "app=hello-node" in namespace "default" ...
helpers_test.go:344: "hello-node-65f5d5cc78-k97mn" [ddf7f952-a479-46b8-b40a-7a6850437d42] Pending / Ready:ContainersNotReady (containers with unready status: [echoserver-arm]) / ContainersReady:ContainersNotReady (containers with unready status: [echoserver-arm])
helpers_test.go:344: "hello-node-65f5d5cc78-k97mn" [ddf7f952-a479-46b8-b40a-7a6850437d42] Running
E0620 17:11:58.882880    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/addons-705802/client.crt: no such file or directory
functional_test.go:1446: (dbg) TestFunctional/parallel/ServiceCmd/DeployApp: app=hello-node healthy within 7.003964853s
--- PASS: TestFunctional/parallel/ServiceCmd/DeployApp (7.26s)

                                                
                                    
x
+
TestFunctional/parallel/ProfileCmd/profile_not_create (0.52s)

                                                
                                                
=== RUN   TestFunctional/parallel/ProfileCmd/profile_not_create
functional_test.go:1266: (dbg) Run:  out/minikube-linux-arm64 profile lis
functional_test.go:1271: (dbg) Run:  out/minikube-linux-arm64 profile list --output json
--- PASS: TestFunctional/parallel/ProfileCmd/profile_not_create (0.52s)

                                                
                                    
x
+
TestFunctional/parallel/ProfileCmd/profile_list (0.39s)

                                                
                                                
=== RUN   TestFunctional/parallel/ProfileCmd/profile_list
functional_test.go:1306: (dbg) Run:  out/minikube-linux-arm64 profile list
functional_test.go:1311: Took "339.193749ms" to run "out/minikube-linux-arm64 profile list"
functional_test.go:1320: (dbg) Run:  out/minikube-linux-arm64 profile list -l
functional_test.go:1325: Took "49.699278ms" to run "out/minikube-linux-arm64 profile list -l"
--- PASS: TestFunctional/parallel/ProfileCmd/profile_list (0.39s)

                                                
                                    
x
+
TestFunctional/parallel/ProfileCmd/profile_json_output (0.37s)

                                                
                                                
=== RUN   TestFunctional/parallel/ProfileCmd/profile_json_output
functional_test.go:1357: (dbg) Run:  out/minikube-linux-arm64 profile list -o json
functional_test.go:1362: Took "316.323782ms" to run "out/minikube-linux-arm64 profile list -o json"
functional_test.go:1370: (dbg) Run:  out/minikube-linux-arm64 profile list -o json --light
functional_test.go:1375: Took "54.12418ms" to run "out/minikube-linux-arm64 profile list -o json --light"
--- PASS: TestFunctional/parallel/ProfileCmd/profile_json_output (0.37s)

                                                
                                    
x
+
TestFunctional/parallel/MountCmd/any-port (8.58s)

                                                
                                                
=== RUN   TestFunctional/parallel/MountCmd/any-port
functional_test_mount_test.go:73: (dbg) daemon: [out/minikube-linux-arm64 mount -p functional-493344 /tmp/TestFunctionalparallelMountCmdany-port3078866329/001:/mount-9p --alsologtostderr -v=1]
functional_test_mount_test.go:107: wrote "test-1718903521819070429" to /tmp/TestFunctionalparallelMountCmdany-port3078866329/001/created-by-test
functional_test_mount_test.go:107: wrote "test-1718903521819070429" to /tmp/TestFunctionalparallelMountCmdany-port3078866329/001/created-by-test-removed-by-pod
functional_test_mount_test.go:107: wrote "test-1718903521819070429" to /tmp/TestFunctionalparallelMountCmdany-port3078866329/001/test-1718903521819070429
functional_test_mount_test.go:115: (dbg) Run:  out/minikube-linux-arm64 -p functional-493344 ssh "findmnt -T /mount-9p | grep 9p"
functional_test_mount_test.go:115: (dbg) Non-zero exit: out/minikube-linux-arm64 -p functional-493344 ssh "findmnt -T /mount-9p | grep 9p": exit status 1 (392.660709ms)

                                                
                                                
** stderr ** 
	ssh: Process exited with status 1

                                                
                                                
** /stderr **
functional_test_mount_test.go:115: (dbg) Run:  out/minikube-linux-arm64 -p functional-493344 ssh "findmnt -T /mount-9p | grep 9p"
functional_test_mount_test.go:129: (dbg) Run:  out/minikube-linux-arm64 -p functional-493344 ssh -- ls -la /mount-9p
functional_test_mount_test.go:133: guest mount directory contents
total 2
-rw-r--r-- 1 docker docker 24 Jun 20 17:12 created-by-test
-rw-r--r-- 1 docker docker 24 Jun 20 17:12 created-by-test-removed-by-pod
-rw-r--r-- 1 docker docker 24 Jun 20 17:12 test-1718903521819070429
functional_test_mount_test.go:137: (dbg) Run:  out/minikube-linux-arm64 -p functional-493344 ssh cat /mount-9p/test-1718903521819070429
functional_test_mount_test.go:148: (dbg) Run:  kubectl --context functional-493344 replace --force -f testdata/busybox-mount-test.yaml
functional_test_mount_test.go:153: (dbg) TestFunctional/parallel/MountCmd/any-port: waiting 4m0s for pods matching "integration-test=busybox-mount" in namespace "default" ...
helpers_test.go:344: "busybox-mount" [8ac656c2-6232-4ba7-8f93-757d492461b7] Pending
helpers_test.go:344: "busybox-mount" [8ac656c2-6232-4ba7-8f93-757d492461b7] Pending / Ready:ContainersNotReady (containers with unready status: [mount-munger]) / ContainersReady:ContainersNotReady (containers with unready status: [mount-munger])
helpers_test.go:344: "busybox-mount" [8ac656c2-6232-4ba7-8f93-757d492461b7] Pending / Initialized:PodCompleted / Ready:PodCompleted / ContainersReady:PodCompleted
helpers_test.go:344: "busybox-mount" [8ac656c2-6232-4ba7-8f93-757d492461b7] Succeeded / Initialized:PodCompleted / Ready:PodCompleted / ContainersReady:PodCompleted
functional_test_mount_test.go:153: (dbg) TestFunctional/parallel/MountCmd/any-port: integration-test=busybox-mount healthy within 5.005331579s
functional_test_mount_test.go:169: (dbg) Run:  kubectl --context functional-493344 logs busybox-mount
functional_test_mount_test.go:181: (dbg) Run:  out/minikube-linux-arm64 -p functional-493344 ssh stat /mount-9p/created-by-test
functional_test_mount_test.go:181: (dbg) Run:  out/minikube-linux-arm64 -p functional-493344 ssh stat /mount-9p/created-by-pod
functional_test_mount_test.go:90: (dbg) Run:  out/minikube-linux-arm64 -p functional-493344 ssh "sudo umount -f /mount-9p"
functional_test_mount_test.go:94: (dbg) stopping [out/minikube-linux-arm64 mount -p functional-493344 /tmp/TestFunctionalparallelMountCmdany-port3078866329/001:/mount-9p --alsologtostderr -v=1] ...
--- PASS: TestFunctional/parallel/MountCmd/any-port (8.58s)
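`minikube mount` runs as a background daemon, so the first findmnt probe above failed simply because the 9p mount was not established yet; the harness retries until it appears. A sketch of that start-then-poll pattern; the host directory is a stand-in for the temp dir the test generates:

	// Sketch: start `minikube mount` in the background, then poll findmnt over
	// ssh until the 9p mount shows up.
	package main

	import (
		"fmt"
		"os/exec"
		"time"
	)

	func main() {
		// /tmp/example-mount is a hypothetical host directory.
		mount := exec.Command("out/minikube-linux-arm64", "mount", "-p", "functional-493344",
			"/tmp/example-mount:/mount-9p")
		if err := mount.Start(); err != nil {
			panic(err)
		}
		defer mount.Process.Kill()

		for i := 0; i < 30; i++ {
			probe := exec.Command("out/minikube-linux-arm64", "-p", "functional-493344",
				"ssh", "findmnt -T /mount-9p | grep 9p")
			if out, err := probe.CombinedOutput(); err == nil {
				fmt.Printf("mounted:\n%s", out)
				return
			}
			time.Sleep(time.Second)
		}
		fmt.Println("mount never appeared")
	}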

                                                
                                    
x
+
TestFunctional/parallel/ServiceCmd/List (0.68s)

                                                
                                                
=== RUN   TestFunctional/parallel/ServiceCmd/List
functional_test.go:1455: (dbg) Run:  out/minikube-linux-arm64 -p functional-493344 service list
--- PASS: TestFunctional/parallel/ServiceCmd/List (0.68s)

                                                
                                    
x
+
TestFunctional/parallel/ServiceCmd/JSONOutput (0.52s)

                                                
                                                
=== RUN   TestFunctional/parallel/ServiceCmd/JSONOutput
functional_test.go:1485: (dbg) Run:  out/minikube-linux-arm64 -p functional-493344 service list -o json
functional_test.go:1490: Took "519.128614ms" to run "out/minikube-linux-arm64 -p functional-493344 service list -o json"
--- PASS: TestFunctional/parallel/ServiceCmd/JSONOutput (0.52s)

                                                
                                    
x
+
TestFunctional/parallel/ServiceCmd/HTTPS (0.5s)

                                                
                                                
=== RUN   TestFunctional/parallel/ServiceCmd/HTTPS
functional_test.go:1505: (dbg) Run:  out/minikube-linux-arm64 -p functional-493344 service --namespace=default --https --url hello-node
functional_test.go:1518: found endpoint: https://192.168.49.2:30567
--- PASS: TestFunctional/parallel/ServiceCmd/HTTPS (0.50s)

                                                
                                    
x
+
TestFunctional/parallel/ServiceCmd/Format (0.53s)

                                                
                                                
=== RUN   TestFunctional/parallel/ServiceCmd/Format
functional_test.go:1536: (dbg) Run:  out/minikube-linux-arm64 -p functional-493344 service hello-node --url --format={{.IP}}
--- PASS: TestFunctional/parallel/ServiceCmd/Format (0.53s)

                                                
                                    
x
+
TestFunctional/parallel/ServiceCmd/URL (0.41s)

                                                
                                                
=== RUN   TestFunctional/parallel/ServiceCmd/URL
functional_test.go:1555: (dbg) Run:  out/minikube-linux-arm64 -p functional-493344 service hello-node --url
functional_test.go:1561: found endpoint for hello-node: http://192.168.49.2:30567
--- PASS: TestFunctional/parallel/ServiceCmd/URL (0.41s)

                                                
                                    
x
+
TestFunctional/parallel/MountCmd/specific-port (2.07s)

                                                
                                                
=== RUN   TestFunctional/parallel/MountCmd/specific-port
functional_test_mount_test.go:213: (dbg) daemon: [out/minikube-linux-arm64 mount -p functional-493344 /tmp/TestFunctionalparallelMountCmdspecific-port2300270154/001:/mount-9p --alsologtostderr -v=1 --port 46464]
functional_test_mount_test.go:243: (dbg) Run:  out/minikube-linux-arm64 -p functional-493344 ssh "findmnt -T /mount-9p | grep 9p"
functional_test_mount_test.go:243: (dbg) Non-zero exit: out/minikube-linux-arm64 -p functional-493344 ssh "findmnt -T /mount-9p | grep 9p": exit status 1 (440.812072ms)

                                                
                                                
** stderr ** 
	ssh: Process exited with status 1

                                                
                                                
** /stderr **
functional_test_mount_test.go:243: (dbg) Run:  out/minikube-linux-arm64 -p functional-493344 ssh "findmnt -T /mount-9p | grep 9p"
functional_test_mount_test.go:257: (dbg) Run:  out/minikube-linux-arm64 -p functional-493344 ssh -- ls -la /mount-9p
functional_test_mount_test.go:261: guest mount directory contents
total 0
functional_test_mount_test.go:263: (dbg) stopping [out/minikube-linux-arm64 mount -p functional-493344 /tmp/TestFunctionalparallelMountCmdspecific-port2300270154/001:/mount-9p --alsologtostderr -v=1 --port 46464] ...
functional_test_mount_test.go:264: reading mount text
functional_test_mount_test.go:278: done reading mount text
functional_test_mount_test.go:230: (dbg) Run:  out/minikube-linux-arm64 -p functional-493344 ssh "sudo umount -f /mount-9p"
functional_test_mount_test.go:230: (dbg) Non-zero exit: out/minikube-linux-arm64 -p functional-493344 ssh "sudo umount -f /mount-9p": exit status 1 (325.820664ms)

                                                
                                                
-- stdout --
	umount: /mount-9p: not mounted.

                                                
                                                
-- /stdout --
** stderr ** 
	ssh: Process exited with status 32

                                                
                                                
** /stderr **
functional_test_mount_test.go:232: "out/minikube-linux-arm64 -p functional-493344 ssh \"sudo umount -f /mount-9p\"": exit status 1
functional_test_mount_test.go:234: (dbg) stopping [out/minikube-linux-arm64 mount -p functional-493344 /tmp/TestFunctionalparallelMountCmdspecific-port2300270154/001:/mount-9p --alsologtostderr -v=1 --port 46464] ...
--- PASS: TestFunctional/parallel/MountCmd/specific-port (2.07s)

                                                
                                    
x
+
TestFunctional/parallel/MountCmd/VerifyCleanup (2.7s)

                                                
                                                
=== RUN   TestFunctional/parallel/MountCmd/VerifyCleanup
functional_test_mount_test.go:298: (dbg) daemon: [out/minikube-linux-arm64 mount -p functional-493344 /tmp/TestFunctionalparallelMountCmdVerifyCleanup2980384325/001:/mount1 --alsologtostderr -v=1]
functional_test_mount_test.go:298: (dbg) daemon: [out/minikube-linux-arm64 mount -p functional-493344 /tmp/TestFunctionalparallelMountCmdVerifyCleanup2980384325/001:/mount2 --alsologtostderr -v=1]
functional_test_mount_test.go:298: (dbg) daemon: [out/minikube-linux-arm64 mount -p functional-493344 /tmp/TestFunctionalparallelMountCmdVerifyCleanup2980384325/001:/mount3 --alsologtostderr -v=1]
functional_test_mount_test.go:325: (dbg) Run:  out/minikube-linux-arm64 -p functional-493344 ssh "findmnt -T" /mount1
functional_test_mount_test.go:325: (dbg) Non-zero exit: out/minikube-linux-arm64 -p functional-493344 ssh "findmnt -T" /mount1: exit status 1 (813.516941ms)

                                                
                                                
** stderr ** 
	ssh: Process exited with status 1

                                                
                                                
** /stderr **
functional_test_mount_test.go:325: (dbg) Run:  out/minikube-linux-arm64 -p functional-493344 ssh "findmnt -T" /mount1
functional_test_mount_test.go:325: (dbg) Run:  out/minikube-linux-arm64 -p functional-493344 ssh "findmnt -T" /mount2
functional_test_mount_test.go:325: (dbg) Run:  out/minikube-linux-arm64 -p functional-493344 ssh "findmnt -T" /mount3
functional_test_mount_test.go:370: (dbg) Run:  out/minikube-linux-arm64 mount -p functional-493344 --kill=true
functional_test_mount_test.go:313: (dbg) stopping [out/minikube-linux-arm64 mount -p functional-493344 /tmp/TestFunctionalparallelMountCmdVerifyCleanup2980384325/001:/mount1 --alsologtostderr -v=1] ...
helpers_test.go:490: unable to find parent, assuming dead: process does not exist
functional_test_mount_test.go:313: (dbg) stopping [out/minikube-linux-arm64 mount -p functional-493344 /tmp/TestFunctionalparallelMountCmdVerifyCleanup2980384325/001:/mount2 --alsologtostderr -v=1] ...
helpers_test.go:490: unable to find parent, assuming dead: process does not exist
functional_test_mount_test.go:313: (dbg) stopping [out/minikube-linux-arm64 mount -p functional-493344 /tmp/TestFunctionalparallelMountCmdVerifyCleanup2980384325/001:/mount3 --alsologtostderr -v=1] ...
helpers_test.go:490: unable to find parent, assuming dead: process does not exist
--- PASS: TestFunctional/parallel/MountCmd/VerifyCleanup (2.70s)

                                                
                                    
x
+
TestFunctional/parallel/Version/short (0.07s)

                                                
                                                
=== RUN   TestFunctional/parallel/Version/short
=== PAUSE TestFunctional/parallel/Version/short

                                                
                                                

                                                
                                                
=== CONT  TestFunctional/parallel/Version/short
functional_test.go:2252: (dbg) Run:  out/minikube-linux-arm64 -p functional-493344 version --short
--- PASS: TestFunctional/parallel/Version/short (0.07s)

                                                
                                    
x
+
TestFunctional/parallel/Version/components (0.99s)

                                                
                                                
=== RUN   TestFunctional/parallel/Version/components
=== PAUSE TestFunctional/parallel/Version/components

                                                
                                                

                                                
                                                
=== CONT  TestFunctional/parallel/Version/components
functional_test.go:2266: (dbg) Run:  out/minikube-linux-arm64 -p functional-493344 version -o=json --components
--- PASS: TestFunctional/parallel/Version/components (0.99s)

                                                
                                    
x
+
TestFunctional/parallel/ImageCommands/ImageListShort (0.25s)

                                                
                                                
=== RUN   TestFunctional/parallel/ImageCommands/ImageListShort
=== PAUSE TestFunctional/parallel/ImageCommands/ImageListShort

                                                
                                                

                                                
                                                
=== CONT  TestFunctional/parallel/ImageCommands/ImageListShort
functional_test.go:260: (dbg) Run:  out/minikube-linux-arm64 -p functional-493344 image ls --format short --alsologtostderr
functional_test.go:265: (dbg) Stdout: out/minikube-linux-arm64 -p functional-493344 image ls --format short --alsologtostderr:
registry.k8s.io/pause:latest
registry.k8s.io/pause:3.9
registry.k8s.io/pause:3.3
registry.k8s.io/pause:3.1
registry.k8s.io/kube-scheduler:v1.30.2
registry.k8s.io/kube-proxy:v1.30.2
registry.k8s.io/kube-controller-manager:v1.30.2
registry.k8s.io/kube-apiserver:v1.30.2
registry.k8s.io/etcd:3.5.12-0
registry.k8s.io/echoserver-arm:1.8
registry.k8s.io/coredns/coredns:v1.11.1
gcr.io/k8s-minikube/storage-provisioner:v5
gcr.io/k8s-minikube/busybox:1.28.4-glibc
gcr.io/google-containers/addon-resizer:functional-493344
docker.io/library/nginx:latest
docker.io/library/nginx:alpine
docker.io/library/minikube-local-cache-test:functional-493344
docker.io/kubernetesui/metrics-scraper:<none>
docker.io/kubernetesui/dashboard:<none>
functional_test.go:268: (dbg) Stderr: out/minikube-linux-arm64 -p functional-493344 image ls --format short --alsologtostderr:
I0620 17:12:34.382552   48682 out.go:291] Setting OutFile to fd 1 ...
I0620 17:12:34.382733   48682 out.go:338] TERM=,COLORTERM=, which probably does not support color
I0620 17:12:34.382745   48682 out.go:304] Setting ErrFile to fd 2...
I0620 17:12:34.382751   48682 out.go:338] TERM=,COLORTERM=, which probably does not support color
I0620 17:12:34.383095   48682 root.go:338] Updating PATH: /home/jenkins/minikube-integration/19106-2452/.minikube/bin
I0620 17:12:34.383941   48682 config.go:182] Loaded profile config "functional-493344": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.30.2
I0620 17:12:34.384101   48682 config.go:182] Loaded profile config "functional-493344": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.30.2
I0620 17:12:34.385100   48682 cli_runner.go:164] Run: docker container inspect functional-493344 --format={{.State.Status}}
I0620 17:12:34.406147   48682 ssh_runner.go:195] Run: systemctl --version
I0620 17:12:34.406198   48682 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" functional-493344
I0620 17:12:34.425855   48682 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32782 SSHKeyPath:/home/jenkins/minikube-integration/19106-2452/.minikube/machines/functional-493344/id_rsa Username:docker}
I0620 17:12:34.522249   48682 ssh_runner.go:195] Run: docker images --no-trunc --format "{{json .}}"
--- PASS: TestFunctional/parallel/ImageCommands/ImageListShort (0.25s)

                                                
                                    
x
+
TestFunctional/parallel/ImageCommands/ImageListTable (0.26s)

                                                
                                                
=== RUN   TestFunctional/parallel/ImageCommands/ImageListTable
=== PAUSE TestFunctional/parallel/ImageCommands/ImageListTable

                                                
                                                

                                                
                                                
=== CONT  TestFunctional/parallel/ImageCommands/ImageListTable
functional_test.go:260: (dbg) Run:  out/minikube-linux-arm64 -p functional-493344 image ls --format table --alsologtostderr
functional_test.go:265: (dbg) Stdout: out/minikube-linux-arm64 -p functional-493344 image ls --format table --alsologtostderr:
|---------------------------------------------|-------------------|---------------|--------|
|                    Image                    |        Tag        |   Image ID    |  Size  |
|---------------------------------------------|-------------------|---------------|--------|
| registry.k8s.io/kube-scheduler              | v1.30.2           | c7dd04b1bafeb | 60.5MB |
| registry.k8s.io/kube-proxy                  | v1.30.2           | 66dbb96a9149f | 87.9MB |
| docker.io/library/nginx                     | latest            | 11ceee7cdc572 | 193MB  |
| docker.io/library/nginx                     | alpine            | 4f49228258b64 | 49.7MB |
| registry.k8s.io/coredns/coredns             | v1.11.1           | 2437cf7621777 | 57.4MB |
| registry.k8s.io/pause                       | 3.9               | 829e9de338bd5 | 514kB  |
| gcr.io/k8s-minikube/storage-provisioner     | v5                | ba04bb24b9575 | 29MB   |
| registry.k8s.io/kube-controller-manager     | v1.30.2           | e1dcc3400d3ea | 107MB  |
| registry.k8s.io/echoserver-arm              | 1.8               | 72565bf5bbedf | 85MB   |
| gcr.io/k8s-minikube/busybox                 | 1.28.4-glibc      | 1611cd07b61d5 | 3.55MB |
| registry.k8s.io/pause                       | 3.1               | 8057e0500773a | 525kB  |
| registry.k8s.io/pause                       | 3.3               | 3d18732f8686c | 484kB  |
| docker.io/kubernetesui/dashboard            | <none>            | 20b332c9a70d8 | 244MB  |
| docker.io/kubernetesui/metrics-scraper      | <none>            | a422e0e982356 | 42.3MB |
| gcr.io/google-containers/addon-resizer      | functional-493344 | ffd4cfbbe753e | 32.9MB |
| registry.k8s.io/etcd                        | 3.5.12-0          | 014faa467e297 | 139MB  |
| registry.k8s.io/kube-apiserver              | v1.30.2           | 84c601f3f72c8 | 112MB  |
| registry.k8s.io/pause                       | latest            | 8cb2091f603e7 | 240kB  |
| docker.io/library/minikube-local-cache-test | functional-493344 | 0d14c413bec53 | 30B    |
|---------------------------------------------|-------------------|---------------|--------|
functional_test.go:268: (dbg) Stderr: out/minikube-linux-arm64 -p functional-493344 image ls --format table --alsologtostderr:
I0620 17:12:34.886739   48813 out.go:291] Setting OutFile to fd 1 ...
I0620 17:12:34.886972   48813 out.go:338] TERM=,COLORTERM=, which probably does not support color
I0620 17:12:34.887031   48813 out.go:304] Setting ErrFile to fd 2...
I0620 17:12:34.887052   48813 out.go:338] TERM=,COLORTERM=, which probably does not support color
I0620 17:12:34.887309   48813 root.go:338] Updating PATH: /home/jenkins/minikube-integration/19106-2452/.minikube/bin
I0620 17:12:34.888588   48813 config.go:182] Loaded profile config "functional-493344": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.30.2
I0620 17:12:34.888787   48813 config.go:182] Loaded profile config "functional-493344": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.30.2
I0620 17:12:34.889514   48813 cli_runner.go:164] Run: docker container inspect functional-493344 --format={{.State.Status}}
I0620 17:12:34.924955   48813 ssh_runner.go:195] Run: systemctl --version
I0620 17:12:34.925013   48813 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" functional-493344
I0620 17:12:34.954434   48813 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32782 SSHKeyPath:/home/jenkins/minikube-integration/19106-2452/.minikube/machines/functional-493344/id_rsa Username:docker}
I0620 17:12:35.048024   48813 ssh_runner.go:195] Run: docker images --no-trunc --format "{{json .}}"
--- PASS: TestFunctional/parallel/ImageCommands/ImageListTable (0.26s)

                                                
                                    
TestFunctional/parallel/ImageCommands/ImageListJson (0.25s)

                                                
                                                
=== RUN   TestFunctional/parallel/ImageCommands/ImageListJson
=== PAUSE TestFunctional/parallel/ImageCommands/ImageListJson

                                                
                                                

                                                
                                                
=== CONT  TestFunctional/parallel/ImageCommands/ImageListJson
functional_test.go:260: (dbg) Run:  out/minikube-linux-arm64 -p functional-493344 image ls --format json --alsologtostderr
functional_test.go:265: (dbg) Stdout: out/minikube-linux-arm64 -p functional-493344 image ls --format json --alsologtostderr:
[{"id":"84c601f3f72c87776cdcf77a73329d1f45297e43a92508b0f289fa2fcf8872a0","repoDigests":[],"repoTags":["registry.k8s.io/kube-apiserver:v1.30.2"],"size":"112000000"},{"id":"66dbb96a9149f69913ff817f696be766014cacdffc2ce0889a76c81165415fae","repoDigests":[],"repoTags":["registry.k8s.io/kube-proxy:v1.30.2"],"size":"87900000"},{"id":"11ceee7cdc57225711b8382e1965974bbb259de14a9f5f7d6b9f161ced50a10a","repoDigests":[],"repoTags":["docker.io/library/nginx:latest"],"size":"193000000"},{"id":"014faa467e29798aeef733fe6d1a3b5e382688217b053ad23410e6cccd5d22fd","repoDigests":[],"repoTags":["registry.k8s.io/etcd:3.5.12-0"],"size":"139000000"},{"id":"1611cd07b61d57dbbfebe6db242513fd51e1c02d20ba08af17a45837d86a8a8c","repoDigests":[],"repoTags":["gcr.io/k8s-minikube/busybox:1.28.4-glibc"],"size":"3550000"},{"id":"8057e0500773a37cde2cff041eb13ebd68c748419a2fbfd1dfb5bf38696cc8e5","repoDigests":[],"repoTags":["registry.k8s.io/pause:3.1"],"size":"525000"},{"id":"0d14c413bec5341d74307ac9f182719ef76e576a174b81c9d2b92419f23119
a7","repoDigests":[],"repoTags":["docker.io/library/minikube-local-cache-test:functional-493344"],"size":"30"},{"id":"4f49228258b642594e55baf98d153d0e85f3fb989c1eb8450c520ed77bf27e65","repoDigests":[],"repoTags":["docker.io/library/nginx:alpine"],"size":"49700000"},{"id":"8cb2091f603e75187e2f6226c5901d12e00b1d1f778c6471ae4578e8a1c4724a","repoDigests":[],"repoTags":["registry.k8s.io/pause:latest"],"size":"240000"},{"id":"a422e0e982356f6c1cf0e5bb7b733363caae3992a07c99951fbcc73e58ed656a","repoDigests":[],"repoTags":["docker.io/kubernetesui/metrics-scraper:\u003cnone\u003e"],"size":"42300000"},{"id":"ffd4cfbbe753e62419e129ee2ac618beb94e51baa7471df5038b0b516b59cf91","repoDigests":[],"repoTags":["gcr.io/google-containers/addon-resizer:functional-493344"],"size":"32900000"},{"id":"3d18732f8686cc3c878055d99a05fa80289502fa496b36b6a0fe0f77206a7300","repoDigests":[],"repoTags":["registry.k8s.io/pause:3.3"],"size":"484000"},{"id":"72565bf5bbedfb62e9d21afa2b1221b2c7a5e05b746dae33430bc550d3f87beb","repoDigests":[],"repoTag
s":["registry.k8s.io/echoserver-arm:1.8"],"size":"85000000"},{"id":"c7dd04b1bafeb51c650fde7f34ac0fdafa96030e77ea7a822135ff302d895dd5","repoDigests":[],"repoTags":["registry.k8s.io/kube-scheduler:v1.30.2"],"size":"60500000"},{"id":"e1dcc3400d3ea6a268c7ea6e66c3a196703770a8e346b695f54344ab53a47567","repoDigests":[],"repoTags":["registry.k8s.io/kube-controller-manager:v1.30.2"],"size":"107000000"},{"id":"2437cf762177702dec2dfe99a09c37427a15af6d9a57c456b65352667c223d93","repoDigests":[],"repoTags":["registry.k8s.io/coredns/coredns:v1.11.1"],"size":"57400000"},{"id":"829e9de338bd5fdd3f16f68f83a9fb288fbc8453e881e5d5cfd0f6f2ff72b43e","repoDigests":[],"repoTags":["registry.k8s.io/pause:3.9"],"size":"514000"},{"id":"20b332c9a70d8516d849d1ac23eff5800cbb2f263d379f0ec11ee908db6b25a8","repoDigests":[],"repoTags":["docker.io/kubernetesui/dashboard:\u003cnone\u003e"],"size":"244000000"},{"id":"ba04bb24b95753201135cbc420b233c1b0b9fa2e1fd21d28319c348c33fbcde6","repoDigests":[],"repoTags":["gcr.io/k8s-minikube/storage-provision
er:v5"],"size":"29000000"}]
functional_test.go:268: (dbg) Stderr: out/minikube-linux-arm64 -p functional-493344 image ls --format json --alsologtostderr:
I0620 17:12:34.636294   48743 out.go:291] Setting OutFile to fd 1 ...
I0620 17:12:34.636440   48743 out.go:338] TERM=,COLORTERM=, which probably does not support color
I0620 17:12:34.636446   48743 out.go:304] Setting ErrFile to fd 2...
I0620 17:12:34.636451   48743 out.go:338] TERM=,COLORTERM=, which probably does not support color
I0620 17:12:34.636675   48743 root.go:338] Updating PATH: /home/jenkins/minikube-integration/19106-2452/.minikube/bin
I0620 17:12:34.637320   48743 config.go:182] Loaded profile config "functional-493344": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.30.2
I0620 17:12:34.637439   48743 config.go:182] Loaded profile config "functional-493344": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.30.2
I0620 17:12:34.637952   48743 cli_runner.go:164] Run: docker container inspect functional-493344 --format={{.State.Status}}
I0620 17:12:34.666137   48743 ssh_runner.go:195] Run: systemctl --version
I0620 17:12:34.666207   48743 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" functional-493344
I0620 17:12:34.694337   48743 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32782 SSHKeyPath:/home/jenkins/minikube-integration/19106-2452/.minikube/machines/functional-493344/id_rsa Username:docker}
I0620 17:12:34.795902   48743 ssh_runner.go:195] Run: docker images --no-trunc --format "{{json .}}"
--- PASS: TestFunctional/parallel/ImageCommands/ImageListJson (0.25s)
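Note: the JSON stdout above is emitted as a single unformatted line. A minimal sketch for reading it on a workstation, assuming jq is installed (the field names match the output shown; the filter itself is only illustrative):

out/minikube-linux-arm64 -p functional-493344 image ls --format json \
  | jq -r '.[] | [.repoTags[0], .id[0:12], .size] | @tsv'   # tag, short image ID, size in bytes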

                                                
                                    
TestFunctional/parallel/ImageCommands/ImageListYaml (0.22s)

                                                
                                                
=== RUN   TestFunctional/parallel/ImageCommands/ImageListYaml
=== PAUSE TestFunctional/parallel/ImageCommands/ImageListYaml

                                                
                                                

                                                
                                                
=== CONT  TestFunctional/parallel/ImageCommands/ImageListYaml
functional_test.go:260: (dbg) Run:  out/minikube-linux-arm64 -p functional-493344 image ls --format yaml --alsologtostderr
functional_test.go:265: (dbg) Stdout: out/minikube-linux-arm64 -p functional-493344 image ls --format yaml --alsologtostderr:
- id: c7dd04b1bafeb51c650fde7f34ac0fdafa96030e77ea7a822135ff302d895dd5
repoDigests: []
repoTags:
- registry.k8s.io/kube-scheduler:v1.30.2
size: "60500000"
- id: 11ceee7cdc57225711b8382e1965974bbb259de14a9f5f7d6b9f161ced50a10a
repoDigests: []
repoTags:
- docker.io/library/nginx:latest
size: "193000000"
- id: 20b332c9a70d8516d849d1ac23eff5800cbb2f263d379f0ec11ee908db6b25a8
repoDigests: []
repoTags:
- docker.io/kubernetesui/dashboard:<none>
size: "244000000"
- id: a422e0e982356f6c1cf0e5bb7b733363caae3992a07c99951fbcc73e58ed656a
repoDigests: []
repoTags:
- docker.io/kubernetesui/metrics-scraper:<none>
size: "42300000"
- id: 1611cd07b61d57dbbfebe6db242513fd51e1c02d20ba08af17a45837d86a8a8c
repoDigests: []
repoTags:
- gcr.io/k8s-minikube/busybox:1.28.4-glibc
size: "3550000"
- id: 66dbb96a9149f69913ff817f696be766014cacdffc2ce0889a76c81165415fae
repoDigests: []
repoTags:
- registry.k8s.io/kube-proxy:v1.30.2
size: "87900000"
- id: 4f49228258b642594e55baf98d153d0e85f3fb989c1eb8450c520ed77bf27e65
repoDigests: []
repoTags:
- docker.io/library/nginx:alpine
size: "49700000"
- id: ffd4cfbbe753e62419e129ee2ac618beb94e51baa7471df5038b0b516b59cf91
repoDigests: []
repoTags:
- gcr.io/google-containers/addon-resizer:functional-493344
size: "32900000"
- id: 3d18732f8686cc3c878055d99a05fa80289502fa496b36b6a0fe0f77206a7300
repoDigests: []
repoTags:
- registry.k8s.io/pause:3.3
size: "484000"
- id: 72565bf5bbedfb62e9d21afa2b1221b2c7a5e05b746dae33430bc550d3f87beb
repoDigests: []
repoTags:
- registry.k8s.io/echoserver-arm:1.8
size: "85000000"
- id: 84c601f3f72c87776cdcf77a73329d1f45297e43a92508b0f289fa2fcf8872a0
repoDigests: []
repoTags:
- registry.k8s.io/kube-apiserver:v1.30.2
size: "112000000"
- id: 014faa467e29798aeef733fe6d1a3b5e382688217b053ad23410e6cccd5d22fd
repoDigests: []
repoTags:
- registry.k8s.io/etcd:3.5.12-0
size: "139000000"
- id: 2437cf762177702dec2dfe99a09c37427a15af6d9a57c456b65352667c223d93
repoDigests: []
repoTags:
- registry.k8s.io/coredns/coredns:v1.11.1
size: "57400000"
- id: 829e9de338bd5fdd3f16f68f83a9fb288fbc8453e881e5d5cfd0f6f2ff72b43e
repoDigests: []
repoTags:
- registry.k8s.io/pause:3.9
size: "514000"
- id: 8057e0500773a37cde2cff041eb13ebd68c748419a2fbfd1dfb5bf38696cc8e5
repoDigests: []
repoTags:
- registry.k8s.io/pause:3.1
size: "525000"
- id: 8cb2091f603e75187e2f6226c5901d12e00b1d1f778c6471ae4578e8a1c4724a
repoDigests: []
repoTags:
- registry.k8s.io/pause:latest
size: "240000"
- id: 0d14c413bec5341d74307ac9f182719ef76e576a174b81c9d2b92419f23119a7
repoDigests: []
repoTags:
- docker.io/library/minikube-local-cache-test:functional-493344
size: "30"
- id: e1dcc3400d3ea6a268c7ea6e66c3a196703770a8e346b695f54344ab53a47567
repoDigests: []
repoTags:
- registry.k8s.io/kube-controller-manager:v1.30.2
size: "107000000"
- id: ba04bb24b95753201135cbc420b233c1b0b9fa2e1fd21d28319c348c33fbcde6
repoDigests: []
repoTags:
- gcr.io/k8s-minikube/storage-provisioner:v5
size: "29000000"

                                                
                                                
functional_test.go:268: (dbg) Stderr: out/minikube-linux-arm64 -p functional-493344 image ls --format yaml --alsologtostderr:
I0620 17:12:34.386336   48681 out.go:291] Setting OutFile to fd 1 ...
I0620 17:12:34.386509   48681 out.go:338] TERM=,COLORTERM=, which probably does not support color
I0620 17:12:34.386535   48681 out.go:304] Setting ErrFile to fd 2...
I0620 17:12:34.386555   48681 out.go:338] TERM=,COLORTERM=, which probably does not support color
I0620 17:12:34.386868   48681 root.go:338] Updating PATH: /home/jenkins/minikube-integration/19106-2452/.minikube/bin
I0620 17:12:34.387539   48681 config.go:182] Loaded profile config "functional-493344": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.30.2
I0620 17:12:34.387725   48681 config.go:182] Loaded profile config "functional-493344": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.30.2
I0620 17:12:34.388244   48681 cli_runner.go:164] Run: docker container inspect functional-493344 --format={{.State.Status}}
I0620 17:12:34.404965   48681 ssh_runner.go:195] Run: systemctl --version
I0620 17:12:34.405028   48681 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" functional-493344
I0620 17:12:34.423638   48681 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32782 SSHKeyPath:/home/jenkins/minikube-integration/19106-2452/.minikube/machines/functional-493344/id_rsa Username:docker}
I0620 17:12:34.515547   48681 ssh_runner.go:195] Run: docker images --no-trunc --format "{{json .}}"
--- PASS: TestFunctional/parallel/ImageCommands/ImageListYaml (0.22s)

                                                
                                    
TestFunctional/parallel/ImageCommands/ImageBuild (2.43s)

                                                
                                                
=== RUN   TestFunctional/parallel/ImageCommands/ImageBuild
=== PAUSE TestFunctional/parallel/ImageCommands/ImageBuild

                                                
                                                

                                                
                                                
=== CONT  TestFunctional/parallel/ImageCommands/ImageBuild
functional_test.go:307: (dbg) Run:  out/minikube-linux-arm64 -p functional-493344 ssh pgrep buildkitd
functional_test.go:307: (dbg) Non-zero exit: out/minikube-linux-arm64 -p functional-493344 ssh pgrep buildkitd: exit status 1 (317.947605ms)

                                                
                                                
** stderr ** 
	ssh: Process exited with status 1

                                                
                                                
** /stderr **
functional_test.go:314: (dbg) Run:  out/minikube-linux-arm64 -p functional-493344 image build -t localhost/my-image:functional-493344 testdata/build --alsologtostderr
functional_test.go:314: (dbg) Done: out/minikube-linux-arm64 -p functional-493344 image build -t localhost/my-image:functional-493344 testdata/build --alsologtostderr: (1.897032549s)
functional_test.go:322: (dbg) Stderr: out/minikube-linux-arm64 -p functional-493344 image build -t localhost/my-image:functional-493344 testdata/build --alsologtostderr:
I0620 17:12:34.928632   48820 out.go:291] Setting OutFile to fd 1 ...
I0620 17:12:34.928848   48820 out.go:338] TERM=,COLORTERM=, which probably does not support color
I0620 17:12:34.928859   48820 out.go:304] Setting ErrFile to fd 2...
I0620 17:12:34.928863   48820 out.go:338] TERM=,COLORTERM=, which probably does not support color
I0620 17:12:34.929101   48820 root.go:338] Updating PATH: /home/jenkins/minikube-integration/19106-2452/.minikube/bin
I0620 17:12:34.929785   48820 config.go:182] Loaded profile config "functional-493344": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.30.2
I0620 17:12:34.930431   48820 config.go:182] Loaded profile config "functional-493344": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.30.2
I0620 17:12:34.930977   48820 cli_runner.go:164] Run: docker container inspect functional-493344 --format={{.State.Status}}
I0620 17:12:34.955925   48820 ssh_runner.go:195] Run: systemctl --version
I0620 17:12:34.955975   48820 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" functional-493344
I0620 17:12:34.977173   48820 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32782 SSHKeyPath:/home/jenkins/minikube-integration/19106-2452/.minikube/machines/functional-493344/id_rsa Username:docker}
I0620 17:12:35.080105   48820 build_images.go:161] Building image from path: /tmp/build.1260678109.tar
I0620 17:12:35.080175   48820 ssh_runner.go:195] Run: sudo mkdir -p /var/lib/minikube/build
I0620 17:12:35.091440   48820 ssh_runner.go:195] Run: stat -c "%s %y" /var/lib/minikube/build/build.1260678109.tar
I0620 17:12:35.098914   48820 ssh_runner.go:352] existence check for /var/lib/minikube/build/build.1260678109.tar: stat -c "%s %y" /var/lib/minikube/build/build.1260678109.tar: Process exited with status 1
stdout:

                                                
                                                
stderr:
stat: cannot statx '/var/lib/minikube/build/build.1260678109.tar': No such file or directory
I0620 17:12:35.098969   48820 ssh_runner.go:362] scp /tmp/build.1260678109.tar --> /var/lib/minikube/build/build.1260678109.tar (3072 bytes)
I0620 17:12:35.126514   48820 ssh_runner.go:195] Run: sudo mkdir -p /var/lib/minikube/build/build.1260678109
I0620 17:12:35.135774   48820 ssh_runner.go:195] Run: sudo tar -C /var/lib/minikube/build/build.1260678109 -xf /var/lib/minikube/build/build.1260678109.tar
I0620 17:12:35.145434   48820 docker.go:360] Building image: /var/lib/minikube/build/build.1260678109
I0620 17:12:35.145511   48820 ssh_runner.go:195] Run: docker build -t localhost/my-image:functional-493344 /var/lib/minikube/build/build.1260678109
#0 building with "default" instance using docker driver

                                                
                                                
#1 [internal] load build definition from Dockerfile
#1 transferring dockerfile: 97B done
#1 DONE 0.0s

                                                
                                                
#2 [internal] load metadata for gcr.io/k8s-minikube/busybox:latest
#2 DONE 0.6s

                                                
                                                
#3 [internal] load .dockerignore
#3 transferring context: 2B done
#3 DONE 0.0s

                                                
                                                
#4 [internal] load build context
#4 transferring context: 62B done
#4 DONE 0.0s

                                                
                                                
#5 [1/3] FROM gcr.io/k8s-minikube/busybox:latest@sha256:ca5ae90100d50772da31f3b5016209e25ad61972404e2ccd83d44f10dee7e79b
#5 resolve gcr.io/k8s-minikube/busybox:latest@sha256:ca5ae90100d50772da31f3b5016209e25ad61972404e2ccd83d44f10dee7e79b 0.0s done
#5 sha256:a01966dde7f8d5ba10b6d87e776c7c8fb5a5f6bfa678874bd28b33b1fc6dba34 0B / 828.50kB 0.1s
#5 sha256:ca5ae90100d50772da31f3b5016209e25ad61972404e2ccd83d44f10dee7e79b 770B / 770B done
#5 sha256:a77fe109c026308f149d36484d795b42efe0fd29b332be9071f63e1634c36ac9 527B / 527B done
#5 sha256:71a676dd070f4b701c3272e566d84951362f1326ea07d5bbad119d1c4f6b3d02 1.47kB / 1.47kB done
#5 sha256:a01966dde7f8d5ba10b6d87e776c7c8fb5a5f6bfa678874bd28b33b1fc6dba34 828.50kB / 828.50kB 0.1s done
#5 extracting sha256:a01966dde7f8d5ba10b6d87e776c7c8fb5a5f6bfa678874bd28b33b1fc6dba34 0.0s done
#5 DONE 0.3s

                                                
                                                
#6 [2/3] RUN true
#6 DONE 0.3s

                                                
                                                
#7 [3/3] ADD content.txt /
#7 DONE 0.0s

                                                
                                                
#8 exporting to image
#8 exporting layers 0.0s done
#8 writing image sha256:5bd72778373fca18635845aba3bc48eba2e0d28b1348fa49ee0426987f4204c5 done
#8 naming to localhost/my-image:functional-493344 done
#8 DONE 0.1s
I0620 17:12:36.735960   48820 ssh_runner.go:235] Completed: docker build -t localhost/my-image:functional-493344 /var/lib/minikube/build/build.1260678109: (1.590420094s)
I0620 17:12:36.736030   48820 ssh_runner.go:195] Run: sudo rm -rf /var/lib/minikube/build/build.1260678109
I0620 17:12:36.744997   48820 ssh_runner.go:195] Run: sudo rm -f /var/lib/minikube/build/build.1260678109.tar
I0620 17:12:36.753744   48820 build_images.go:217] Built localhost/my-image:functional-493344 from /tmp/build.1260678109.tar
I0620 17:12:36.753776   48820 build_images.go:133] succeeded building to: functional-493344
I0620 17:12:36.753781   48820 build_images.go:134] failed building to: 
functional_test.go:447: (dbg) Run:  out/minikube-linux-arm64 -p functional-493344 image ls
--- PASS: TestFunctional/parallel/ImageCommands/ImageBuild (2.43s)
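Note: the docker build steps logged above (#5 through #7) imply a three-instruction Dockerfile. A sketch reconstructed from those steps, assuming this is roughly what testdata/build contains (the actual file is not included in this report):

# Reconstructed from the build log: base image, RUN true, ADD content.txt
FROM gcr.io/k8s-minikube/busybox:latest
RUN true
ADD content.txt /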

                                                
                                    
TestFunctional/parallel/ImageCommands/Setup (1.93s)

                                                
                                                
=== RUN   TestFunctional/parallel/ImageCommands/Setup
functional_test.go:341: (dbg) Run:  docker pull gcr.io/google-containers/addon-resizer:1.8.8
functional_test.go:341: (dbg) Done: docker pull gcr.io/google-containers/addon-resizer:1.8.8: (1.906113796s)
functional_test.go:346: (dbg) Run:  docker tag gcr.io/google-containers/addon-resizer:1.8.8 gcr.io/google-containers/addon-resizer:functional-493344
--- PASS: TestFunctional/parallel/ImageCommands/Setup (1.93s)

                                                
                                    
TestFunctional/parallel/ImageCommands/ImageLoadDaemon (4.53s)

                                                
                                                
=== RUN   TestFunctional/parallel/ImageCommands/ImageLoadDaemon
functional_test.go:354: (dbg) Run:  out/minikube-linux-arm64 -p functional-493344 image load --daemon gcr.io/google-containers/addon-resizer:functional-493344 --alsologtostderr
2024/06/20 17:12:18 [DEBUG] GET http://127.0.0.1:36195/api/v1/namespaces/kubernetes-dashboard/services/http:kubernetes-dashboard:/proxy/
functional_test.go:354: (dbg) Done: out/minikube-linux-arm64 -p functional-493344 image load --daemon gcr.io/google-containers/addon-resizer:functional-493344 --alsologtostderr: (4.294559329s)
functional_test.go:447: (dbg) Run:  out/minikube-linux-arm64 -p functional-493344 image ls
--- PASS: TestFunctional/parallel/ImageCommands/ImageLoadDaemon (4.53s)

                                                
                                    
TestFunctional/parallel/UpdateContextCmd/no_changes (0.2s)

                                                
                                                
=== RUN   TestFunctional/parallel/UpdateContextCmd/no_changes
=== PAUSE TestFunctional/parallel/UpdateContextCmd/no_changes

                                                
                                                

                                                
                                                
=== CONT  TestFunctional/parallel/UpdateContextCmd/no_changes
functional_test.go:2115: (dbg) Run:  out/minikube-linux-arm64 -p functional-493344 update-context --alsologtostderr -v=2
--- PASS: TestFunctional/parallel/UpdateContextCmd/no_changes (0.20s)

                                                
                                    
TestFunctional/parallel/UpdateContextCmd/no_minikube_cluster (0.2s)

                                                
                                                
=== RUN   TestFunctional/parallel/UpdateContextCmd/no_minikube_cluster
=== PAUSE TestFunctional/parallel/UpdateContextCmd/no_minikube_cluster

                                                
                                                

                                                
                                                
=== CONT  TestFunctional/parallel/UpdateContextCmd/no_minikube_cluster
functional_test.go:2115: (dbg) Run:  out/minikube-linux-arm64 -p functional-493344 update-context --alsologtostderr -v=2
--- PASS: TestFunctional/parallel/UpdateContextCmd/no_minikube_cluster (0.20s)

                                                
                                    
TestFunctional/parallel/UpdateContextCmd/no_clusters (0.16s)

                                                
                                                
=== RUN   TestFunctional/parallel/UpdateContextCmd/no_clusters
=== PAUSE TestFunctional/parallel/UpdateContextCmd/no_clusters

                                                
                                                

                                                
                                                
=== CONT  TestFunctional/parallel/UpdateContextCmd/no_clusters
functional_test.go:2115: (dbg) Run:  out/minikube-linux-arm64 -p functional-493344 update-context --alsologtostderr -v=2
--- PASS: TestFunctional/parallel/UpdateContextCmd/no_clusters (0.16s)

                                                
                                    
TestFunctional/parallel/DockerEnv/bash (1.33s)

                                                
                                                
=== RUN   TestFunctional/parallel/DockerEnv/bash
functional_test.go:495: (dbg) Run:  /bin/bash -c "eval $(out/minikube-linux-arm64 -p functional-493344 docker-env) && out/minikube-linux-arm64 status -p functional-493344"
functional_test.go:518: (dbg) Run:  /bin/bash -c "eval $(out/minikube-linux-arm64 -p functional-493344 docker-env) && docker images"
--- PASS: TestFunctional/parallel/DockerEnv/bash (1.33s)

                                                
                                    
TestFunctional/parallel/ImageCommands/ImageReloadDaemon (2.98s)

                                                
                                                
=== RUN   TestFunctional/parallel/ImageCommands/ImageReloadDaemon
functional_test.go:364: (dbg) Run:  out/minikube-linux-arm64 -p functional-493344 image load --daemon gcr.io/google-containers/addon-resizer:functional-493344 --alsologtostderr
functional_test.go:364: (dbg) Done: out/minikube-linux-arm64 -p functional-493344 image load --daemon gcr.io/google-containers/addon-resizer:functional-493344 --alsologtostderr: (2.767480402s)
functional_test.go:447: (dbg) Run:  out/minikube-linux-arm64 -p functional-493344 image ls
--- PASS: TestFunctional/parallel/ImageCommands/ImageReloadDaemon (2.98s)

                                                
                                    
TestFunctional/parallel/ImageCommands/ImageTagAndLoadDaemon (5.43s)

                                                
                                                
=== RUN   TestFunctional/parallel/ImageCommands/ImageTagAndLoadDaemon
functional_test.go:234: (dbg) Run:  docker pull gcr.io/google-containers/addon-resizer:1.8.9
functional_test.go:234: (dbg) Done: docker pull gcr.io/google-containers/addon-resizer:1.8.9: (1.843548606s)
functional_test.go:239: (dbg) Run:  docker tag gcr.io/google-containers/addon-resizer:1.8.9 gcr.io/google-containers/addon-resizer:functional-493344
functional_test.go:244: (dbg) Run:  out/minikube-linux-arm64 -p functional-493344 image load --daemon gcr.io/google-containers/addon-resizer:functional-493344 --alsologtostderr
functional_test.go:244: (dbg) Done: out/minikube-linux-arm64 -p functional-493344 image load --daemon gcr.io/google-containers/addon-resizer:functional-493344 --alsologtostderr: (3.367135121s)
functional_test.go:447: (dbg) Run:  out/minikube-linux-arm64 -p functional-493344 image ls
--- PASS: TestFunctional/parallel/ImageCommands/ImageTagAndLoadDaemon (5.43s)

                                                
                                    
TestFunctional/parallel/ImageCommands/ImageSaveToFile (0.9s)

                                                
                                                
=== RUN   TestFunctional/parallel/ImageCommands/ImageSaveToFile
functional_test.go:379: (dbg) Run:  out/minikube-linux-arm64 -p functional-493344 image save gcr.io/google-containers/addon-resizer:functional-493344 /home/jenkins/workspace/Docker_Linux_docker_arm64/addon-resizer-save.tar --alsologtostderr
--- PASS: TestFunctional/parallel/ImageCommands/ImageSaveToFile (0.90s)

                                                
                                    
TestFunctional/parallel/ImageCommands/ImageRemove (0.45s)

                                                
                                                
=== RUN   TestFunctional/parallel/ImageCommands/ImageRemove
functional_test.go:391: (dbg) Run:  out/minikube-linux-arm64 -p functional-493344 image rm gcr.io/google-containers/addon-resizer:functional-493344 --alsologtostderr
functional_test.go:447: (dbg) Run:  out/minikube-linux-arm64 -p functional-493344 image ls
--- PASS: TestFunctional/parallel/ImageCommands/ImageRemove (0.45s)

                                                
                                    
TestFunctional/parallel/ImageCommands/ImageLoadFromFile (1.28s)

                                                
                                                
=== RUN   TestFunctional/parallel/ImageCommands/ImageLoadFromFile
functional_test.go:408: (dbg) Run:  out/minikube-linux-arm64 -p functional-493344 image load /home/jenkins/workspace/Docker_Linux_docker_arm64/addon-resizer-save.tar --alsologtostderr
functional_test.go:408: (dbg) Done: out/minikube-linux-arm64 -p functional-493344 image load /home/jenkins/workspace/Docker_Linux_docker_arm64/addon-resizer-save.tar --alsologtostderr: (1.081215339s)
functional_test.go:447: (dbg) Run:  out/minikube-linux-arm64 -p functional-493344 image ls
--- PASS: TestFunctional/parallel/ImageCommands/ImageLoadFromFile (1.28s)
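Note: together with ImageSaveToFile above, this test exercises a tarball round trip. A minimal sketch of the same flow, using an illustrative /tmp path rather than the workspace path from this run:

# Save an image from the cluster's container runtime to a tar file, then load it back.
out/minikube-linux-arm64 -p functional-493344 image save \
  gcr.io/google-containers/addon-resizer:functional-493344 /tmp/addon-resizer-save.tar
out/minikube-linux-arm64 -p functional-493344 image load /tmp/addon-resizer-save.tar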

                                                
                                    
TestFunctional/parallel/ImageCommands/ImageSaveDaemon (0.96s)

                                                
                                                
=== RUN   TestFunctional/parallel/ImageCommands/ImageSaveDaemon
functional_test.go:418: (dbg) Run:  docker rmi gcr.io/google-containers/addon-resizer:functional-493344
functional_test.go:423: (dbg) Run:  out/minikube-linux-arm64 -p functional-493344 image save --daemon gcr.io/google-containers/addon-resizer:functional-493344 --alsologtostderr
functional_test.go:428: (dbg) Run:  docker image inspect gcr.io/google-containers/addon-resizer:functional-493344
--- PASS: TestFunctional/parallel/ImageCommands/ImageSaveDaemon (0.96s)

                                                
                                    
TestFunctional/delete_addon-resizer_images (0.08s)

                                                
                                                
=== RUN   TestFunctional/delete_addon-resizer_images
functional_test.go:189: (dbg) Run:  docker rmi -f gcr.io/google-containers/addon-resizer:1.8.8
functional_test.go:189: (dbg) Run:  docker rmi -f gcr.io/google-containers/addon-resizer:functional-493344
--- PASS: TestFunctional/delete_addon-resizer_images (0.08s)

                                                
                                    
TestFunctional/delete_my-image_image (0.02s)

                                                
                                                
=== RUN   TestFunctional/delete_my-image_image
functional_test.go:197: (dbg) Run:  docker rmi -f localhost/my-image:functional-493344
--- PASS: TestFunctional/delete_my-image_image (0.02s)

                                                
                                    
TestFunctional/delete_minikube_cached_images (0.02s)

                                                
                                                
=== RUN   TestFunctional/delete_minikube_cached_images
functional_test.go:205: (dbg) Run:  docker rmi -f minikube-local-cache-test:functional-493344
--- PASS: TestFunctional/delete_minikube_cached_images (0.02s)

                                                
                                    
TestMultiControlPlane/serial/StartCluster (137.46s)

                                                
                                                
=== RUN   TestMultiControlPlane/serial/StartCluster
ha_test.go:101: (dbg) Run:  out/minikube-linux-arm64 start -p ha-621968 --wait=true --memory=2200 --ha -v=7 --alsologtostderr --driver=docker  --container-runtime=docker
E0620 17:13:20.803515    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/addons-705802/client.crt: no such file or directory
ha_test.go:101: (dbg) Done: out/minikube-linux-arm64 start -p ha-621968 --wait=true --memory=2200 --ha -v=7 --alsologtostderr --driver=docker  --container-runtime=docker: (2m16.663184718s)
ha_test.go:107: (dbg) Run:  out/minikube-linux-arm64 -p ha-621968 status -v=7 --alsologtostderr
--- PASS: TestMultiControlPlane/serial/StartCluster (137.46s)

                                                
                                    
TestMultiControlPlane/serial/DeployApp (41.33s)

                                                
                                                
=== RUN   TestMultiControlPlane/serial/DeployApp
ha_test.go:128: (dbg) Run:  out/minikube-linux-arm64 kubectl -p ha-621968 -- apply -f ./testdata/ha/ha-pod-dns-test.yaml
ha_test.go:133: (dbg) Run:  out/minikube-linux-arm64 kubectl -p ha-621968 -- rollout status deployment/busybox
ha_test.go:133: (dbg) Done: out/minikube-linux-arm64 kubectl -p ha-621968 -- rollout status deployment/busybox: (4.129103177s)
ha_test.go:140: (dbg) Run:  out/minikube-linux-arm64 kubectl -p ha-621968 -- get pods -o jsonpath='{.items[*].status.podIP}'
ha_test.go:149: expected 3 Pod IPs but got 2 (may be temporary), output: "\n-- stdout --\n\t'10.244.1.2 10.244.0.4'\n\n-- /stdout --"
ha_test.go:140: (dbg) Run:  out/minikube-linux-arm64 kubectl -p ha-621968 -- get pods -o jsonpath='{.items[*].status.podIP}'
ha_test.go:149: expected 3 Pod IPs but got 2 (may be temporary), output: "\n-- stdout --\n\t'10.244.1.2 10.244.0.4'\n\n-- /stdout --"
ha_test.go:140: (dbg) Run:  out/minikube-linux-arm64 kubectl -p ha-621968 -- get pods -o jsonpath='{.items[*].status.podIP}'
ha_test.go:149: expected 3 Pod IPs but got 2 (may be temporary), output: "\n-- stdout --\n\t'10.244.1.2 10.244.0.4'\n\n-- /stdout --"
ha_test.go:140: (dbg) Run:  out/minikube-linux-arm64 kubectl -p ha-621968 -- get pods -o jsonpath='{.items[*].status.podIP}'
ha_test.go:149: expected 3 Pod IPs but got 2 (may be temporary), output: "\n-- stdout --\n\t'10.244.1.2 10.244.0.4'\n\n-- /stdout --"
ha_test.go:140: (dbg) Run:  out/minikube-linux-arm64 kubectl -p ha-621968 -- get pods -o jsonpath='{.items[*].status.podIP}'
ha_test.go:149: expected 3 Pod IPs but got 2 (may be temporary), output: "\n-- stdout --\n\t'10.244.1.2 10.244.0.4'\n\n-- /stdout --"
ha_test.go:140: (dbg) Run:  out/minikube-linux-arm64 kubectl -p ha-621968 -- get pods -o jsonpath='{.items[*].status.podIP}'
ha_test.go:149: expected 3 Pod IPs but got 2 (may be temporary), output: "\n-- stdout --\n\t'10.244.1.2 10.244.0.4'\n\n-- /stdout --"
ha_test.go:140: (dbg) Run:  out/minikube-linux-arm64 kubectl -p ha-621968 -- get pods -o jsonpath='{.items[*].status.podIP}'
ha_test.go:149: expected 3 Pod IPs but got 2 (may be temporary), output: "\n-- stdout --\n\t'10.244.1.2 10.244.0.4'\n\n-- /stdout --"
ha_test.go:140: (dbg) Run:  out/minikube-linux-arm64 kubectl -p ha-621968 -- get pods -o jsonpath='{.items[*].status.podIP}'
ha_test.go:163: (dbg) Run:  out/minikube-linux-arm64 kubectl -p ha-621968 -- get pods -o jsonpath='{.items[*].metadata.name}'
ha_test.go:171: (dbg) Run:  out/minikube-linux-arm64 kubectl -p ha-621968 -- exec busybox-fc5497c4f-8mt4d -- nslookup kubernetes.io
ha_test.go:171: (dbg) Run:  out/minikube-linux-arm64 kubectl -p ha-621968 -- exec busybox-fc5497c4f-nmkr6 -- nslookup kubernetes.io
ha_test.go:171: (dbg) Run:  out/minikube-linux-arm64 kubectl -p ha-621968 -- exec busybox-fc5497c4f-pc47w -- nslookup kubernetes.io
E0620 17:15:36.957319    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/addons-705802/client.crt: no such file or directory
ha_test.go:181: (dbg) Run:  out/minikube-linux-arm64 kubectl -p ha-621968 -- exec busybox-fc5497c4f-8mt4d -- nslookup kubernetes.default
ha_test.go:181: (dbg) Run:  out/minikube-linux-arm64 kubectl -p ha-621968 -- exec busybox-fc5497c4f-nmkr6 -- nslookup kubernetes.default
ha_test.go:181: (dbg) Run:  out/minikube-linux-arm64 kubectl -p ha-621968 -- exec busybox-fc5497c4f-pc47w -- nslookup kubernetes.default
ha_test.go:189: (dbg) Run:  out/minikube-linux-arm64 kubectl -p ha-621968 -- exec busybox-fc5497c4f-8mt4d -- nslookup kubernetes.default.svc.cluster.local
ha_test.go:189: (dbg) Run:  out/minikube-linux-arm64 kubectl -p ha-621968 -- exec busybox-fc5497c4f-nmkr6 -- nslookup kubernetes.default.svc.cluster.local
ha_test.go:189: (dbg) Run:  out/minikube-linux-arm64 kubectl -p ha-621968 -- exec busybox-fc5497c4f-pc47w -- nslookup kubernetes.default.svc.cluster.local
--- PASS: TestMultiControlPlane/serial/DeployApp (41.33s)

                                                
                                    
TestMultiControlPlane/serial/PingHostFromPods (1.73s)

                                                
                                                
=== RUN   TestMultiControlPlane/serial/PingHostFromPods
ha_test.go:199: (dbg) Run:  out/minikube-linux-arm64 kubectl -p ha-621968 -- get pods -o jsonpath='{.items[*].metadata.name}'
ha_test.go:207: (dbg) Run:  out/minikube-linux-arm64 kubectl -p ha-621968 -- exec busybox-fc5497c4f-8mt4d -- sh -c "nslookup host.minikube.internal | awk 'NR==5' | cut -d' ' -f3"
ha_test.go:218: (dbg) Run:  out/minikube-linux-arm64 kubectl -p ha-621968 -- exec busybox-fc5497c4f-8mt4d -- sh -c "ping -c 1 192.168.49.1"
ha_test.go:207: (dbg) Run:  out/minikube-linux-arm64 kubectl -p ha-621968 -- exec busybox-fc5497c4f-nmkr6 -- sh -c "nslookup host.minikube.internal | awk 'NR==5' | cut -d' ' -f3"
ha_test.go:218: (dbg) Run:  out/minikube-linux-arm64 kubectl -p ha-621968 -- exec busybox-fc5497c4f-nmkr6 -- sh -c "ping -c 1 192.168.49.1"
ha_test.go:207: (dbg) Run:  out/minikube-linux-arm64 kubectl -p ha-621968 -- exec busybox-fc5497c4f-pc47w -- sh -c "nslookup host.minikube.internal | awk 'NR==5' | cut -d' ' -f3"
ha_test.go:218: (dbg) Run:  out/minikube-linux-arm64 kubectl -p ha-621968 -- exec busybox-fc5497c4f-pc47w -- sh -c "ping -c 1 192.168.49.1"
--- PASS: TestMultiControlPlane/serial/PingHostFromPods (1.73s)

                                                
                                    
TestMultiControlPlane/serial/AddWorkerNode (25.92s)

                                                
                                                
=== RUN   TestMultiControlPlane/serial/AddWorkerNode
ha_test.go:228: (dbg) Run:  out/minikube-linux-arm64 node add -p ha-621968 -v=7 --alsologtostderr
E0620 17:16:04.643992    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/addons-705802/client.crt: no such file or directory
ha_test.go:228: (dbg) Done: out/minikube-linux-arm64 node add -p ha-621968 -v=7 --alsologtostderr: (24.834477421s)
ha_test.go:234: (dbg) Run:  out/minikube-linux-arm64 -p ha-621968 status -v=7 --alsologtostderr
ha_test.go:234: (dbg) Done: out/minikube-linux-arm64 -p ha-621968 status -v=7 --alsologtostderr: (1.083354393s)
--- PASS: TestMultiControlPlane/serial/AddWorkerNode (25.92s)

                                                
                                    
TestMultiControlPlane/serial/NodeLabels (0.12s)

                                                
                                                
=== RUN   TestMultiControlPlane/serial/NodeLabels
ha_test.go:255: (dbg) Run:  kubectl --context ha-621968 get nodes -o "jsonpath=[{range .items[*]}{.metadata.labels},{end}]"
--- PASS: TestMultiControlPlane/serial/NodeLabels (0.12s)

                                                
                                    
TestMultiControlPlane/serial/HAppyAfterClusterStart (0.74s)

                                                
                                                
=== RUN   TestMultiControlPlane/serial/HAppyAfterClusterStart
ha_test.go:281: (dbg) Run:  out/minikube-linux-arm64 profile list --output json
--- PASS: TestMultiControlPlane/serial/HAppyAfterClusterStart (0.74s)

                                                
                                    
TestMultiControlPlane/serial/CopyFile (19.06s)

                                                
                                                
=== RUN   TestMultiControlPlane/serial/CopyFile
ha_test.go:326: (dbg) Run:  out/minikube-linux-arm64 -p ha-621968 status --output json -v=7 --alsologtostderr
ha_test.go:326: (dbg) Done: out/minikube-linux-arm64 -p ha-621968 status --output json -v=7 --alsologtostderr: (1.025546926s)
helpers_test.go:556: (dbg) Run:  out/minikube-linux-arm64 -p ha-621968 cp testdata/cp-test.txt ha-621968:/home/docker/cp-test.txt
helpers_test.go:534: (dbg) Run:  out/minikube-linux-arm64 -p ha-621968 ssh -n ha-621968 "sudo cat /home/docker/cp-test.txt"
helpers_test.go:556: (dbg) Run:  out/minikube-linux-arm64 -p ha-621968 cp ha-621968:/home/docker/cp-test.txt /tmp/TestMultiControlPlaneserialCopyFile940065394/001/cp-test_ha-621968.txt
helpers_test.go:534: (dbg) Run:  out/minikube-linux-arm64 -p ha-621968 ssh -n ha-621968 "sudo cat /home/docker/cp-test.txt"
helpers_test.go:556: (dbg) Run:  out/minikube-linux-arm64 -p ha-621968 cp ha-621968:/home/docker/cp-test.txt ha-621968-m02:/home/docker/cp-test_ha-621968_ha-621968-m02.txt
helpers_test.go:534: (dbg) Run:  out/minikube-linux-arm64 -p ha-621968 ssh -n ha-621968 "sudo cat /home/docker/cp-test.txt"
helpers_test.go:534: (dbg) Run:  out/minikube-linux-arm64 -p ha-621968 ssh -n ha-621968-m02 "sudo cat /home/docker/cp-test_ha-621968_ha-621968-m02.txt"
helpers_test.go:556: (dbg) Run:  out/minikube-linux-arm64 -p ha-621968 cp ha-621968:/home/docker/cp-test.txt ha-621968-m03:/home/docker/cp-test_ha-621968_ha-621968-m03.txt
helpers_test.go:534: (dbg) Run:  out/minikube-linux-arm64 -p ha-621968 ssh -n ha-621968 "sudo cat /home/docker/cp-test.txt"
helpers_test.go:534: (dbg) Run:  out/minikube-linux-arm64 -p ha-621968 ssh -n ha-621968-m03 "sudo cat /home/docker/cp-test_ha-621968_ha-621968-m03.txt"
helpers_test.go:556: (dbg) Run:  out/minikube-linux-arm64 -p ha-621968 cp ha-621968:/home/docker/cp-test.txt ha-621968-m04:/home/docker/cp-test_ha-621968_ha-621968-m04.txt
helpers_test.go:534: (dbg) Run:  out/minikube-linux-arm64 -p ha-621968 ssh -n ha-621968 "sudo cat /home/docker/cp-test.txt"
helpers_test.go:534: (dbg) Run:  out/minikube-linux-arm64 -p ha-621968 ssh -n ha-621968-m04 "sudo cat /home/docker/cp-test_ha-621968_ha-621968-m04.txt"
helpers_test.go:556: (dbg) Run:  out/minikube-linux-arm64 -p ha-621968 cp testdata/cp-test.txt ha-621968-m02:/home/docker/cp-test.txt
helpers_test.go:534: (dbg) Run:  out/minikube-linux-arm64 -p ha-621968 ssh -n ha-621968-m02 "sudo cat /home/docker/cp-test.txt"
helpers_test.go:556: (dbg) Run:  out/minikube-linux-arm64 -p ha-621968 cp ha-621968-m02:/home/docker/cp-test.txt /tmp/TestMultiControlPlaneserialCopyFile940065394/001/cp-test_ha-621968-m02.txt
helpers_test.go:534: (dbg) Run:  out/minikube-linux-arm64 -p ha-621968 ssh -n ha-621968-m02 "sudo cat /home/docker/cp-test.txt"
helpers_test.go:556: (dbg) Run:  out/minikube-linux-arm64 -p ha-621968 cp ha-621968-m02:/home/docker/cp-test.txt ha-621968:/home/docker/cp-test_ha-621968-m02_ha-621968.txt
helpers_test.go:534: (dbg) Run:  out/minikube-linux-arm64 -p ha-621968 ssh -n ha-621968-m02 "sudo cat /home/docker/cp-test.txt"
helpers_test.go:534: (dbg) Run:  out/minikube-linux-arm64 -p ha-621968 ssh -n ha-621968 "sudo cat /home/docker/cp-test_ha-621968-m02_ha-621968.txt"
helpers_test.go:556: (dbg) Run:  out/minikube-linux-arm64 -p ha-621968 cp ha-621968-m02:/home/docker/cp-test.txt ha-621968-m03:/home/docker/cp-test_ha-621968-m02_ha-621968-m03.txt
helpers_test.go:534: (dbg) Run:  out/minikube-linux-arm64 -p ha-621968 ssh -n ha-621968-m02 "sudo cat /home/docker/cp-test.txt"
helpers_test.go:534: (dbg) Run:  out/minikube-linux-arm64 -p ha-621968 ssh -n ha-621968-m03 "sudo cat /home/docker/cp-test_ha-621968-m02_ha-621968-m03.txt"
helpers_test.go:556: (dbg) Run:  out/minikube-linux-arm64 -p ha-621968 cp ha-621968-m02:/home/docker/cp-test.txt ha-621968-m04:/home/docker/cp-test_ha-621968-m02_ha-621968-m04.txt
helpers_test.go:534: (dbg) Run:  out/minikube-linux-arm64 -p ha-621968 ssh -n ha-621968-m02 "sudo cat /home/docker/cp-test.txt"
helpers_test.go:534: (dbg) Run:  out/minikube-linux-arm64 -p ha-621968 ssh -n ha-621968-m04 "sudo cat /home/docker/cp-test_ha-621968-m02_ha-621968-m04.txt"
helpers_test.go:556: (dbg) Run:  out/minikube-linux-arm64 -p ha-621968 cp testdata/cp-test.txt ha-621968-m03:/home/docker/cp-test.txt
helpers_test.go:534: (dbg) Run:  out/minikube-linux-arm64 -p ha-621968 ssh -n ha-621968-m03 "sudo cat /home/docker/cp-test.txt"
helpers_test.go:556: (dbg) Run:  out/minikube-linux-arm64 -p ha-621968 cp ha-621968-m03:/home/docker/cp-test.txt /tmp/TestMultiControlPlaneserialCopyFile940065394/001/cp-test_ha-621968-m03.txt
helpers_test.go:534: (dbg) Run:  out/minikube-linux-arm64 -p ha-621968 ssh -n ha-621968-m03 "sudo cat /home/docker/cp-test.txt"
helpers_test.go:556: (dbg) Run:  out/minikube-linux-arm64 -p ha-621968 cp ha-621968-m03:/home/docker/cp-test.txt ha-621968:/home/docker/cp-test_ha-621968-m03_ha-621968.txt
helpers_test.go:534: (dbg) Run:  out/minikube-linux-arm64 -p ha-621968 ssh -n ha-621968-m03 "sudo cat /home/docker/cp-test.txt"
helpers_test.go:534: (dbg) Run:  out/minikube-linux-arm64 -p ha-621968 ssh -n ha-621968 "sudo cat /home/docker/cp-test_ha-621968-m03_ha-621968.txt"
helpers_test.go:556: (dbg) Run:  out/minikube-linux-arm64 -p ha-621968 cp ha-621968-m03:/home/docker/cp-test.txt ha-621968-m02:/home/docker/cp-test_ha-621968-m03_ha-621968-m02.txt
helpers_test.go:534: (dbg) Run:  out/minikube-linux-arm64 -p ha-621968 ssh -n ha-621968-m03 "sudo cat /home/docker/cp-test.txt"
helpers_test.go:534: (dbg) Run:  out/minikube-linux-arm64 -p ha-621968 ssh -n ha-621968-m02 "sudo cat /home/docker/cp-test_ha-621968-m03_ha-621968-m02.txt"
helpers_test.go:556: (dbg) Run:  out/minikube-linux-arm64 -p ha-621968 cp ha-621968-m03:/home/docker/cp-test.txt ha-621968-m04:/home/docker/cp-test_ha-621968-m03_ha-621968-m04.txt
helpers_test.go:534: (dbg) Run:  out/minikube-linux-arm64 -p ha-621968 ssh -n ha-621968-m03 "sudo cat /home/docker/cp-test.txt"
helpers_test.go:534: (dbg) Run:  out/minikube-linux-arm64 -p ha-621968 ssh -n ha-621968-m04 "sudo cat /home/docker/cp-test_ha-621968-m03_ha-621968-m04.txt"
helpers_test.go:556: (dbg) Run:  out/minikube-linux-arm64 -p ha-621968 cp testdata/cp-test.txt ha-621968-m04:/home/docker/cp-test.txt
helpers_test.go:534: (dbg) Run:  out/minikube-linux-arm64 -p ha-621968 ssh -n ha-621968-m04 "sudo cat /home/docker/cp-test.txt"
helpers_test.go:556: (dbg) Run:  out/minikube-linux-arm64 -p ha-621968 cp ha-621968-m04:/home/docker/cp-test.txt /tmp/TestMultiControlPlaneserialCopyFile940065394/001/cp-test_ha-621968-m04.txt
helpers_test.go:534: (dbg) Run:  out/minikube-linux-arm64 -p ha-621968 ssh -n ha-621968-m04 "sudo cat /home/docker/cp-test.txt"
helpers_test.go:556: (dbg) Run:  out/minikube-linux-arm64 -p ha-621968 cp ha-621968-m04:/home/docker/cp-test.txt ha-621968:/home/docker/cp-test_ha-621968-m04_ha-621968.txt
helpers_test.go:534: (dbg) Run:  out/minikube-linux-arm64 -p ha-621968 ssh -n ha-621968-m04 "sudo cat /home/docker/cp-test.txt"
helpers_test.go:534: (dbg) Run:  out/minikube-linux-arm64 -p ha-621968 ssh -n ha-621968 "sudo cat /home/docker/cp-test_ha-621968-m04_ha-621968.txt"
helpers_test.go:556: (dbg) Run:  out/minikube-linux-arm64 -p ha-621968 cp ha-621968-m04:/home/docker/cp-test.txt ha-621968-m02:/home/docker/cp-test_ha-621968-m04_ha-621968-m02.txt
helpers_test.go:534: (dbg) Run:  out/minikube-linux-arm64 -p ha-621968 ssh -n ha-621968-m04 "sudo cat /home/docker/cp-test.txt"
helpers_test.go:534: (dbg) Run:  out/minikube-linux-arm64 -p ha-621968 ssh -n ha-621968-m02 "sudo cat /home/docker/cp-test_ha-621968-m04_ha-621968-m02.txt"
helpers_test.go:556: (dbg) Run:  out/minikube-linux-arm64 -p ha-621968 cp ha-621968-m04:/home/docker/cp-test.txt ha-621968-m03:/home/docker/cp-test_ha-621968-m04_ha-621968-m03.txt
helpers_test.go:534: (dbg) Run:  out/minikube-linux-arm64 -p ha-621968 ssh -n ha-621968-m04 "sudo cat /home/docker/cp-test.txt"
helpers_test.go:534: (dbg) Run:  out/minikube-linux-arm64 -p ha-621968 ssh -n ha-621968-m03 "sudo cat /home/docker/cp-test_ha-621968-m04_ha-621968-m03.txt"
--- PASS: TestMultiControlPlane/serial/CopyFile (19.06s)

                                                
                                    
TestMultiControlPlane/serial/StopSecondaryNode (11.84s)

                                                
                                                
=== RUN   TestMultiControlPlane/serial/StopSecondaryNode
ha_test.go:363: (dbg) Run:  out/minikube-linux-arm64 -p ha-621968 node stop m02 -v=7 --alsologtostderr
E0620 17:16:33.078963    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/functional-493344/client.crt: no such file or directory
E0620 17:16:33.084292    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/functional-493344/client.crt: no such file or directory
E0620 17:16:33.094577    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/functional-493344/client.crt: no such file or directory
E0620 17:16:33.114811    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/functional-493344/client.crt: no such file or directory
E0620 17:16:33.155066    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/functional-493344/client.crt: no such file or directory
E0620 17:16:33.235329    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/functional-493344/client.crt: no such file or directory
E0620 17:16:33.395630    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/functional-493344/client.crt: no such file or directory
E0620 17:16:33.716203    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/functional-493344/client.crt: no such file or directory
E0620 17:16:34.356982    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/functional-493344/client.crt: no such file or directory
E0620 17:16:35.637188    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/functional-493344/client.crt: no such file or directory
ha_test.go:363: (dbg) Done: out/minikube-linux-arm64 -p ha-621968 node stop m02 -v=7 --alsologtostderr: (11.02813902s)
ha_test.go:369: (dbg) Run:  out/minikube-linux-arm64 -p ha-621968 status -v=7 --alsologtostderr
ha_test.go:369: (dbg) Non-zero exit: out/minikube-linux-arm64 -p ha-621968 status -v=7 --alsologtostderr: exit status 7 (811.600529ms)

                                                
                                                
-- stdout --
	ha-621968
	type: Control Plane
	host: Running
	kubelet: Running
	apiserver: Running
	kubeconfig: Configured
	
	ha-621968-m02
	type: Control Plane
	host: Stopped
	kubelet: Stopped
	apiserver: Stopped
	kubeconfig: Stopped
	
	ha-621968-m03
	type: Control Plane
	host: Running
	kubelet: Running
	apiserver: Running
	kubeconfig: Configured
	
	ha-621968-m04
	type: Worker
	host: Running
	kubelet: Running
	

                                                
                                                
-- /stdout --
** stderr ** 
	I0620 17:16:37.172042   69716 out.go:291] Setting OutFile to fd 1 ...
	I0620 17:16:37.172241   69716 out.go:338] TERM=,COLORTERM=, which probably does not support color
	I0620 17:16:37.172267   69716 out.go:304] Setting ErrFile to fd 2...
	I0620 17:16:37.172287   69716 out.go:338] TERM=,COLORTERM=, which probably does not support color
	I0620 17:16:37.172656   69716 root.go:338] Updating PATH: /home/jenkins/minikube-integration/19106-2452/.minikube/bin
	I0620 17:16:37.172961   69716 out.go:298] Setting JSON to false
	I0620 17:16:37.173033   69716 mustload.go:65] Loading cluster: ha-621968
	I0620 17:16:37.173134   69716 notify.go:220] Checking for updates...
	I0620 17:16:37.173504   69716 config.go:182] Loaded profile config "ha-621968": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.30.2
	I0620 17:16:37.173543   69716 status.go:255] checking status of ha-621968 ...
	I0620 17:16:37.174396   69716 cli_runner.go:164] Run: docker container inspect ha-621968 --format={{.State.Status}}
	I0620 17:16:37.193135   69716 status.go:330] ha-621968 host status = "Running" (err=<nil>)
	I0620 17:16:37.193167   69716 host.go:66] Checking if "ha-621968" exists ...
	I0620 17:16:37.193470   69716 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" ha-621968
	I0620 17:16:37.218421   69716 host.go:66] Checking if "ha-621968" exists ...
	I0620 17:16:37.219301   69716 ssh_runner.go:195] Run: sh -c "df -h /var | awk 'NR==2{print $5}'"
	I0620 17:16:37.219372   69716 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-621968
	I0620 17:16:37.239410   69716 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32787 SSHKeyPath:/home/jenkins/minikube-integration/19106-2452/.minikube/machines/ha-621968/id_rsa Username:docker}
	I0620 17:16:37.348492   69716 ssh_runner.go:195] Run: systemctl --version
	I0620 17:16:37.353516   69716 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet
	I0620 17:16:37.367280   69716 cli_runner.go:164] Run: docker system info --format "{{json .}}"
	I0620 17:16:37.445634   69716 info.go:266] docker info: {ID:6ZPO:QZND:VNGE:LUKL:4Y3K:XELL:AAX4:2GTK:E6LM:MPRN:3ZXR:TTMR Containers:4 ContainersRunning:3 ContainersPaused:0 ContainersStopped:1 Images:3 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:true BridgeNfIP6Tables:true Debug:false NFd:53 OomKillDisable:true NGoroutines:72 SystemTime:2024-06-20 17:16:37.434952389 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1063-aws OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Architecture:aar
ch64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:2 MemTotal:8214900736 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ip-172-31-30-239 Labels:[] ExperimentalBuild:false ServerVersion:26.1.4 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:d2d58213f83a351ca8f528a95fbd145f5654e957 Expected:d2d58213f83a351ca8f528a95fbd145f5654e957} RuncCommit:{ID:v1.1.12-0-g51d5e94 Expected:v1.1.12-0-g51d5e94} InitCommit:{ID:de40ad0 Expected:de40ad0} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:<nil> ServerErro
rs:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.14.1] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.27.1]] Warnings:<nil>}}
	I0620 17:16:37.446208   69716 kubeconfig.go:125] found "ha-621968" server: "https://192.168.49.254:8443"
	I0620 17:16:37.446238   69716 api_server.go:166] Checking apiserver status ...
	I0620 17:16:37.446291   69716 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
	I0620 17:16:37.458384   69716 ssh_runner.go:195] Run: sudo egrep ^[0-9]+:freezer: /proc/2203/cgroup
	I0620 17:16:37.469114   69716 api_server.go:182] apiserver freezer: "6:freezer:/docker/de252c9563552394b2829f9a3cb133df453f6b97fa34af3e20ad9cbe749e4808/kubepods/burstable/podd7461b9c8b26248a34f87dcfd467dc9c/86f08f980f5886be90273a3ad62338d4f4f97b5f045b83d5f0b14fe39639a4ba"
	I0620 17:16:37.469232   69716 ssh_runner.go:195] Run: sudo cat /sys/fs/cgroup/freezer/docker/de252c9563552394b2829f9a3cb133df453f6b97fa34af3e20ad9cbe749e4808/kubepods/burstable/podd7461b9c8b26248a34f87dcfd467dc9c/86f08f980f5886be90273a3ad62338d4f4f97b5f045b83d5f0b14fe39639a4ba/freezer.state
	I0620 17:16:37.477910   69716 api_server.go:204] freezer state: "THAWED"
	I0620 17:16:37.477939   69716 api_server.go:253] Checking apiserver healthz at https://192.168.49.254:8443/healthz ...
	I0620 17:16:37.485775   69716 api_server.go:279] https://192.168.49.254:8443/healthz returned 200:
	ok
	I0620 17:16:37.485805   69716 status.go:422] ha-621968 apiserver status = Running (err=<nil>)
	I0620 17:16:37.485824   69716 status.go:257] ha-621968 status: &{Name:ha-621968 Host:Running Kubelet:Running APIServer:Running Kubeconfig:Configured Worker:false TimeToStop: DockerEnv: PodManEnv:}
	I0620 17:16:37.485862   69716 status.go:255] checking status of ha-621968-m02 ...
	I0620 17:16:37.486211   69716 cli_runner.go:164] Run: docker container inspect ha-621968-m02 --format={{.State.Status}}
	I0620 17:16:37.509740   69716 status.go:330] ha-621968-m02 host status = "Stopped" (err=<nil>)
	I0620 17:16:37.509767   69716 status.go:343] host is not running, skipping remaining checks
	I0620 17:16:37.509775   69716 status.go:257] ha-621968-m02 status: &{Name:ha-621968-m02 Host:Stopped Kubelet:Stopped APIServer:Stopped Kubeconfig:Stopped Worker:false TimeToStop: DockerEnv: PodManEnv:}
	I0620 17:16:37.509814   69716 status.go:255] checking status of ha-621968-m03 ...
	I0620 17:16:37.510303   69716 cli_runner.go:164] Run: docker container inspect ha-621968-m03 --format={{.State.Status}}
	I0620 17:16:37.528913   69716 status.go:330] ha-621968-m03 host status = "Running" (err=<nil>)
	I0620 17:16:37.528942   69716 host.go:66] Checking if "ha-621968-m03" exists ...
	I0620 17:16:37.529288   69716 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" ha-621968-m03
	I0620 17:16:37.548204   69716 host.go:66] Checking if "ha-621968-m03" exists ...
	I0620 17:16:37.548522   69716 ssh_runner.go:195] Run: sh -c "df -h /var | awk 'NR==2{print $5}'"
	I0620 17:16:37.548563   69716 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-621968-m03
	I0620 17:16:37.575057   69716 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32797 SSHKeyPath:/home/jenkins/minikube-integration/19106-2452/.minikube/machines/ha-621968-m03/id_rsa Username:docker}
	I0620 17:16:37.668496   69716 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet
	I0620 17:16:37.682734   69716 kubeconfig.go:125] found "ha-621968" server: "https://192.168.49.254:8443"
	I0620 17:16:37.682765   69716 api_server.go:166] Checking apiserver status ...
	I0620 17:16:37.682807   69716 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
	I0620 17:16:37.695543   69716 ssh_runner.go:195] Run: sudo egrep ^[0-9]+:freezer: /proc/2033/cgroup
	I0620 17:16:37.706808   69716 api_server.go:182] apiserver freezer: "6:freezer:/docker/475543bd85c2046515cee0a44a0d225ae8d6c5b728bb8a950d8904846eda528b/kubepods/burstable/podd8fdb5287279d2e471543642da81d649/251001439d17f76d2f35bf122517cb35c72caf797d9cf94055e2100f6f7c5e15"
	I0620 17:16:37.706895   69716 ssh_runner.go:195] Run: sudo cat /sys/fs/cgroup/freezer/docker/475543bd85c2046515cee0a44a0d225ae8d6c5b728bb8a950d8904846eda528b/kubepods/burstable/podd8fdb5287279d2e471543642da81d649/251001439d17f76d2f35bf122517cb35c72caf797d9cf94055e2100f6f7c5e15/freezer.state
	I0620 17:16:37.716346   69716 api_server.go:204] freezer state: "THAWED"
	I0620 17:16:37.716423   69716 api_server.go:253] Checking apiserver healthz at https://192.168.49.254:8443/healthz ...
	I0620 17:16:37.724165   69716 api_server.go:279] https://192.168.49.254:8443/healthz returned 200:
	ok
	I0620 17:16:37.724194   69716 status.go:422] ha-621968-m03 apiserver status = Running (err=<nil>)
	I0620 17:16:37.724205   69716 status.go:257] ha-621968-m03 status: &{Name:ha-621968-m03 Host:Running Kubelet:Running APIServer:Running Kubeconfig:Configured Worker:false TimeToStop: DockerEnv: PodManEnv:}
	I0620 17:16:37.724221   69716 status.go:255] checking status of ha-621968-m04 ...
	I0620 17:16:37.724535   69716 cli_runner.go:164] Run: docker container inspect ha-621968-m04 --format={{.State.Status}}
	I0620 17:16:37.746047   69716 status.go:330] ha-621968-m04 host status = "Running" (err=<nil>)
	I0620 17:16:37.746083   69716 host.go:66] Checking if "ha-621968-m04" exists ...
	I0620 17:16:37.746403   69716 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" ha-621968-m04
	I0620 17:16:37.777178   69716 host.go:66] Checking if "ha-621968-m04" exists ...
	I0620 17:16:37.777497   69716 ssh_runner.go:195] Run: sh -c "df -h /var | awk 'NR==2{print $5}'"
	I0620 17:16:37.777542   69716 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-621968-m04
	I0620 17:16:37.805305   69716 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32802 SSHKeyPath:/home/jenkins/minikube-integration/19106-2452/.minikube/machines/ha-621968-m04/id_rsa Username:docker}
	I0620 17:16:37.904484   69716 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet
	I0620 17:16:37.916651   69716 status.go:257] ha-621968-m04 status: &{Name:ha-621968-m04 Host:Running Kubelet:Running APIServer:Irrelevant Kubeconfig:Irrelevant Worker:true TimeToStop: DockerEnv: PodManEnv:}

                                                
                                                
** /stderr **
--- PASS: TestMultiControlPlane/serial/StopSecondaryNode (11.84s)
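
The stderr block above is minikube's per-node health walk: find the kube-apiserver process, map it to its freezer cgroup, then probe the HA endpoint. A minimal sketch of the same checks run by hand (the PID and load-balancer IP are specific to this run and will differ elsewhere):

	# locate the kube-apiserver process inside the primary control-plane node
	out/minikube-linux-arm64 -p ha-621968 ssh -- sudo pgrep -xnf 'kube-apiserver.*minikube.*'
	# map that PID to its freezer cgroup (the status code then reads freezer.state under that path)
	out/minikube-linux-arm64 -p ha-621968 ssh -- sudo egrep '^[0-9]+:freezer:' /proc/2203/cgroup
	# finally hit the load-balanced apiserver endpoint; "ok" means healthy
	curl -ks https://192.168.49.254:8443/healthz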

                                                
                                    
x
+
TestMultiControlPlane/serial/DegradedAfterControlPlaneNodeStop (0.54s)

                                                
                                                
=== RUN   TestMultiControlPlane/serial/DegradedAfterControlPlaneNodeStop
ha_test.go:390: (dbg) Run:  out/minikube-linux-arm64 profile list --output json
E0620 17:16:38.197720    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/functional-493344/client.crt: no such file or directory
--- PASS: TestMultiControlPlane/serial/DegradedAfterControlPlaneNodeStop (0.54s)
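
The degraded/healthy verdict comes solely from "profile list --output json". A sketch for inspecting the same data; jq and the .valid[].Name / .Status field names are assumptions here, not taken from this log:

	# print each profile's name and reported status from the JSON profile list
	out/minikube-linux-arm64 profile list --output json \
	    | jq -r '.valid[] | "\(.Name)  \(.Status)"'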

                                                
                                    
x
+
TestMultiControlPlane/serial/RestartSecondaryNode (62.92s)

                                                
                                                
=== RUN   TestMultiControlPlane/serial/RestartSecondaryNode
ha_test.go:420: (dbg) Run:  out/minikube-linux-arm64 -p ha-621968 node start m02 -v=7 --alsologtostderr
E0620 17:16:43.319550    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/functional-493344/client.crt: no such file or directory
E0620 17:16:53.560651    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/functional-493344/client.crt: no such file or directory
E0620 17:17:14.041347    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/functional-493344/client.crt: no such file or directory
ha_test.go:420: (dbg) Done: out/minikube-linux-arm64 -p ha-621968 node start m02 -v=7 --alsologtostderr: (1m1.69376693s)
ha_test.go:428: (dbg) Run:  out/minikube-linux-arm64 -p ha-621968 status -v=7 --alsologtostderr
ha_test.go:428: (dbg) Done: out/minikube-linux-arm64 -p ha-621968 status -v=7 --alsologtostderr: (1.116287604s)
ha_test.go:448: (dbg) Run:  kubectl get nodes
--- PASS: TestMultiControlPlane/serial/RestartSecondaryNode (62.92s)
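
The same restart-and-verify flow outside the harness, using the commands logged above:

	# bring the stopped secondary control-plane node back and wait for it to rejoin
	out/minikube-linux-arm64 -p ha-621968 node start m02 -v=7 --alsologtostderr
	# every host, kubelet and apiserver should report Running again
	out/minikube-linux-arm64 -p ha-621968 status -v=7 --alsologtostderr
	# and Kubernetes should list all nodes
	kubectl get nodes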

                                                
                                    
x
+
TestMultiControlPlane/serial/HAppyAfterSecondaryNodeRestart (0.77s)

                                                
                                                
=== RUN   TestMultiControlPlane/serial/HAppyAfterSecondaryNodeRestart
ha_test.go:281: (dbg) Run:  out/minikube-linux-arm64 profile list --output json
--- PASS: TestMultiControlPlane/serial/HAppyAfterSecondaryNodeRestart (0.77s)

                                                
                                    
x
+
TestMultiControlPlane/serial/RestartClusterKeepsNodes (215.66s)

                                                
                                                
=== RUN   TestMultiControlPlane/serial/RestartClusterKeepsNodes
ha_test.go:456: (dbg) Run:  out/minikube-linux-arm64 node list -p ha-621968 -v=7 --alsologtostderr
ha_test.go:462: (dbg) Run:  out/minikube-linux-arm64 stop -p ha-621968 -v=7 --alsologtostderr
E0620 17:17:55.001797    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/functional-493344/client.crt: no such file or directory
ha_test.go:462: (dbg) Done: out/minikube-linux-arm64 stop -p ha-621968 -v=7 --alsologtostderr: (34.222798899s)
ha_test.go:467: (dbg) Run:  out/minikube-linux-arm64 start -p ha-621968 --wait=true -v=7 --alsologtostderr
E0620 17:19:16.923140    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/functional-493344/client.crt: no such file or directory
E0620 17:20:36.957223    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/addons-705802/client.crt: no such file or directory
ha_test.go:467: (dbg) Done: out/minikube-linux-arm64 start -p ha-621968 --wait=true -v=7 --alsologtostderr: (3m1.283925878s)
ha_test.go:472: (dbg) Run:  out/minikube-linux-arm64 node list -p ha-621968
--- PASS: TestMultiControlPlane/serial/RestartClusterKeepsNodes (215.66s)
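
A condensed sketch of the stop/restart round trip this test performs; the point is that the node list recorded before the stop matches the one after the --wait=true restart:

	# snapshot the node list, stop every node, then restart the whole HA cluster
	out/minikube-linux-arm64 node list -p ha-621968
	out/minikube-linux-arm64 stop -p ha-621968 -v=7 --alsologtostderr
	out/minikube-linux-arm64 start -p ha-621968 --wait=true -v=7 --alsologtostderr
	# the post-restart node list should match the snapshot taken above
	out/minikube-linux-arm64 node list -p ha-621968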

                                                
                                    
x
+
TestMultiControlPlane/serial/DeleteSecondaryNode (12.56s)

                                                
                                                
=== RUN   TestMultiControlPlane/serial/DeleteSecondaryNode
ha_test.go:487: (dbg) Run:  out/minikube-linux-arm64 -p ha-621968 node delete m03 -v=7 --alsologtostderr
ha_test.go:487: (dbg) Done: out/minikube-linux-arm64 -p ha-621968 node delete m03 -v=7 --alsologtostderr: (11.593070664s)
ha_test.go:493: (dbg) Run:  out/minikube-linux-arm64 -p ha-621968 status -v=7 --alsologtostderr
ha_test.go:511: (dbg) Run:  kubectl get nodes
ha_test.go:519: (dbg) Run:  kubectl get nodes -o "go-template='{{range .items}}{{range .status.conditions}}{{if eq .type "Ready"}} {{.status}}{{"\n"}}{{end}}{{end}}{{end}}'"
--- PASS: TestMultiControlPlane/serial/DeleteSecondaryNode (12.56s)
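
After deleting m03 the test re-checks that every remaining node is Ready via a go-template; an equivalent jsonpath phrasing (an alternative form, not what the test itself runs):

	# drop the m03 control-plane node, then confirm the remaining nodes are all Ready
	out/minikube-linux-arm64 -p ha-621968 node delete m03 -v=7 --alsologtostderr
	kubectl get nodes -o jsonpath='{range .items[*]}{.metadata.name}{"\t"}{.status.conditions[?(@.type=="Ready")].status}{"\n"}{end}'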

                                                
                                    
x
+
TestMultiControlPlane/serial/DegradedAfterSecondaryNodeDelete (0.57s)

                                                
                                                
=== RUN   TestMultiControlPlane/serial/DegradedAfterSecondaryNodeDelete
ha_test.go:390: (dbg) Run:  out/minikube-linux-arm64 profile list --output json
--- PASS: TestMultiControlPlane/serial/DegradedAfterSecondaryNodeDelete (0.57s)

                                                
                                    
x
+
TestMultiControlPlane/serial/StopCluster (32.61s)

                                                
                                                
=== RUN   TestMultiControlPlane/serial/StopCluster
ha_test.go:531: (dbg) Run:  out/minikube-linux-arm64 -p ha-621968 stop -v=7 --alsologtostderr
E0620 17:21:33.078440    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/functional-493344/client.crt: no such file or directory
E0620 17:22:00.763383    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/functional-493344/client.crt: no such file or directory
ha_test.go:531: (dbg) Done: out/minikube-linux-arm64 -p ha-621968 stop -v=7 --alsologtostderr: (32.505970101s)
ha_test.go:537: (dbg) Run:  out/minikube-linux-arm64 -p ha-621968 status -v=7 --alsologtostderr
ha_test.go:537: (dbg) Non-zero exit: out/minikube-linux-arm64 -p ha-621968 status -v=7 --alsologtostderr: exit status 7 (105.613619ms)

                                                
                                                
-- stdout --
	ha-621968
	type: Control Plane
	host: Stopped
	kubelet: Stopped
	apiserver: Stopped
	kubeconfig: Stopped
	
	ha-621968-m02
	type: Control Plane
	host: Stopped
	kubelet: Stopped
	apiserver: Stopped
	kubeconfig: Stopped
	
	ha-621968-m04
	type: Worker
	host: Stopped
	kubelet: Stopped
	

                                                
                                                
-- /stdout --
** stderr ** 
	I0620 17:22:03.495158   95187 out.go:291] Setting OutFile to fd 1 ...
	I0620 17:22:03.495358   95187 out.go:338] TERM=,COLORTERM=, which probably does not support color
	I0620 17:22:03.495386   95187 out.go:304] Setting ErrFile to fd 2...
	I0620 17:22:03.495406   95187 out.go:338] TERM=,COLORTERM=, which probably does not support color
	I0620 17:22:03.495801   95187 root.go:338] Updating PATH: /home/jenkins/minikube-integration/19106-2452/.minikube/bin
	I0620 17:22:03.496059   95187 out.go:298] Setting JSON to false
	I0620 17:22:03.496098   95187 mustload.go:65] Loading cluster: ha-621968
	I0620 17:22:03.496858   95187 config.go:182] Loaded profile config "ha-621968": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.30.2
	I0620 17:22:03.496900   95187 status.go:255] checking status of ha-621968 ...
	I0620 17:22:03.497565   95187 notify.go:220] Checking for updates...
	I0620 17:22:03.498013   95187 cli_runner.go:164] Run: docker container inspect ha-621968 --format={{.State.Status}}
	I0620 17:22:03.515902   95187 status.go:330] ha-621968 host status = "Stopped" (err=<nil>)
	I0620 17:22:03.515924   95187 status.go:343] host is not running, skipping remaining checks
	I0620 17:22:03.515931   95187 status.go:257] ha-621968 status: &{Name:ha-621968 Host:Stopped Kubelet:Stopped APIServer:Stopped Kubeconfig:Stopped Worker:false TimeToStop: DockerEnv: PodManEnv:}
	I0620 17:22:03.515956   95187 status.go:255] checking status of ha-621968-m02 ...
	I0620 17:22:03.516261   95187 cli_runner.go:164] Run: docker container inspect ha-621968-m02 --format={{.State.Status}}
	I0620 17:22:03.536594   95187 status.go:330] ha-621968-m02 host status = "Stopped" (err=<nil>)
	I0620 17:22:03.536617   95187 status.go:343] host is not running, skipping remaining checks
	I0620 17:22:03.536626   95187 status.go:257] ha-621968-m02 status: &{Name:ha-621968-m02 Host:Stopped Kubelet:Stopped APIServer:Stopped Kubeconfig:Stopped Worker:false TimeToStop: DockerEnv: PodManEnv:}
	I0620 17:22:03.536644   95187 status.go:255] checking status of ha-621968-m04 ...
	I0620 17:22:03.536933   95187 cli_runner.go:164] Run: docker container inspect ha-621968-m04 --format={{.State.Status}}
	I0620 17:22:03.556083   95187 status.go:330] ha-621968-m04 host status = "Stopped" (err=<nil>)
	I0620 17:22:03.556103   95187 status.go:343] host is not running, skipping remaining checks
	I0620 17:22:03.556111   95187 status.go:257] ha-621968-m04 status: &{Name:ha-621968-m04 Host:Stopped Kubelet:Stopped APIServer:Stopped Kubeconfig:Stopped Worker:true TimeToStop: DockerEnv: PodManEnv:}

                                                
                                                
** /stderr **
--- PASS: TestMultiControlPlane/serial/StopCluster (32.61s)

                                                
                                    
x
+
TestMultiControlPlane/serial/RestartCluster (86.11s)

                                                
                                                
=== RUN   TestMultiControlPlane/serial/RestartCluster
ha_test.go:560: (dbg) Run:  out/minikube-linux-arm64 start -p ha-621968 --wait=true -v=7 --alsologtostderr --driver=docker  --container-runtime=docker
ha_test.go:560: (dbg) Done: out/minikube-linux-arm64 start -p ha-621968 --wait=true -v=7 --alsologtostderr --driver=docker  --container-runtime=docker: (1m25.164340607s)
ha_test.go:566: (dbg) Run:  out/minikube-linux-arm64 -p ha-621968 status -v=7 --alsologtostderr
ha_test.go:584: (dbg) Run:  kubectl get nodes
ha_test.go:592: (dbg) Run:  kubectl get nodes -o "go-template='{{range .items}}{{range .status.conditions}}{{if eq .type "Ready"}} {{.status}}{{"\n"}}{{end}}{{end}}{{end}}'"
--- PASS: TestMultiControlPlane/serial/RestartCluster (86.11s)

                                                
                                    
x
+
TestMultiControlPlane/serial/DegradedAfterClusterRestart (0.58s)

                                                
                                                
=== RUN   TestMultiControlPlane/serial/DegradedAfterClusterRestart
ha_test.go:390: (dbg) Run:  out/minikube-linux-arm64 profile list --output json
--- PASS: TestMultiControlPlane/serial/DegradedAfterClusterRestart (0.58s)

                                                
                                    
x
+
TestMultiControlPlane/serial/AddSecondaryNode (44.78s)

                                                
                                                
=== RUN   TestMultiControlPlane/serial/AddSecondaryNode
ha_test.go:605: (dbg) Run:  out/minikube-linux-arm64 node add -p ha-621968 --control-plane -v=7 --alsologtostderr
ha_test.go:605: (dbg) Done: out/minikube-linux-arm64 node add -p ha-621968 --control-plane -v=7 --alsologtostderr: (43.753476994s)
ha_test.go:611: (dbg) Run:  out/minikube-linux-arm64 -p ha-621968 status -v=7 --alsologtostderr
ha_test.go:611: (dbg) Done: out/minikube-linux-arm64 -p ha-621968 status -v=7 --alsologtostderr: (1.021364653s)
--- PASS: TestMultiControlPlane/serial/AddSecondaryNode (44.78s)
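
Growing a running HA cluster by one control-plane member, as exercised above:

	# join a new control-plane node to ha-621968 and re-check overall status
	out/minikube-linux-arm64 node add -p ha-621968 --control-plane -v=7 --alsologtostderr
	out/minikube-linux-arm64 -p ha-621968 status -v=7 --alsologtostderr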

                                                
                                    
x
+
TestMultiControlPlane/serial/HAppyAfterSecondaryNodeAdd (0.79s)

                                                
                                                
=== RUN   TestMultiControlPlane/serial/HAppyAfterSecondaryNodeAdd
ha_test.go:281: (dbg) Run:  out/minikube-linux-arm64 profile list --output json
--- PASS: TestMultiControlPlane/serial/HAppyAfterSecondaryNodeAdd (0.79s)

                                                
                                    
x
+
TestImageBuild/serial/Setup (34.97s)

                                                
                                                
=== RUN   TestImageBuild/serial/Setup
image_test.go:69: (dbg) Run:  out/minikube-linux-arm64 start -p image-922786 --driver=docker  --container-runtime=docker
image_test.go:69: (dbg) Done: out/minikube-linux-arm64 start -p image-922786 --driver=docker  --container-runtime=docker: (34.966156089s)
--- PASS: TestImageBuild/serial/Setup (34.97s)

                                                
                                    
x
+
TestImageBuild/serial/NormalBuild (1.8s)

                                                
                                                
=== RUN   TestImageBuild/serial/NormalBuild
image_test.go:78: (dbg) Run:  out/minikube-linux-arm64 image build -t aaa:latest ./testdata/image-build/test-normal -p image-922786
image_test.go:78: (dbg) Done: out/minikube-linux-arm64 image build -t aaa:latest ./testdata/image-build/test-normal -p image-922786: (1.798474061s)
--- PASS: TestImageBuild/serial/NormalBuild (1.80s)

                                                
                                    
x
+
TestImageBuild/serial/BuildWithBuildArg (0.9s)

                                                
                                                
=== RUN   TestImageBuild/serial/BuildWithBuildArg
image_test.go:99: (dbg) Run:  out/minikube-linux-arm64 image build -t aaa:latest --build-opt=build-arg=ENV_A=test_env_str --build-opt=no-cache ./testdata/image-build/test-arg -p image-922786
--- PASS: TestImageBuild/serial/BuildWithBuildArg (0.90s)
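
The --build-opt values are passed straight through to the container build. A sketch of the same flag against a hypothetical build context (the real testdata/image-build/test-arg contents are not shown in this report):

	# hypothetical Dockerfile whose ARG is filled by --build-opt=build-arg=ENV_A=...
	mkdir -p /tmp/test-arg-demo
	printf 'FROM busybox\nARG ENV_A\nRUN echo "ENV_A=${ENV_A}"\n' > /tmp/test-arg-demo/Dockerfile
	out/minikube-linux-arm64 image build -t aaa:demo \
	    --build-opt=build-arg=ENV_A=test_env_str --build-opt=no-cache \
	    /tmp/test-arg-demo -p image-922786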

                                                
                                    
x
+
TestImageBuild/serial/BuildWithDockerIgnore (0.71s)

                                                
                                                
=== RUN   TestImageBuild/serial/BuildWithDockerIgnore
image_test.go:133: (dbg) Run:  out/minikube-linux-arm64 image build -t aaa:latest ./testdata/image-build/test-normal --build-opt=no-cache -p image-922786
--- PASS: TestImageBuild/serial/BuildWithDockerIgnore (0.71s)

                                                
                                    
x
+
TestImageBuild/serial/BuildWithSpecifiedDockerfile (0.67s)

                                                
                                                
=== RUN   TestImageBuild/serial/BuildWithSpecifiedDockerfile
image_test.go:88: (dbg) Run:  out/minikube-linux-arm64 image build -t aaa:latest -f inner/Dockerfile ./testdata/image-build/test-f -p image-922786
--- PASS: TestImageBuild/serial/BuildWithSpecifiedDockerfile (0.67s)

                                                
                                    
x
+
TestJSONOutput/start/Command (47.67s)

                                                
                                                
=== RUN   TestJSONOutput/start/Command
json_output_test.go:63: (dbg) Run:  out/minikube-linux-arm64 start -p json-output-248130 --output=json --user=testUser --memory=2200 --wait=true --driver=docker  --container-runtime=docker
E0620 17:25:36.957286    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/addons-705802/client.crt: no such file or directory
json_output_test.go:63: (dbg) Done: out/minikube-linux-arm64 start -p json-output-248130 --output=json --user=testUser --memory=2200 --wait=true --driver=docker  --container-runtime=docker: (47.659797599s)
--- PASS: TestJSONOutput/start/Command (47.67s)

                                                
                                    
x
+
TestJSONOutput/start/Audit (0s)

                                                
                                                
=== RUN   TestJSONOutput/start/Audit
--- PASS: TestJSONOutput/start/Audit (0.00s)

                                                
                                    
x
+
TestJSONOutput/start/parallel/DistinctCurrentSteps (0s)

                                                
                                                
=== RUN   TestJSONOutput/start/parallel/DistinctCurrentSteps
=== PAUSE TestJSONOutput/start/parallel/DistinctCurrentSteps

                                                
                                                

                                                
                                                
=== CONT  TestJSONOutput/start/parallel/DistinctCurrentSteps
--- PASS: TestJSONOutput/start/parallel/DistinctCurrentSteps (0.00s)

                                                
                                    
x
+
TestJSONOutput/start/parallel/IncreasingCurrentSteps (0s)

                                                
                                                
=== RUN   TestJSONOutput/start/parallel/IncreasingCurrentSteps
=== PAUSE TestJSONOutput/start/parallel/IncreasingCurrentSteps

                                                
                                                

                                                
                                                
=== CONT  TestJSONOutput/start/parallel/IncreasingCurrentSteps
--- PASS: TestJSONOutput/start/parallel/IncreasingCurrentSteps (0.00s)

                                                
                                    
x
+
TestJSONOutput/pause/Command (0.61s)

                                                
                                                
=== RUN   TestJSONOutput/pause/Command
json_output_test.go:63: (dbg) Run:  out/minikube-linux-arm64 pause -p json-output-248130 --output=json --user=testUser
--- PASS: TestJSONOutput/pause/Command (0.61s)

                                                
                                    
x
+
TestJSONOutput/pause/Audit (0s)

                                                
                                                
=== RUN   TestJSONOutput/pause/Audit
--- PASS: TestJSONOutput/pause/Audit (0.00s)

                                                
                                    
x
+
TestJSONOutput/pause/parallel/DistinctCurrentSteps (0s)

                                                
                                                
=== RUN   TestJSONOutput/pause/parallel/DistinctCurrentSteps
=== PAUSE TestJSONOutput/pause/parallel/DistinctCurrentSteps

                                                
                                                

                                                
                                                
=== CONT  TestJSONOutput/pause/parallel/DistinctCurrentSteps
--- PASS: TestJSONOutput/pause/parallel/DistinctCurrentSteps (0.00s)

                                                
                                    
x
+
TestJSONOutput/pause/parallel/IncreasingCurrentSteps (0s)

                                                
                                                
=== RUN   TestJSONOutput/pause/parallel/IncreasingCurrentSteps
=== PAUSE TestJSONOutput/pause/parallel/IncreasingCurrentSteps

                                                
                                                

                                                
                                                
=== CONT  TestJSONOutput/pause/parallel/IncreasingCurrentSteps
--- PASS: TestJSONOutput/pause/parallel/IncreasingCurrentSteps (0.00s)

                                                
                                    
x
+
TestJSONOutput/unpause/Command (0.52s)

                                                
                                                
=== RUN   TestJSONOutput/unpause/Command
json_output_test.go:63: (dbg) Run:  out/minikube-linux-arm64 unpause -p json-output-248130 --output=json --user=testUser
--- PASS: TestJSONOutput/unpause/Command (0.52s)

                                                
                                    
x
+
TestJSONOutput/unpause/Audit (0s)

                                                
                                                
=== RUN   TestJSONOutput/unpause/Audit
--- PASS: TestJSONOutput/unpause/Audit (0.00s)

                                                
                                    
x
+
TestJSONOutput/unpause/parallel/DistinctCurrentSteps (0s)

                                                
                                                
=== RUN   TestJSONOutput/unpause/parallel/DistinctCurrentSteps
=== PAUSE TestJSONOutput/unpause/parallel/DistinctCurrentSteps

                                                
                                                

                                                
                                                
=== CONT  TestJSONOutput/unpause/parallel/DistinctCurrentSteps
--- PASS: TestJSONOutput/unpause/parallel/DistinctCurrentSteps (0.00s)

                                                
                                    
x
+
TestJSONOutput/unpause/parallel/IncreasingCurrentSteps (0s)

                                                
                                                
=== RUN   TestJSONOutput/unpause/parallel/IncreasingCurrentSteps
=== PAUSE TestJSONOutput/unpause/parallel/IncreasingCurrentSteps

                                                
                                                

                                                
                                                
=== CONT  TestJSONOutput/unpause/parallel/IncreasingCurrentSteps
--- PASS: TestJSONOutput/unpause/parallel/IncreasingCurrentSteps (0.00s)

                                                
                                    
x
+
TestJSONOutput/stop/Command (10.9s)

                                                
                                                
=== RUN   TestJSONOutput/stop/Command
json_output_test.go:63: (dbg) Run:  out/minikube-linux-arm64 stop -p json-output-248130 --output=json --user=testUser
json_output_test.go:63: (dbg) Done: out/minikube-linux-arm64 stop -p json-output-248130 --output=json --user=testUser: (10.900032408s)
--- PASS: TestJSONOutput/stop/Command (10.90s)

                                                
                                    
x
+
TestJSONOutput/stop/Audit (0s)

                                                
                                                
=== RUN   TestJSONOutput/stop/Audit
--- PASS: TestJSONOutput/stop/Audit (0.00s)

                                                
                                    
x
+
TestJSONOutput/stop/parallel/DistinctCurrentSteps (0s)

                                                
                                                
=== RUN   TestJSONOutput/stop/parallel/DistinctCurrentSteps
=== PAUSE TestJSONOutput/stop/parallel/DistinctCurrentSteps

                                                
                                                

                                                
                                                
=== CONT  TestJSONOutput/stop/parallel/DistinctCurrentSteps
--- PASS: TestJSONOutput/stop/parallel/DistinctCurrentSteps (0.00s)

                                                
                                    
x
+
TestJSONOutput/stop/parallel/IncreasingCurrentSteps (0s)

                                                
                                                
=== RUN   TestJSONOutput/stop/parallel/IncreasingCurrentSteps
=== PAUSE TestJSONOutput/stop/parallel/IncreasingCurrentSteps

                                                
                                                

                                                
                                                
=== CONT  TestJSONOutput/stop/parallel/IncreasingCurrentSteps
--- PASS: TestJSONOutput/stop/parallel/IncreasingCurrentSteps (0.00s)

                                                
                                    
x
+
TestErrorJSONOutput (0.21s)

                                                
                                                
=== RUN   TestErrorJSONOutput
json_output_test.go:160: (dbg) Run:  out/minikube-linux-arm64 start -p json-output-error-175522 --memory=2200 --output=json --wait=true --driver=fail
json_output_test.go:160: (dbg) Non-zero exit: out/minikube-linux-arm64 start -p json-output-error-175522 --memory=2200 --output=json --wait=true --driver=fail: exit status 56 (73.414277ms)

                                                
                                                
-- stdout --
	{"specversion":"1.0","id":"ede6cd22-3f42-4d54-8d82-7ff0ae2a8009","source":"https://minikube.sigs.k8s.io/","type":"io.k8s.sigs.minikube.step","datacontenttype":"application/json","data":{"currentstep":"0","message":"[json-output-error-175522] minikube v1.33.1 on Ubuntu 20.04 (arm64)","name":"Initial Minikube Setup","totalsteps":"19"}}
	{"specversion":"1.0","id":"7df2ff35-5444-4871-b6de-3b1af473af1c","source":"https://minikube.sigs.k8s.io/","type":"io.k8s.sigs.minikube.info","datacontenttype":"application/json","data":{"message":"MINIKUBE_LOCATION=19106"}}
	{"specversion":"1.0","id":"5d2facb1-2ee3-4298-b593-3b38bfae4a75","source":"https://minikube.sigs.k8s.io/","type":"io.k8s.sigs.minikube.info","datacontenttype":"application/json","data":{"message":"MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true"}}
	{"specversion":"1.0","id":"9282fe10-6d22-4e72-bb61-d01fc7de693f","source":"https://minikube.sigs.k8s.io/","type":"io.k8s.sigs.minikube.info","datacontenttype":"application/json","data":{"message":"KUBECONFIG=/home/jenkins/minikube-integration/19106-2452/kubeconfig"}}
	{"specversion":"1.0","id":"3b0a30b7-9aed-49f6-964a-193e29a67aa3","source":"https://minikube.sigs.k8s.io/","type":"io.k8s.sigs.minikube.info","datacontenttype":"application/json","data":{"message":"MINIKUBE_HOME=/home/jenkins/minikube-integration/19106-2452/.minikube"}}
	{"specversion":"1.0","id":"af86f9c1-a1a9-4c36-9046-c4c9a081a06d","source":"https://minikube.sigs.k8s.io/","type":"io.k8s.sigs.minikube.info","datacontenttype":"application/json","data":{"message":"MINIKUBE_BIN=out/minikube-linux-arm64"}}
	{"specversion":"1.0","id":"2a9fc03f-7995-4b1e-aa77-4f1c0343fa82","source":"https://minikube.sigs.k8s.io/","type":"io.k8s.sigs.minikube.info","datacontenttype":"application/json","data":{"message":"MINIKUBE_FORCE_SYSTEMD="}}
	{"specversion":"1.0","id":"5a47c6fa-23a2-472e-bc53-66308f6c8546","source":"https://minikube.sigs.k8s.io/","type":"io.k8s.sigs.minikube.error","datacontenttype":"application/json","data":{"advice":"","exitcode":"56","issues":"","message":"The driver 'fail' is not supported on linux/arm64","name":"DRV_UNSUPPORTED_OS","url":""}}

                                                
                                                
-- /stdout --
helpers_test.go:175: Cleaning up "json-output-error-175522" profile ...
helpers_test.go:178: (dbg) Run:  out/minikube-linux-arm64 delete -p json-output-error-175522
--- PASS: TestErrorJSONOutput (0.21s)
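
Every line of the --output=json stream above is a CloudEvents-style object written to stdout. A small sketch for pulling out the human-readable messages; jq is an assumption here, not something the test uses:

	# print event type and message for each JSON event minikube emits
	out/minikube-linux-arm64 start -p json-output-error-175522 --memory=2200 \
	    --output=json --wait=true --driver=fail \
	    | jq -r 'select(.data.message != null) | "\(.type): \(.data.message)"'
	out/minikube-linux-arm64 delete -p json-output-error-175522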

                                                
                                    
x
+
TestKicCustomNetwork/create_custom_network (33.49s)

                                                
                                                
=== RUN   TestKicCustomNetwork/create_custom_network
kic_custom_network_test.go:57: (dbg) Run:  out/minikube-linux-arm64 start -p docker-network-117233 --network=
E0620 17:26:33.079136    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/functional-493344/client.crt: no such file or directory
kic_custom_network_test.go:57: (dbg) Done: out/minikube-linux-arm64 start -p docker-network-117233 --network=: (31.321996668s)
kic_custom_network_test.go:150: (dbg) Run:  docker network ls --format {{.Name}}
helpers_test.go:175: Cleaning up "docker-network-117233" profile ...
helpers_test.go:178: (dbg) Run:  out/minikube-linux-arm64 delete -p docker-network-117233
helpers_test.go:178: (dbg) Done: out/minikube-linux-arm64 delete -p docker-network-117233: (2.148913397s)
--- PASS: TestKicCustomNetwork/create_custom_network (33.49s)

                                                
                                    
x
+
TestKicCustomNetwork/use_default_bridge_network (35.68s)

                                                
                                                
=== RUN   TestKicCustomNetwork/use_default_bridge_network
kic_custom_network_test.go:57: (dbg) Run:  out/minikube-linux-arm64 start -p docker-network-061614 --network=bridge
E0620 17:27:00.004461    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/addons-705802/client.crt: no such file or directory
kic_custom_network_test.go:57: (dbg) Done: out/minikube-linux-arm64 start -p docker-network-061614 --network=bridge: (33.735782327s)
kic_custom_network_test.go:150: (dbg) Run:  docker network ls --format {{.Name}}
helpers_test.go:175: Cleaning up "docker-network-061614" profile ...
helpers_test.go:178: (dbg) Run:  out/minikube-linux-arm64 delete -p docker-network-061614
helpers_test.go:178: (dbg) Done: out/minikube-linux-arm64 delete -p docker-network-061614: (1.924552588s)
--- PASS: TestKicCustomNetwork/use_default_bridge_network (35.68s)

                                                
                                    
x
+
TestKicExistingNetwork (35.22s)

                                                
                                                
=== RUN   TestKicExistingNetwork
kic_custom_network_test.go:150: (dbg) Run:  docker network ls --format {{.Name}}
kic_custom_network_test.go:93: (dbg) Run:  out/minikube-linux-arm64 start -p existing-network-367393 --network=existing-network
kic_custom_network_test.go:93: (dbg) Done: out/minikube-linux-arm64 start -p existing-network-367393 --network=existing-network: (32.950177867s)
helpers_test.go:175: Cleaning up "existing-network-367393" profile ...
helpers_test.go:178: (dbg) Run:  out/minikube-linux-arm64 delete -p existing-network-367393
helpers_test.go:178: (dbg) Done: out/minikube-linux-arm64 delete -p existing-network-367393: (2.137796872s)
--- PASS: TestKicExistingNetwork (35.22s)
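
Here the cluster attaches to a docker network that already exists instead of letting minikube create one. A minimal sketch; the network itself is created inside the test and not shown in this log, and the subnet value below is an arbitrary example:

	# pre-create the network, point --network at it, and clean up afterwards
	docker network create --subnet=192.168.70.0/24 existing-network
	out/minikube-linux-arm64 start -p existing-network-367393 --network=existing-network
	out/minikube-linux-arm64 delete -p existing-network-367393
	docker network rm existing-network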

                                                
                                    
x
+
TestKicCustomSubnet (35.74s)

                                                
                                                
=== RUN   TestKicCustomSubnet
kic_custom_network_test.go:112: (dbg) Run:  out/minikube-linux-arm64 start -p custom-subnet-467989 --subnet=192.168.60.0/24
kic_custom_network_test.go:112: (dbg) Done: out/minikube-linux-arm64 start -p custom-subnet-467989 --subnet=192.168.60.0/24: (33.674233115s)
kic_custom_network_test.go:161: (dbg) Run:  docker network inspect custom-subnet-467989 --format "{{(index .IPAM.Config 0).Subnet}}"
helpers_test.go:175: Cleaning up "custom-subnet-467989" profile ...
helpers_test.go:178: (dbg) Run:  out/minikube-linux-arm64 delete -p custom-subnet-467989
helpers_test.go:178: (dbg) Done: out/minikube-linux-arm64 delete -p custom-subnet-467989: (2.04271043s)
--- PASS: TestKicCustomSubnet (35.74s)
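
Requesting a specific subnet and reading back what docker allocated, exactly as logged above:

	# ask for 192.168.60.0/24 and verify it on the resulting docker network
	out/minikube-linux-arm64 start -p custom-subnet-467989 --subnet=192.168.60.0/24
	docker network inspect custom-subnet-467989 --format '{{(index .IPAM.Config 0).Subnet}}'
	out/minikube-linux-arm64 delete -p custom-subnet-467989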

                                                
                                    
x
+
TestKicStaticIP (34.06s)

                                                
                                                
=== RUN   TestKicStaticIP
kic_custom_network_test.go:132: (dbg) Run:  out/minikube-linux-arm64 start -p static-ip-779048 --static-ip=192.168.200.200
kic_custom_network_test.go:132: (dbg) Done: out/minikube-linux-arm64 start -p static-ip-779048 --static-ip=192.168.200.200: (31.748459296s)
kic_custom_network_test.go:138: (dbg) Run:  out/minikube-linux-arm64 -p static-ip-779048 ip
helpers_test.go:175: Cleaning up "static-ip-779048" profile ...
helpers_test.go:178: (dbg) Run:  out/minikube-linux-arm64 delete -p static-ip-779048
helpers_test.go:178: (dbg) Done: out/minikube-linux-arm64 delete -p static-ip-779048: (2.065453821s)
--- PASS: TestKicStaticIP (34.06s)
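
Pinning the node IP and echoing it back, as logged above:

	# the container gets the requested address; "minikube ip" should report it
	out/minikube-linux-arm64 start -p static-ip-779048 --static-ip=192.168.200.200
	out/minikube-linux-arm64 -p static-ip-779048 ip
	out/minikube-linux-arm64 delete -p static-ip-779048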

                                                
                                    
x
+
TestMainNoArgs (0.05s)

                                                
                                                
=== RUN   TestMainNoArgs
main_test.go:68: (dbg) Run:  out/minikube-linux-arm64
--- PASS: TestMainNoArgs (0.05s)

                                                
                                    
x
+
TestMinikubeProfile (68.85s)

                                                
                                                
=== RUN   TestMinikubeProfile
minikube_profile_test.go:44: (dbg) Run:  out/minikube-linux-arm64 start -p first-014685 --driver=docker  --container-runtime=docker
minikube_profile_test.go:44: (dbg) Done: out/minikube-linux-arm64 start -p first-014685 --driver=docker  --container-runtime=docker: (31.204682682s)
minikube_profile_test.go:44: (dbg) Run:  out/minikube-linux-arm64 start -p second-017139 --driver=docker  --container-runtime=docker
minikube_profile_test.go:44: (dbg) Done: out/minikube-linux-arm64 start -p second-017139 --driver=docker  --container-runtime=docker: (32.248065839s)
minikube_profile_test.go:51: (dbg) Run:  out/minikube-linux-arm64 profile first-014685
minikube_profile_test.go:55: (dbg) Run:  out/minikube-linux-arm64 profile list -ojson
minikube_profile_test.go:51: (dbg) Run:  out/minikube-linux-arm64 profile second-017139
minikube_profile_test.go:55: (dbg) Run:  out/minikube-linux-arm64 profile list -ojson
helpers_test.go:175: Cleaning up "second-017139" profile ...
helpers_test.go:178: (dbg) Run:  out/minikube-linux-arm64 delete -p second-017139
helpers_test.go:178: (dbg) Done: out/minikube-linux-arm64 delete -p second-017139: (2.074483733s)
helpers_test.go:175: Cleaning up "first-014685" profile ...
helpers_test.go:178: (dbg) Run:  out/minikube-linux-arm64 delete -p first-014685
helpers_test.go:178: (dbg) Done: out/minikube-linux-arm64 delete -p first-014685: (2.102702623s)
--- PASS: TestMinikubeProfile (68.85s)

                                                
                                    
x
+
TestMountStart/serial/StartWithMountFirst (10.6s)

                                                
                                                
=== RUN   TestMountStart/serial/StartWithMountFirst
mount_start_test.go:98: (dbg) Run:  out/minikube-linux-arm64 start -p mount-start-1-189569 --memory=2048 --mount --mount-gid 0 --mount-msize 6543 --mount-port 46464 --mount-uid 0 --no-kubernetes --driver=docker  --container-runtime=docker
mount_start_test.go:98: (dbg) Done: out/minikube-linux-arm64 start -p mount-start-1-189569 --memory=2048 --mount --mount-gid 0 --mount-msize 6543 --mount-port 46464 --mount-uid 0 --no-kubernetes --driver=docker  --container-runtime=docker: (9.597313544s)
--- PASS: TestMountStart/serial/StartWithMountFirst (10.60s)
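
The mount-only start used here, spelled out: --no-kubernetes skips the control plane entirely, and the msize/port/uid/gid flags tune the 9p host mount that the VerifyMount steps below read over ssh:

	# start a bare node with only the host mount, then list it from inside the node
	out/minikube-linux-arm64 start -p mount-start-1-189569 --memory=2048 \
	    --mount --mount-gid 0 --mount-msize 6543 --mount-port 46464 --mount-uid 0 \
	    --no-kubernetes --driver=docker --container-runtime=docker
	out/minikube-linux-arm64 -p mount-start-1-189569 ssh -- ls /minikube-host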

                                                
                                    
x
+
TestMountStart/serial/VerifyMountFirst (0.27s)

                                                
                                                
=== RUN   TestMountStart/serial/VerifyMountFirst
mount_start_test.go:114: (dbg) Run:  out/minikube-linux-arm64 -p mount-start-1-189569 ssh -- ls /minikube-host
--- PASS: TestMountStart/serial/VerifyMountFirst (0.27s)

                                                
                                    
x
+
TestMountStart/serial/StartWithMountSecond (10.52s)

                                                
                                                
=== RUN   TestMountStart/serial/StartWithMountSecond
mount_start_test.go:98: (dbg) Run:  out/minikube-linux-arm64 start -p mount-start-2-201934 --memory=2048 --mount --mount-gid 0 --mount-msize 6543 --mount-port 46465 --mount-uid 0 --no-kubernetes --driver=docker  --container-runtime=docker
mount_start_test.go:98: (dbg) Done: out/minikube-linux-arm64 start -p mount-start-2-201934 --memory=2048 --mount --mount-gid 0 --mount-msize 6543 --mount-port 46465 --mount-uid 0 --no-kubernetes --driver=docker  --container-runtime=docker: (9.521738892s)
--- PASS: TestMountStart/serial/StartWithMountSecond (10.52s)

                                                
                                    
x
+
TestMountStart/serial/VerifyMountSecond (0.25s)

                                                
                                                
=== RUN   TestMountStart/serial/VerifyMountSecond
mount_start_test.go:114: (dbg) Run:  out/minikube-linux-arm64 -p mount-start-2-201934 ssh -- ls /minikube-host
--- PASS: TestMountStart/serial/VerifyMountSecond (0.25s)

                                                
                                    
x
+
TestMountStart/serial/DeleteFirst (1.45s)

                                                
                                                
=== RUN   TestMountStart/serial/DeleteFirst
pause_test.go:132: (dbg) Run:  out/minikube-linux-arm64 delete -p mount-start-1-189569 --alsologtostderr -v=5
pause_test.go:132: (dbg) Done: out/minikube-linux-arm64 delete -p mount-start-1-189569 --alsologtostderr -v=5: (1.449318494s)
--- PASS: TestMountStart/serial/DeleteFirst (1.45s)

                                                
                                    
x
+
TestMountStart/serial/VerifyMountPostDelete (0.24s)

                                                
                                                
=== RUN   TestMountStart/serial/VerifyMountPostDelete
mount_start_test.go:114: (dbg) Run:  out/minikube-linux-arm64 -p mount-start-2-201934 ssh -- ls /minikube-host
--- PASS: TestMountStart/serial/VerifyMountPostDelete (0.24s)

                                                
                                    
x
+
TestMountStart/serial/Stop (1.19s)

                                                
                                                
=== RUN   TestMountStart/serial/Stop
mount_start_test.go:155: (dbg) Run:  out/minikube-linux-arm64 stop -p mount-start-2-201934
mount_start_test.go:155: (dbg) Done: out/minikube-linux-arm64 stop -p mount-start-2-201934: (1.19363675s)
--- PASS: TestMountStart/serial/Stop (1.19s)

                                                
                                    
x
+
TestMountStart/serial/RestartStopped (8.33s)

                                                
                                                
=== RUN   TestMountStart/serial/RestartStopped
mount_start_test.go:166: (dbg) Run:  out/minikube-linux-arm64 start -p mount-start-2-201934
E0620 17:30:36.957190    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/addons-705802/client.crt: no such file or directory
mount_start_test.go:166: (dbg) Done: out/minikube-linux-arm64 start -p mount-start-2-201934: (7.331853325s)
--- PASS: TestMountStart/serial/RestartStopped (8.33s)

                                                
                                    
x
+
TestMountStart/serial/VerifyMountPostStop (0.26s)

                                                
                                                
=== RUN   TestMountStart/serial/VerifyMountPostStop
mount_start_test.go:114: (dbg) Run:  out/minikube-linux-arm64 -p mount-start-2-201934 ssh -- ls /minikube-host
--- PASS: TestMountStart/serial/VerifyMountPostStop (0.26s)

                                                
                                    
x
+
TestMultiNode/serial/FreshStart2Nodes (79.71s)

                                                
                                                
=== RUN   TestMultiNode/serial/FreshStart2Nodes
multinode_test.go:96: (dbg) Run:  out/minikube-linux-arm64 start -p multinode-632831 --wait=true --memory=2200 --nodes=2 -v=8 --alsologtostderr --driver=docker  --container-runtime=docker
E0620 17:31:33.078100    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/functional-493344/client.crt: no such file or directory
multinode_test.go:96: (dbg) Done: out/minikube-linux-arm64 start -p multinode-632831 --wait=true --memory=2200 --nodes=2 -v=8 --alsologtostderr --driver=docker  --container-runtime=docker: (1m19.120874944s)
multinode_test.go:102: (dbg) Run:  out/minikube-linux-arm64 -p multinode-632831 status --alsologtostderr
--- PASS: TestMultiNode/serial/FreshStart2Nodes (79.71s)

                                                
                                    
x
+
TestMultiNode/serial/DeployApp2Nodes (44.36s)

                                                
                                                
=== RUN   TestMultiNode/serial/DeployApp2Nodes
multinode_test.go:493: (dbg) Run:  out/minikube-linux-arm64 kubectl -p multinode-632831 -- apply -f ./testdata/multinodes/multinode-pod-dns-test.yaml
multinode_test.go:498: (dbg) Run:  out/minikube-linux-arm64 kubectl -p multinode-632831 -- rollout status deployment/busybox
multinode_test.go:498: (dbg) Done: out/minikube-linux-arm64 kubectl -p multinode-632831 -- rollout status deployment/busybox: (3.081479666s)
multinode_test.go:505: (dbg) Run:  out/minikube-linux-arm64 kubectl -p multinode-632831 -- get pods -o jsonpath='{.items[*].status.podIP}'
multinode_test.go:514: expected 2 Pod IPs but got 1 (may be temporary), output: "\n-- stdout --\n\t'10.244.0.3'\n\n-- /stdout --"
multinode_test.go:505: (dbg) Run:  out/minikube-linux-arm64 kubectl -p multinode-632831 -- get pods -o jsonpath='{.items[*].status.podIP}'
multinode_test.go:514: expected 2 Pod IPs but got 1 (may be temporary), output: "\n-- stdout --\n\t'10.244.0.3'\n\n-- /stdout --"
multinode_test.go:505: (dbg) Run:  out/minikube-linux-arm64 kubectl -p multinode-632831 -- get pods -o jsonpath='{.items[*].status.podIP}'
multinode_test.go:514: expected 2 Pod IPs but got 1 (may be temporary), output: "\n-- stdout --\n\t'10.244.0.3'\n\n-- /stdout --"
multinode_test.go:505: (dbg) Run:  out/minikube-linux-arm64 kubectl -p multinode-632831 -- get pods -o jsonpath='{.items[*].status.podIP}'
multinode_test.go:514: expected 2 Pod IPs but got 1 (may be temporary), output: "\n-- stdout --\n\t'10.244.0.3'\n\n-- /stdout --"
multinode_test.go:505: (dbg) Run:  out/minikube-linux-arm64 kubectl -p multinode-632831 -- get pods -o jsonpath='{.items[*].status.podIP}'
multinode_test.go:514: expected 2 Pod IPs but got 1 (may be temporary), output: "\n-- stdout --\n\t'10.244.0.3'\n\n-- /stdout --"
multinode_test.go:505: (dbg) Run:  out/minikube-linux-arm64 kubectl -p multinode-632831 -- get pods -o jsonpath='{.items[*].status.podIP}'
multinode_test.go:514: expected 2 Pod IPs but got 1 (may be temporary), output: "\n-- stdout --\n\t'10.244.0.3'\n\n-- /stdout --"
multinode_test.go:505: (dbg) Run:  out/minikube-linux-arm64 kubectl -p multinode-632831 -- get pods -o jsonpath='{.items[*].status.podIP}'
multinode_test.go:514: expected 2 Pod IPs but got 1 (may be temporary), output: "\n-- stdout --\n\t'10.244.0.3'\n\n-- /stdout --"
multinode_test.go:505: (dbg) Run:  out/minikube-linux-arm64 kubectl -p multinode-632831 -- get pods -o jsonpath='{.items[*].status.podIP}'
multinode_test.go:514: expected 2 Pod IPs but got 1 (may be temporary), output: "\n-- stdout --\n\t'10.244.0.3'\n\n-- /stdout --"
multinode_test.go:505: (dbg) Run:  out/minikube-linux-arm64 kubectl -p multinode-632831 -- get pods -o jsonpath='{.items[*].status.podIP}'
multinode_test.go:528: (dbg) Run:  out/minikube-linux-arm64 kubectl -p multinode-632831 -- get pods -o jsonpath='{.items[*].metadata.name}'
multinode_test.go:536: (dbg) Run:  out/minikube-linux-arm64 kubectl -p multinode-632831 -- exec busybox-fc5497c4f-6lk4w -- nslookup kubernetes.io
multinode_test.go:536: (dbg) Run:  out/minikube-linux-arm64 kubectl -p multinode-632831 -- exec busybox-fc5497c4f-zcj4m -- nslookup kubernetes.io
multinode_test.go:546: (dbg) Run:  out/minikube-linux-arm64 kubectl -p multinode-632831 -- exec busybox-fc5497c4f-6lk4w -- nslookup kubernetes.default
multinode_test.go:546: (dbg) Run:  out/minikube-linux-arm64 kubectl -p multinode-632831 -- exec busybox-fc5497c4f-zcj4m -- nslookup kubernetes.default
multinode_test.go:554: (dbg) Run:  out/minikube-linux-arm64 kubectl -p multinode-632831 -- exec busybox-fc5497c4f-6lk4w -- nslookup kubernetes.default.svc.cluster.local
multinode_test.go:554: (dbg) Run:  out/minikube-linux-arm64 kubectl -p multinode-632831 -- exec busybox-fc5497c4f-zcj4m -- nslookup kubernetes.default.svc.cluster.local
--- PASS: TestMultiNode/serial/DeployApp2Nodes (44.36s)
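
The repeated pod-IP queries above are simply the test waiting for both busybox replicas (one per node) to be scheduled. A minimal polling sketch of the same idea; the loop is an illustration, not the test's own code:

	# wait until both replicas have pod IPs, then run the DNS check from each one
	until [ "$(out/minikube-linux-arm64 kubectl -p multinode-632831 -- \
	        get pods -o jsonpath='{.items[*].status.podIP}' | wc -w)" -ge 2 ]; do
	    sleep 2
	done
	for pod in $(out/minikube-linux-arm64 kubectl -p multinode-632831 -- \
	        get pods -o jsonpath='{.items[*].metadata.name}'); do
	    out/minikube-linux-arm64 kubectl -p multinode-632831 -- \
	        exec "$pod" -- nslookup kubernetes.default.svc.cluster.local
	done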

                                                
                                    
x
+
TestMultiNode/serial/PingHostFrom2Pods (1.01s)

                                                
                                                
=== RUN   TestMultiNode/serial/PingHostFrom2Pods
multinode_test.go:564: (dbg) Run:  out/minikube-linux-arm64 kubectl -p multinode-632831 -- get pods -o jsonpath='{.items[*].metadata.name}'
multinode_test.go:572: (dbg) Run:  out/minikube-linux-arm64 kubectl -p multinode-632831 -- exec busybox-fc5497c4f-6lk4w -- sh -c "nslookup host.minikube.internal | awk 'NR==5' | cut -d' ' -f3"
multinode_test.go:583: (dbg) Run:  out/minikube-linux-arm64 kubectl -p multinode-632831 -- exec busybox-fc5497c4f-6lk4w -- sh -c "ping -c 1 192.168.67.1"
multinode_test.go:572: (dbg) Run:  out/minikube-linux-arm64 kubectl -p multinode-632831 -- exec busybox-fc5497c4f-zcj4m -- sh -c "nslookup host.minikube.internal | awk 'NR==5' | cut -d' ' -f3"
multinode_test.go:583: (dbg) Run:  out/minikube-linux-arm64 kubectl -p multinode-632831 -- exec busybox-fc5497c4f-zcj4m -- sh -c "ping -c 1 192.168.67.1"
--- PASS: TestMultiNode/serial/PingHostFrom2Pods (1.01s)

                                                
                                    
x
+
TestMultiNode/serial/AddNode (20.38s)

                                                
                                                
=== RUN   TestMultiNode/serial/AddNode
multinode_test.go:121: (dbg) Run:  out/minikube-linux-arm64 node add -p multinode-632831 -v 3 --alsologtostderr
E0620 17:32:56.124148    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/functional-493344/client.crt: no such file or directory
multinode_test.go:121: (dbg) Done: out/minikube-linux-arm64 node add -p multinode-632831 -v 3 --alsologtostderr: (19.597580144s)
multinode_test.go:127: (dbg) Run:  out/minikube-linux-arm64 -p multinode-632831 status --alsologtostderr
--- PASS: TestMultiNode/serial/AddNode (20.38s)

                                                
                                    
x
+
TestMultiNode/serial/MultiNodeLabels (0.11s)

                                                
                                                
=== RUN   TestMultiNode/serial/MultiNodeLabels
multinode_test.go:221: (dbg) Run:  kubectl --context multinode-632831 get nodes -o "jsonpath=[{range .items[*]}{.metadata.labels},{end}]"
--- PASS: TestMultiNode/serial/MultiNodeLabels (0.11s)

                                                
                                    
x
+
TestMultiNode/serial/ProfileList (0.34s)

                                                
                                                
=== RUN   TestMultiNode/serial/ProfileList
multinode_test.go:143: (dbg) Run:  out/minikube-linux-arm64 profile list --output json
--- PASS: TestMultiNode/serial/ProfileList (0.34s)

                                                
                                    
x
+
TestMultiNode/serial/CopyFile (9.99s)

                                                
                                                
=== RUN   TestMultiNode/serial/CopyFile
multinode_test.go:184: (dbg) Run:  out/minikube-linux-arm64 -p multinode-632831 status --output json --alsologtostderr
helpers_test.go:556: (dbg) Run:  out/minikube-linux-arm64 -p multinode-632831 cp testdata/cp-test.txt multinode-632831:/home/docker/cp-test.txt
helpers_test.go:534: (dbg) Run:  out/minikube-linux-arm64 -p multinode-632831 ssh -n multinode-632831 "sudo cat /home/docker/cp-test.txt"
helpers_test.go:556: (dbg) Run:  out/minikube-linux-arm64 -p multinode-632831 cp multinode-632831:/home/docker/cp-test.txt /tmp/TestMultiNodeserialCopyFile1069866961/001/cp-test_multinode-632831.txt
helpers_test.go:534: (dbg) Run:  out/minikube-linux-arm64 -p multinode-632831 ssh -n multinode-632831 "sudo cat /home/docker/cp-test.txt"
helpers_test.go:556: (dbg) Run:  out/minikube-linux-arm64 -p multinode-632831 cp multinode-632831:/home/docker/cp-test.txt multinode-632831-m02:/home/docker/cp-test_multinode-632831_multinode-632831-m02.txt
helpers_test.go:534: (dbg) Run:  out/minikube-linux-arm64 -p multinode-632831 ssh -n multinode-632831 "sudo cat /home/docker/cp-test.txt"
helpers_test.go:534: (dbg) Run:  out/minikube-linux-arm64 -p multinode-632831 ssh -n multinode-632831-m02 "sudo cat /home/docker/cp-test_multinode-632831_multinode-632831-m02.txt"
helpers_test.go:556: (dbg) Run:  out/minikube-linux-arm64 -p multinode-632831 cp multinode-632831:/home/docker/cp-test.txt multinode-632831-m03:/home/docker/cp-test_multinode-632831_multinode-632831-m03.txt
helpers_test.go:534: (dbg) Run:  out/minikube-linux-arm64 -p multinode-632831 ssh -n multinode-632831 "sudo cat /home/docker/cp-test.txt"
helpers_test.go:534: (dbg) Run:  out/minikube-linux-arm64 -p multinode-632831 ssh -n multinode-632831-m03 "sudo cat /home/docker/cp-test_multinode-632831_multinode-632831-m03.txt"
helpers_test.go:556: (dbg) Run:  out/minikube-linux-arm64 -p multinode-632831 cp testdata/cp-test.txt multinode-632831-m02:/home/docker/cp-test.txt
helpers_test.go:534: (dbg) Run:  out/minikube-linux-arm64 -p multinode-632831 ssh -n multinode-632831-m02 "sudo cat /home/docker/cp-test.txt"
helpers_test.go:556: (dbg) Run:  out/minikube-linux-arm64 -p multinode-632831 cp multinode-632831-m02:/home/docker/cp-test.txt /tmp/TestMultiNodeserialCopyFile1069866961/001/cp-test_multinode-632831-m02.txt
helpers_test.go:534: (dbg) Run:  out/minikube-linux-arm64 -p multinode-632831 ssh -n multinode-632831-m02 "sudo cat /home/docker/cp-test.txt"
helpers_test.go:556: (dbg) Run:  out/minikube-linux-arm64 -p multinode-632831 cp multinode-632831-m02:/home/docker/cp-test.txt multinode-632831:/home/docker/cp-test_multinode-632831-m02_multinode-632831.txt
helpers_test.go:534: (dbg) Run:  out/minikube-linux-arm64 -p multinode-632831 ssh -n multinode-632831-m02 "sudo cat /home/docker/cp-test.txt"
helpers_test.go:534: (dbg) Run:  out/minikube-linux-arm64 -p multinode-632831 ssh -n multinode-632831 "sudo cat /home/docker/cp-test_multinode-632831-m02_multinode-632831.txt"
helpers_test.go:556: (dbg) Run:  out/minikube-linux-arm64 -p multinode-632831 cp multinode-632831-m02:/home/docker/cp-test.txt multinode-632831-m03:/home/docker/cp-test_multinode-632831-m02_multinode-632831-m03.txt
helpers_test.go:534: (dbg) Run:  out/minikube-linux-arm64 -p multinode-632831 ssh -n multinode-632831-m02 "sudo cat /home/docker/cp-test.txt"
helpers_test.go:534: (dbg) Run:  out/minikube-linux-arm64 -p multinode-632831 ssh -n multinode-632831-m03 "sudo cat /home/docker/cp-test_multinode-632831-m02_multinode-632831-m03.txt"
helpers_test.go:556: (dbg) Run:  out/minikube-linux-arm64 -p multinode-632831 cp testdata/cp-test.txt multinode-632831-m03:/home/docker/cp-test.txt
helpers_test.go:534: (dbg) Run:  out/minikube-linux-arm64 -p multinode-632831 ssh -n multinode-632831-m03 "sudo cat /home/docker/cp-test.txt"
helpers_test.go:556: (dbg) Run:  out/minikube-linux-arm64 -p multinode-632831 cp multinode-632831-m03:/home/docker/cp-test.txt /tmp/TestMultiNodeserialCopyFile1069866961/001/cp-test_multinode-632831-m03.txt
helpers_test.go:534: (dbg) Run:  out/minikube-linux-arm64 -p multinode-632831 ssh -n multinode-632831-m03 "sudo cat /home/docker/cp-test.txt"
helpers_test.go:556: (dbg) Run:  out/minikube-linux-arm64 -p multinode-632831 cp multinode-632831-m03:/home/docker/cp-test.txt multinode-632831:/home/docker/cp-test_multinode-632831-m03_multinode-632831.txt
helpers_test.go:534: (dbg) Run:  out/minikube-linux-arm64 -p multinode-632831 ssh -n multinode-632831-m03 "sudo cat /home/docker/cp-test.txt"
helpers_test.go:534: (dbg) Run:  out/minikube-linux-arm64 -p multinode-632831 ssh -n multinode-632831 "sudo cat /home/docker/cp-test_multinode-632831-m03_multinode-632831.txt"
helpers_test.go:556: (dbg) Run:  out/minikube-linux-arm64 -p multinode-632831 cp multinode-632831-m03:/home/docker/cp-test.txt multinode-632831-m02:/home/docker/cp-test_multinode-632831-m03_multinode-632831-m02.txt
helpers_test.go:534: (dbg) Run:  out/minikube-linux-arm64 -p multinode-632831 ssh -n multinode-632831-m03 "sudo cat /home/docker/cp-test.txt"
helpers_test.go:534: (dbg) Run:  out/minikube-linux-arm64 -p multinode-632831 ssh -n multinode-632831-m02 "sudo cat /home/docker/cp-test_multinode-632831-m03_multinode-632831-m02.txt"
--- PASS: TestMultiNode/serial/CopyFile (9.99s)
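
For reference, the cross-node copy path exercised above can be retraced by hand with the same commands; this is a minimal sketch using the profile and node names from this run, not an additional test step:
  $ out/minikube-linux-arm64 -p multinode-632831 cp testdata/cp-test.txt multinode-632831:/home/docker/cp-test.txt
  $ out/minikube-linux-arm64 -p multinode-632831 ssh -n multinode-632831 "sudo cat /home/docker/cp-test.txt"
  $ out/minikube-linux-arm64 -p multinode-632831 cp multinode-632831:/home/docker/cp-test.txt multinode-632831-m02:/home/docker/cp-test_multinode-632831_multinode-632831-m02.txt
  $ out/minikube-linux-arm64 -p multinode-632831 ssh -n multinode-632831-m02 "sudo cat /home/docker/cp-test_multinode-632831_multinode-632831-m02.txt"
Each ssh -n <node> "sudo cat ..." call confirms that the copied file actually landed on the target node.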

                                                
                                    
TestMultiNode/serial/StopNode (2.25s)

                                                
                                                
=== RUN   TestMultiNode/serial/StopNode
multinode_test.go:248: (dbg) Run:  out/minikube-linux-arm64 -p multinode-632831 node stop m03
multinode_test.go:248: (dbg) Done: out/minikube-linux-arm64 -p multinode-632831 node stop m03: (1.220311146s)
multinode_test.go:254: (dbg) Run:  out/minikube-linux-arm64 -p multinode-632831 status
multinode_test.go:254: (dbg) Non-zero exit: out/minikube-linux-arm64 -p multinode-632831 status: exit status 7 (498.265529ms)

                                                
                                                
-- stdout --
	multinode-632831
	type: Control Plane
	host: Running
	kubelet: Running
	apiserver: Running
	kubeconfig: Configured
	
	multinode-632831-m02
	type: Worker
	host: Running
	kubelet: Running
	
	multinode-632831-m03
	type: Worker
	host: Stopped
	kubelet: Stopped
	

                                                
                                                
-- /stdout --
multinode_test.go:261: (dbg) Run:  out/minikube-linux-arm64 -p multinode-632831 status --alsologtostderr
multinode_test.go:261: (dbg) Non-zero exit: out/minikube-linux-arm64 -p multinode-632831 status --alsologtostderr: exit status 7 (527.118049ms)

                                                
                                                
-- stdout --
	multinode-632831
	type: Control Plane
	host: Running
	kubelet: Running
	apiserver: Running
	kubeconfig: Configured
	
	multinode-632831-m02
	type: Worker
	host: Running
	kubelet: Running
	
	multinode-632831-m03
	type: Worker
	host: Stopped
	kubelet: Stopped
	

                                                
                                                
-- /stdout --
** stderr ** 
	I0620 17:33:23.785933  163656 out.go:291] Setting OutFile to fd 1 ...
	I0620 17:33:23.786061  163656 out.go:338] TERM=,COLORTERM=, which probably does not support color
	I0620 17:33:23.786071  163656 out.go:304] Setting ErrFile to fd 2...
	I0620 17:33:23.786076  163656 out.go:338] TERM=,COLORTERM=, which probably does not support color
	I0620 17:33:23.786313  163656 root.go:338] Updating PATH: /home/jenkins/minikube-integration/19106-2452/.minikube/bin
	I0620 17:33:23.786479  163656 out.go:298] Setting JSON to false
	I0620 17:33:23.786513  163656 mustload.go:65] Loading cluster: multinode-632831
	I0620 17:33:23.786610  163656 notify.go:220] Checking for updates...
	I0620 17:33:23.786906  163656 config.go:182] Loaded profile config "multinode-632831": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.30.2
	I0620 17:33:23.786923  163656 status.go:255] checking status of multinode-632831 ...
	I0620 17:33:23.787770  163656 cli_runner.go:164] Run: docker container inspect multinode-632831 --format={{.State.Status}}
	I0620 17:33:23.805777  163656 status.go:330] multinode-632831 host status = "Running" (err=<nil>)
	I0620 17:33:23.805812  163656 host.go:66] Checking if "multinode-632831" exists ...
	I0620 17:33:23.806091  163656 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" multinode-632831
	I0620 17:33:23.832498  163656 host.go:66] Checking if "multinode-632831" exists ...
	I0620 17:33:23.832812  163656 ssh_runner.go:195] Run: sh -c "df -h /var | awk 'NR==2{print $5}'"
	I0620 17:33:23.832868  163656 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" multinode-632831
	I0620 17:33:23.850486  163656 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32913 SSHKeyPath:/home/jenkins/minikube-integration/19106-2452/.minikube/machines/multinode-632831/id_rsa Username:docker}
	I0620 17:33:23.948940  163656 ssh_runner.go:195] Run: systemctl --version
	I0620 17:33:23.953971  163656 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet
	I0620 17:33:23.966037  163656 cli_runner.go:164] Run: docker system info --format "{{json .}}"
	I0620 17:33:24.042466  163656 info.go:266] docker info: {ID:6ZPO:QZND:VNGE:LUKL:4Y3K:XELL:AAX4:2GTK:E6LM:MPRN:3ZXR:TTMR Containers:3 ContainersRunning:2 ContainersPaused:0 ContainersStopped:1 Images:3 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:true BridgeNfIP6Tables:true Debug:false NFd:41 OomKillDisable:true NGoroutines:62 SystemTime:2024-06-20 17:33:24.03179588 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1063-aws OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Architecture:aarch64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:2 MemTotal:8214900736 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ip-172-31-30-239 Labels:[] ExperimentalBuild:false ServerVersion:26.1.4 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:d2d58213f83a351ca8f528a95fbd145f5654e957 Expected:d2d58213f83a351ca8f528a95fbd145f5654e957} RuncCommit:{ID:v1.1.12-0-g51d5e94 Expected:v1.1.12-0-g51d5e94} InitCommit:{ID:de40ad0 Expected:de40ad0} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.14.1] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.27.1]] Warnings:<nil>}}
	I0620 17:33:24.043163  163656 kubeconfig.go:125] found "multinode-632831" server: "https://192.168.67.2:8443"
	I0620 17:33:24.043193  163656 api_server.go:166] Checking apiserver status ...
	I0620 17:33:24.043246  163656 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
	I0620 17:33:24.056427  163656 ssh_runner.go:195] Run: sudo egrep ^[0-9]+:freezer: /proc/2080/cgroup
	I0620 17:33:24.067619  163656 api_server.go:182] apiserver freezer: "6:freezer:/docker/51cd6fd7f0b73e9965ad2f13c8b404988c0047eb8781394a793ff00ec3f76b2b/kubepods/burstable/podc9c02c2ccca0d56b7dc7c694169bcccd/dc727314bb8feb1138652858e512f8c02d7cd8a44b613af213c232699b379074"
	I0620 17:33:24.067692  163656 ssh_runner.go:195] Run: sudo cat /sys/fs/cgroup/freezer/docker/51cd6fd7f0b73e9965ad2f13c8b404988c0047eb8781394a793ff00ec3f76b2b/kubepods/burstable/podc9c02c2ccca0d56b7dc7c694169bcccd/dc727314bb8feb1138652858e512f8c02d7cd8a44b613af213c232699b379074/freezer.state
	I0620 17:33:24.078810  163656 api_server.go:204] freezer state: "THAWED"
	I0620 17:33:24.078850  163656 api_server.go:253] Checking apiserver healthz at https://192.168.67.2:8443/healthz ...
	I0620 17:33:24.086770  163656 api_server.go:279] https://192.168.67.2:8443/healthz returned 200:
	ok
	I0620 17:33:24.086801  163656 status.go:422] multinode-632831 apiserver status = Running (err=<nil>)
	I0620 17:33:24.086849  163656 status.go:257] multinode-632831 status: &{Name:multinode-632831 Host:Running Kubelet:Running APIServer:Running Kubeconfig:Configured Worker:false TimeToStop: DockerEnv: PodManEnv:}
	I0620 17:33:24.086868  163656 status.go:255] checking status of multinode-632831-m02 ...
	I0620 17:33:24.087258  163656 cli_runner.go:164] Run: docker container inspect multinode-632831-m02 --format={{.State.Status}}
	I0620 17:33:24.104455  163656 status.go:330] multinode-632831-m02 host status = "Running" (err=<nil>)
	I0620 17:33:24.104481  163656 host.go:66] Checking if "multinode-632831-m02" exists ...
	I0620 17:33:24.104806  163656 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" multinode-632831-m02
	I0620 17:33:24.121759  163656 host.go:66] Checking if "multinode-632831-m02" exists ...
	I0620 17:33:24.122095  163656 ssh_runner.go:195] Run: sh -c "df -h /var | awk 'NR==2{print $5}'"
	I0620 17:33:24.122146  163656 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" multinode-632831-m02
	I0620 17:33:24.139906  163656 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32918 SSHKeyPath:/home/jenkins/minikube-integration/19106-2452/.minikube/machines/multinode-632831-m02/id_rsa Username:docker}
	I0620 17:33:24.231853  163656 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet
	I0620 17:33:24.243153  163656 status.go:257] multinode-632831-m02 status: &{Name:multinode-632831-m02 Host:Running Kubelet:Running APIServer:Irrelevant Kubeconfig:Irrelevant Worker:true TimeToStop: DockerEnv: PodManEnv:}
	I0620 17:33:24.243189  163656 status.go:255] checking status of multinode-632831-m03 ...
	I0620 17:33:24.243506  163656 cli_runner.go:164] Run: docker container inspect multinode-632831-m03 --format={{.State.Status}}
	I0620 17:33:24.259580  163656 status.go:330] multinode-632831-m03 host status = "Stopped" (err=<nil>)
	I0620 17:33:24.259602  163656 status.go:343] host is not running, skipping remaining checks
	I0620 17:33:24.259610  163656 status.go:257] multinode-632831-m03 status: &{Name:multinode-632831-m03 Host:Stopped Kubelet:Stopped APIServer:Stopped Kubeconfig:Stopped Worker:true TimeToStop: DockerEnv: PodManEnv:}

                                                
                                                
** /stderr **
--- PASS: TestMultiNode/serial/StopNode (2.25s)
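
The status semantics shown above can be checked directly: after a single worker is stopped, minikube status still prints every node's state but returns exit code 7 instead of 0. A minimal sketch with this run's profile:
  $ out/minikube-linux-arm64 -p multinode-632831 node stop m03
  $ out/minikube-linux-arm64 -p multinode-632831 status    # per-node state; exit status 7 while m03 is stopped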

                                                
                                    
TestMultiNode/serial/StartAfterStop (11.11s)

                                                
                                                
=== RUN   TestMultiNode/serial/StartAfterStop
multinode_test.go:282: (dbg) Run:  out/minikube-linux-arm64 -p multinode-632831 node start m03 -v=7 --alsologtostderr
multinode_test.go:282: (dbg) Done: out/minikube-linux-arm64 -p multinode-632831 node start m03 -v=7 --alsologtostderr: (10.363739121s)
multinode_test.go:290: (dbg) Run:  out/minikube-linux-arm64 -p multinode-632831 status -v=7 --alsologtostderr
multinode_test.go:306: (dbg) Run:  kubectl get nodes
--- PASS: TestMultiNode/serial/StartAfterStop (11.11s)

                                                
                                    
TestMultiNode/serial/RestartKeepsNodes (87.6s)

                                                
                                                
=== RUN   TestMultiNode/serial/RestartKeepsNodes
multinode_test.go:314: (dbg) Run:  out/minikube-linux-arm64 node list -p multinode-632831
multinode_test.go:321: (dbg) Run:  out/minikube-linux-arm64 stop -p multinode-632831
multinode_test.go:321: (dbg) Done: out/minikube-linux-arm64 stop -p multinode-632831: (22.491474097s)
multinode_test.go:326: (dbg) Run:  out/minikube-linux-arm64 start -p multinode-632831 --wait=true -v=8 --alsologtostderr
multinode_test.go:326: (dbg) Done: out/minikube-linux-arm64 start -p multinode-632831 --wait=true -v=8 --alsologtostderr: (1m4.991372401s)
multinode_test.go:331: (dbg) Run:  out/minikube-linux-arm64 node list -p multinode-632831
--- PASS: TestMultiNode/serial/RestartKeepsNodes (87.60s)

                                                
                                    
TestMultiNode/serial/DeleteNode (5.61s)

                                                
                                                
=== RUN   TestMultiNode/serial/DeleteNode
multinode_test.go:416: (dbg) Run:  out/minikube-linux-arm64 -p multinode-632831 node delete m03
multinode_test.go:416: (dbg) Done: out/minikube-linux-arm64 -p multinode-632831 node delete m03: (4.943309341s)
multinode_test.go:422: (dbg) Run:  out/minikube-linux-arm64 -p multinode-632831 status --alsologtostderr
multinode_test.go:436: (dbg) Run:  kubectl get nodes
multinode_test.go:444: (dbg) Run:  kubectl get nodes -o "go-template='{{range .items}}{{range .status.conditions}}{{if eq .type "Ready"}} {{.status}}{{"\n"}}{{end}}{{end}}{{end}}'"
--- PASS: TestMultiNode/serial/DeleteNode (5.61s)

                                                
                                    
TestMultiNode/serial/StopMultiNode (21.51s)

                                                
                                                
=== RUN   TestMultiNode/serial/StopMultiNode
multinode_test.go:345: (dbg) Run:  out/minikube-linux-arm64 -p multinode-632831 stop
multinode_test.go:345: (dbg) Done: out/minikube-linux-arm64 -p multinode-632831 stop: (21.295264924s)
multinode_test.go:351: (dbg) Run:  out/minikube-linux-arm64 -p multinode-632831 status
multinode_test.go:351: (dbg) Non-zero exit: out/minikube-linux-arm64 -p multinode-632831 status: exit status 7 (95.987903ms)

                                                
                                                
-- stdout --
	multinode-632831
	type: Control Plane
	host: Stopped
	kubelet: Stopped
	apiserver: Stopped
	kubeconfig: Stopped
	
	multinode-632831-m02
	type: Worker
	host: Stopped
	kubelet: Stopped
	

                                                
                                                
-- /stdout --
multinode_test.go:358: (dbg) Run:  out/minikube-linux-arm64 -p multinode-632831 status --alsologtostderr
multinode_test.go:358: (dbg) Non-zero exit: out/minikube-linux-arm64 -p multinode-632831 status --alsologtostderr: exit status 7 (122.396751ms)

                                                
                                                
-- stdout --
	multinode-632831
	type: Control Plane
	host: Stopped
	kubelet: Stopped
	apiserver: Stopped
	kubeconfig: Stopped
	
	multinode-632831-m02
	type: Worker
	host: Stopped
	kubelet: Stopped
	

                                                
                                                
-- /stdout --
** stderr ** 
	I0620 17:35:30.040582  175632 out.go:291] Setting OutFile to fd 1 ...
	I0620 17:35:30.040697  175632 out.go:338] TERM=,COLORTERM=, which probably does not support color
	I0620 17:35:30.040703  175632 out.go:304] Setting ErrFile to fd 2...
	I0620 17:35:30.040714  175632 out.go:338] TERM=,COLORTERM=, which probably does not support color
	I0620 17:35:30.041660  175632 root.go:338] Updating PATH: /home/jenkins/minikube-integration/19106-2452/.minikube/bin
	I0620 17:35:30.041923  175632 out.go:298] Setting JSON to false
	I0620 17:35:30.041971  175632 mustload.go:65] Loading cluster: multinode-632831
	I0620 17:35:30.042215  175632 notify.go:220] Checking for updates...
	I0620 17:35:30.042409  175632 config.go:182] Loaded profile config "multinode-632831": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.30.2
	I0620 17:35:30.042429  175632 status.go:255] checking status of multinode-632831 ...
	I0620 17:35:30.042928  175632 cli_runner.go:164] Run: docker container inspect multinode-632831 --format={{.State.Status}}
	I0620 17:35:30.064110  175632 status.go:330] multinode-632831 host status = "Stopped" (err=<nil>)
	I0620 17:35:30.064146  175632 status.go:343] host is not running, skipping remaining checks
	I0620 17:35:30.064154  175632 status.go:257] multinode-632831 status: &{Name:multinode-632831 Host:Stopped Kubelet:Stopped APIServer:Stopped Kubeconfig:Stopped Worker:false TimeToStop: DockerEnv: PodManEnv:}
	I0620 17:35:30.064181  175632 status.go:255] checking status of multinode-632831-m02 ...
	I0620 17:35:30.064526  175632 cli_runner.go:164] Run: docker container inspect multinode-632831-m02 --format={{.State.Status}}
	I0620 17:35:30.096920  175632 status.go:330] multinode-632831-m02 host status = "Stopped" (err=<nil>)
	I0620 17:35:30.096947  175632 status.go:343] host is not running, skipping remaining checks
	I0620 17:35:30.096955  175632 status.go:257] multinode-632831-m02 status: &{Name:multinode-632831-m02 Host:Stopped Kubelet:Stopped APIServer:Stopped Kubeconfig:Stopped Worker:true TimeToStop: DockerEnv: PodManEnv:}

                                                
                                                
** /stderr **
--- PASS: TestMultiNode/serial/StopMultiNode (21.51s)

                                                
                                    
TestMultiNode/serial/RestartMultiNode (32.03s)

                                                
                                                
=== RUN   TestMultiNode/serial/RestartMultiNode
multinode_test.go:376: (dbg) Run:  out/minikube-linux-arm64 start -p multinode-632831 --wait=true -v=8 --alsologtostderr --driver=docker  --container-runtime=docker
E0620 17:35:36.957728    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/addons-705802/client.crt: no such file or directory
multinode_test.go:376: (dbg) Done: out/minikube-linux-arm64 start -p multinode-632831 --wait=true -v=8 --alsologtostderr --driver=docker  --container-runtime=docker: (31.322420626s)
multinode_test.go:382: (dbg) Run:  out/minikube-linux-arm64 -p multinode-632831 status --alsologtostderr
multinode_test.go:396: (dbg) Run:  kubectl get nodes
multinode_test.go:404: (dbg) Run:  kubectl get nodes -o "go-template='{{range .items}}{{range .status.conditions}}{{if eq .type "Ready"}} {{.status}}{{"\n"}}{{end}}{{end}}{{end}}'"
--- PASS: TestMultiNode/serial/RestartMultiNode (32.03s)

                                                
                                    
TestMultiNode/serial/ValidateNameConflict (35.59s)

                                                
                                                
=== RUN   TestMultiNode/serial/ValidateNameConflict
multinode_test.go:455: (dbg) Run:  out/minikube-linux-arm64 node list -p multinode-632831
multinode_test.go:464: (dbg) Run:  out/minikube-linux-arm64 start -p multinode-632831-m02 --driver=docker  --container-runtime=docker
multinode_test.go:464: (dbg) Non-zero exit: out/minikube-linux-arm64 start -p multinode-632831-m02 --driver=docker  --container-runtime=docker: exit status 14 (77.804632ms)

                                                
                                                
-- stdout --
	* [multinode-632831-m02] minikube v1.33.1 on Ubuntu 20.04 (arm64)
	  - MINIKUBE_LOCATION=19106
	  - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
	  - KUBECONFIG=/home/jenkins/minikube-integration/19106-2452/kubeconfig
	  - MINIKUBE_HOME=/home/jenkins/minikube-integration/19106-2452/.minikube
	  - MINIKUBE_BIN=out/minikube-linux-arm64
	  - MINIKUBE_FORCE_SYSTEMD=
	
	

                                                
                                                
-- /stdout --
** stderr ** 
	! Profile name 'multinode-632831-m02' is duplicated with machine name 'multinode-632831-m02' in profile 'multinode-632831'
	X Exiting due to MK_USAGE: Profile name should be unique

                                                
                                                
** /stderr **
multinode_test.go:472: (dbg) Run:  out/minikube-linux-arm64 start -p multinode-632831-m03 --driver=docker  --container-runtime=docker
E0620 17:36:33.078876    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/functional-493344/client.crt: no such file or directory
multinode_test.go:472: (dbg) Done: out/minikube-linux-arm64 start -p multinode-632831-m03 --driver=docker  --container-runtime=docker: (33.115957459s)
multinode_test.go:479: (dbg) Run:  out/minikube-linux-arm64 node add -p multinode-632831
multinode_test.go:479: (dbg) Non-zero exit: out/minikube-linux-arm64 node add -p multinode-632831: exit status 80 (313.860469ms)

                                                
                                                
-- stdout --
	* Adding node m03 to cluster multinode-632831 as [worker]
	
	

                                                
                                                
-- /stdout --
** stderr ** 
	X Exiting due to GUEST_NODE_ADD: failed to add node: Node multinode-632831-m03 already exists in multinode-632831-m03 profile
	* 
	╭─────────────────────────────────────────────────────────────────────────────────────────────╮
	│                                                                                             │
	│    * If the above advice does not help, please let us know:                                 │
	│      https://github.com/kubernetes/minikube/issues/new/choose                               │
	│                                                                                             │
	│    * Please run `minikube logs --file=logs.txt` and attach logs.txt to the GitHub issue.    │
	│    * Please also attach the following file to the GitHub issue:                             │
	│    * - /tmp/minikube_node_040ea7097fd6ed71e65be9a474587f81f0ccd21d_0.log                    │
	│                                                                                             │
	╰─────────────────────────────────────────────────────────────────────────────────────────────╯

                                                
                                                
** /stderr **
multinode_test.go:484: (dbg) Run:  out/minikube-linux-arm64 delete -p multinode-632831-m03
multinode_test.go:484: (dbg) Done: out/minikube-linux-arm64 delete -p multinode-632831-m03: (2.030288348s)
--- PASS: TestMultiNode/serial/ValidateNameConflict (35.59s)
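
In short, a profile name that duplicates an existing machine name is rejected up front (exit 14), and once a standalone multinode-632831-m03 profile exists, adding a node to the original cluster is refused (exit 80) because the next node name is already taken. A minimal sketch of the two failing invocations from this run:
  $ out/minikube-linux-arm64 start -p multinode-632831-m02 --driver=docker --container-runtime=docker   # exit 14: MK_USAGE, duplicate profile name
  $ out/minikube-linux-arm64 node add -p multinode-632831                                               # exit 80: GUEST_NODE_ADD, node name already exists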

                                                
                                    
TestPreload (147.26s)

                                                
                                                
=== RUN   TestPreload
preload_test.go:44: (dbg) Run:  out/minikube-linux-arm64 start -p test-preload-085520 --memory=2200 --alsologtostderr --wait=true --preload=false --driver=docker  --container-runtime=docker --kubernetes-version=v1.24.4
preload_test.go:44: (dbg) Done: out/minikube-linux-arm64 start -p test-preload-085520 --memory=2200 --alsologtostderr --wait=true --preload=false --driver=docker  --container-runtime=docker --kubernetes-version=v1.24.4: (1m43.355909185s)
preload_test.go:52: (dbg) Run:  out/minikube-linux-arm64 -p test-preload-085520 image pull gcr.io/k8s-minikube/busybox
preload_test.go:52: (dbg) Done: out/minikube-linux-arm64 -p test-preload-085520 image pull gcr.io/k8s-minikube/busybox: (1.361283743s)
preload_test.go:58: (dbg) Run:  out/minikube-linux-arm64 stop -p test-preload-085520
preload_test.go:58: (dbg) Done: out/minikube-linux-arm64 stop -p test-preload-085520: (10.766279925s)
preload_test.go:66: (dbg) Run:  out/minikube-linux-arm64 start -p test-preload-085520 --memory=2200 --alsologtostderr -v=1 --wait=true --driver=docker  --container-runtime=docker
preload_test.go:66: (dbg) Done: out/minikube-linux-arm64 start -p test-preload-085520 --memory=2200 --alsologtostderr -v=1 --wait=true --driver=docker  --container-runtime=docker: (29.320872653s)
preload_test.go:71: (dbg) Run:  out/minikube-linux-arm64 -p test-preload-085520 image list
helpers_test.go:175: Cleaning up "test-preload-085520" profile ...
helpers_test.go:178: (dbg) Run:  out/minikube-linux-arm64 delete -p test-preload-085520
helpers_test.go:178: (dbg) Done: out/minikube-linux-arm64 delete -p test-preload-085520: (2.220046542s)
--- PASS: TestPreload (147.26s)
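
The preload scenario above amounts to: create a cluster without preloaded images on an older Kubernetes, pull an extra image, stop, restart on the default flow, and confirm the image is still listed. A minimal sketch with this run's profile (verbose logging flags omitted):
  $ out/minikube-linux-arm64 start -p test-preload-085520 --memory=2200 --wait=true --preload=false --driver=docker --container-runtime=docker --kubernetes-version=v1.24.4
  $ out/minikube-linux-arm64 -p test-preload-085520 image pull gcr.io/k8s-minikube/busybox
  $ out/minikube-linux-arm64 stop -p test-preload-085520
  $ out/minikube-linux-arm64 start -p test-preload-085520 --memory=2200 --wait=true --driver=docker --container-runtime=docker
  $ out/minikube-linux-arm64 -p test-preload-085520 image list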

                                                
                                    
TestScheduledStopUnix (103.75s)

                                                
                                                
=== RUN   TestScheduledStopUnix
scheduled_stop_test.go:128: (dbg) Run:  out/minikube-linux-arm64 start -p scheduled-stop-150929 --memory=2048 --driver=docker  --container-runtime=docker
scheduled_stop_test.go:128: (dbg) Done: out/minikube-linux-arm64 start -p scheduled-stop-150929 --memory=2048 --driver=docker  --container-runtime=docker: (30.549253524s)
scheduled_stop_test.go:137: (dbg) Run:  out/minikube-linux-arm64 stop -p scheduled-stop-150929 --schedule 5m
scheduled_stop_test.go:191: (dbg) Run:  out/minikube-linux-arm64 status --format={{.TimeToStop}} -p scheduled-stop-150929 -n scheduled-stop-150929
scheduled_stop_test.go:169: signal error was:  <nil>
scheduled_stop_test.go:137: (dbg) Run:  out/minikube-linux-arm64 stop -p scheduled-stop-150929 --schedule 15s
scheduled_stop_test.go:169: signal error was:  os: process already finished
scheduled_stop_test.go:137: (dbg) Run:  out/minikube-linux-arm64 stop -p scheduled-stop-150929 --cancel-scheduled
scheduled_stop_test.go:176: (dbg) Run:  out/minikube-linux-arm64 status --format={{.Host}} -p scheduled-stop-150929 -n scheduled-stop-150929
scheduled_stop_test.go:205: (dbg) Run:  out/minikube-linux-arm64 status -p scheduled-stop-150929
scheduled_stop_test.go:137: (dbg) Run:  out/minikube-linux-arm64 stop -p scheduled-stop-150929 --schedule 15s
scheduled_stop_test.go:169: signal error was:  os: process already finished
E0620 17:40:36.958230    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/addons-705802/client.crt: no such file or directory
scheduled_stop_test.go:205: (dbg) Run:  out/minikube-linux-arm64 status -p scheduled-stop-150929
scheduled_stop_test.go:205: (dbg) Non-zero exit: out/minikube-linux-arm64 status -p scheduled-stop-150929: exit status 7 (66.861342ms)

                                                
                                                
-- stdout --
	scheduled-stop-150929
	type: Control Plane
	host: Stopped
	kubelet: Stopped
	apiserver: Stopped
	kubeconfig: Stopped
	

                                                
                                                
-- /stdout --
scheduled_stop_test.go:176: (dbg) Run:  out/minikube-linux-arm64 status --format={{.Host}} -p scheduled-stop-150929 -n scheduled-stop-150929
scheduled_stop_test.go:176: (dbg) Non-zero exit: out/minikube-linux-arm64 status --format={{.Host}} -p scheduled-stop-150929 -n scheduled-stop-150929: exit status 7 (70.089739ms)

                                                
                                                
-- stdout --
	Stopped

                                                
                                                
-- /stdout --
scheduled_stop_test.go:176: status error: exit status 7 (may be ok)
helpers_test.go:175: Cleaning up "scheduled-stop-150929" profile ...
helpers_test.go:178: (dbg) Run:  out/minikube-linux-arm64 delete -p scheduled-stop-150929
helpers_test.go:178: (dbg) Done: out/minikube-linux-arm64 delete -p scheduled-stop-150929: (1.669157817s)
--- PASS: TestScheduledStopUnix (103.75s)
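
The scheduled-stop commands above can be driven manually in the same order; a minimal sketch, assuming the profile name from this run:
  $ out/minikube-linux-arm64 stop -p scheduled-stop-150929 --schedule 5m
  $ out/minikube-linux-arm64 stop -p scheduled-stop-150929 --cancel-scheduled
  $ out/minikube-linux-arm64 stop -p scheduled-stop-150929 --schedule 15s
  $ out/minikube-linux-arm64 status --format={{.Host}} -p scheduled-stop-150929   # "Stopped" with exit status 7 once the schedule has fired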

                                                
                                    
TestSkaffold (116.11s)

                                                
                                                
=== RUN   TestSkaffold
skaffold_test.go:59: (dbg) Run:  /tmp/skaffold.exe1171600171 version
skaffold_test.go:63: skaffold version: v2.12.0
skaffold_test.go:66: (dbg) Run:  out/minikube-linux-arm64 start -p skaffold-187542 --memory=2600 --driver=docker  --container-runtime=docker
skaffold_test.go:66: (dbg) Done: out/minikube-linux-arm64 start -p skaffold-187542 --memory=2600 --driver=docker  --container-runtime=docker: (32.098373826s)
skaffold_test.go:86: copying out/minikube-linux-arm64 to /home/jenkins/workspace/Docker_Linux_docker_arm64/out/minikube
skaffold_test.go:105: (dbg) Run:  /tmp/skaffold.exe1171600171 run --minikube-profile skaffold-187542 --kube-context skaffold-187542 --status-check=true --port-forward=false --interactive=false
E0620 17:41:33.077980    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/functional-493344/client.crt: no such file or directory
skaffold_test.go:105: (dbg) Done: /tmp/skaffold.exe1171600171 run --minikube-profile skaffold-187542 --kube-context skaffold-187542 --status-check=true --port-forward=false --interactive=false: (1m8.888483849s)
skaffold_test.go:111: (dbg) TestSkaffold: waiting 1m0s for pods matching "app=leeroy-app" in namespace "default" ...
helpers_test.go:344: "leeroy-app-7c7c6fbfb5-hmrz5" [7bd82f3b-5dcd-4679-badf-e595b10cd0f4] Running
skaffold_test.go:111: (dbg) TestSkaffold: app=leeroy-app healthy within 6.003564946s
skaffold_test.go:114: (dbg) TestSkaffold: waiting 1m0s for pods matching "app=leeroy-web" in namespace "default" ...
helpers_test.go:344: "leeroy-web-79b6ddb5bc-xnpr7" [6e7ae2a1-df36-4b50-a863-92a5f9aa53ee] Running
skaffold_test.go:114: (dbg) TestSkaffold: app=leeroy-web healthy within 5.004746137s
helpers_test.go:175: Cleaning up "skaffold-187542" profile ...
helpers_test.go:178: (dbg) Run:  out/minikube-linux-arm64 delete -p skaffold-187542
helpers_test.go:178: (dbg) Done: out/minikube-linux-arm64 delete -p skaffold-187542: (2.872569389s)
--- PASS: TestSkaffold (116.11s)
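
The skaffold flow above is the stock one: start a profile, then point skaffold at it. A minimal sketch; /tmp/skaffold.exe1171600171 is the temporary skaffold binary downloaded for this run, so substitute your own skaffold binary when reproducing:
  $ out/minikube-linux-arm64 start -p skaffold-187542 --memory=2600 --driver=docker --container-runtime=docker
  $ /tmp/skaffold.exe1171600171 run --minikube-profile skaffold-187542 --kube-context skaffold-187542 --status-check=true --port-forward=false --interactive=false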

                                                
                                    
TestInsufficientStorage (10.73s)

                                                
                                                
=== RUN   TestInsufficientStorage
status_test.go:50: (dbg) Run:  out/minikube-linux-arm64 start -p insufficient-storage-662810 --memory=2048 --output=json --wait=true --driver=docker  --container-runtime=docker
status_test.go:50: (dbg) Non-zero exit: out/minikube-linux-arm64 start -p insufficient-storage-662810 --memory=2048 --output=json --wait=true --driver=docker  --container-runtime=docker: exit status 26 (8.47913041s)

                                                
                                                
-- stdout --
	{"specversion":"1.0","id":"ef7ec834-e6dd-4cda-adae-29a2b4e4b6df","source":"https://minikube.sigs.k8s.io/","type":"io.k8s.sigs.minikube.step","datacontenttype":"application/json","data":{"currentstep":"0","message":"[insufficient-storage-662810] minikube v1.33.1 on Ubuntu 20.04 (arm64)","name":"Initial Minikube Setup","totalsteps":"19"}}
	{"specversion":"1.0","id":"b71ac256-5d63-4594-9699-4131e55b572a","source":"https://minikube.sigs.k8s.io/","type":"io.k8s.sigs.minikube.info","datacontenttype":"application/json","data":{"message":"MINIKUBE_LOCATION=19106"}}
	{"specversion":"1.0","id":"ace192d5-4a06-40a8-beae-14ed10231c07","source":"https://minikube.sigs.k8s.io/","type":"io.k8s.sigs.minikube.info","datacontenttype":"application/json","data":{"message":"MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true"}}
	{"specversion":"1.0","id":"9d014a58-4988-4dbd-b52f-fc4507ac7950","source":"https://minikube.sigs.k8s.io/","type":"io.k8s.sigs.minikube.info","datacontenttype":"application/json","data":{"message":"KUBECONFIG=/home/jenkins/minikube-integration/19106-2452/kubeconfig"}}
	{"specversion":"1.0","id":"b4869fc7-2e85-4b8f-baf8-a0fd9e9ea449","source":"https://minikube.sigs.k8s.io/","type":"io.k8s.sigs.minikube.info","datacontenttype":"application/json","data":{"message":"MINIKUBE_HOME=/home/jenkins/minikube-integration/19106-2452/.minikube"}}
	{"specversion":"1.0","id":"bacee0a8-07af-4c42-a270-30342c00dd07","source":"https://minikube.sigs.k8s.io/","type":"io.k8s.sigs.minikube.info","datacontenttype":"application/json","data":{"message":"MINIKUBE_BIN=out/minikube-linux-arm64"}}
	{"specversion":"1.0","id":"d1874859-550e-4cd4-9eb8-5e1ca0376be2","source":"https://minikube.sigs.k8s.io/","type":"io.k8s.sigs.minikube.info","datacontenttype":"application/json","data":{"message":"MINIKUBE_FORCE_SYSTEMD="}}
	{"specversion":"1.0","id":"3c57eebf-289e-41b9-b7ea-6d5804962507","source":"https://minikube.sigs.k8s.io/","type":"io.k8s.sigs.minikube.info","datacontenttype":"application/json","data":{"message":"MINIKUBE_TEST_STORAGE_CAPACITY=100"}}
	{"specversion":"1.0","id":"f0a5f0fb-eb2e-481f-b041-e75047042eca","source":"https://minikube.sigs.k8s.io/","type":"io.k8s.sigs.minikube.info","datacontenttype":"application/json","data":{"message":"MINIKUBE_TEST_AVAILABLE_STORAGE=19"}}
	{"specversion":"1.0","id":"6f9b19ce-0a82-4e43-8e75-caf4b33c1204","source":"https://minikube.sigs.k8s.io/","type":"io.k8s.sigs.minikube.step","datacontenttype":"application/json","data":{"currentstep":"1","message":"Using the docker driver based on user configuration","name":"Selecting Driver","totalsteps":"19"}}
	{"specversion":"1.0","id":"6983827a-f2d6-4035-95ca-752148fd2395","source":"https://minikube.sigs.k8s.io/","type":"io.k8s.sigs.minikube.info","datacontenttype":"application/json","data":{"message":"Using Docker driver with root privileges"}}
	{"specversion":"1.0","id":"e429a724-4922-45ee-a3ab-f737c8b597c6","source":"https://minikube.sigs.k8s.io/","type":"io.k8s.sigs.minikube.step","datacontenttype":"application/json","data":{"currentstep":"3","message":"Starting \"insufficient-storage-662810\" primary control-plane node in \"insufficient-storage-662810\" cluster","name":"Starting Node","totalsteps":"19"}}
	{"specversion":"1.0","id":"6d2be6e1-cc6e-41a2-b0a1-d4fac66e583b","source":"https://minikube.sigs.k8s.io/","type":"io.k8s.sigs.minikube.step","datacontenttype":"application/json","data":{"currentstep":"5","message":"Pulling base image v0.0.44-1718753665-19106 ...","name":"Pulling Base Image","totalsteps":"19"}}
	{"specversion":"1.0","id":"bf1f53d4-a6cb-46bb-933a-b5d5cd3d908f","source":"https://minikube.sigs.k8s.io/","type":"io.k8s.sigs.minikube.step","datacontenttype":"application/json","data":{"currentstep":"8","message":"Creating docker container (CPUs=2, Memory=2048MB) ...","name":"Creating Container","totalsteps":"19"}}
	{"specversion":"1.0","id":"635a55af-29b9-4859-b1f9-b25f26edfa69","source":"https://minikube.sigs.k8s.io/","type":"io.k8s.sigs.minikube.error","datacontenttype":"application/json","data":{"advice":"Try one or more of the following to free up space on the device:\n\t\n\t\t\t1. Run \"docker system prune\" to remove unused Docker data (optionally with \"-a\")\n\t\t\t2. Increase the storage allocated to Docker for Desktop by clicking on:\n\t\t\t\tDocker icon \u003e Preferences \u003e Resources \u003e Disk Image Size\n\t\t\t3. Run \"minikube ssh -- docker system prune\" if using the Docker container runtime","exitcode":"26","issues":"https://github.com/kubernetes/minikube/issues/9024","message":"Docker is out of disk space! (/var is at 100%% of capacity). You can pass '--force' to skip this check.","name":"RSRC_DOCKER_STORAGE","url":""}}

                                                
                                                
-- /stdout --
status_test.go:76: (dbg) Run:  out/minikube-linux-arm64 status -p insufficient-storage-662810 --output=json --layout=cluster
status_test.go:76: (dbg) Non-zero exit: out/minikube-linux-arm64 status -p insufficient-storage-662810 --output=json --layout=cluster: exit status 7 (283.415392ms)

                                                
                                                
-- stdout --
	{"Name":"insufficient-storage-662810","StatusCode":507,"StatusName":"InsufficientStorage","StatusDetail":"/var is almost out of disk space","Step":"Creating Container","StepDetail":"Creating docker container (CPUs=2, Memory=2048MB) ...","BinaryVersion":"v1.33.1","Components":{"kubeconfig":{"Name":"kubeconfig","StatusCode":500,"StatusName":"Error"}},"Nodes":[{"Name":"insufficient-storage-662810","StatusCode":507,"StatusName":"InsufficientStorage","Components":{"apiserver":{"Name":"apiserver","StatusCode":405,"StatusName":"Stopped"},"kubelet":{"Name":"kubelet","StatusCode":405,"StatusName":"Stopped"}}}]}

                                                
                                                
-- /stdout --
** stderr ** 
	E0620 17:42:57.765610  207839 status.go:417] kubeconfig endpoint: get endpoint: "insufficient-storage-662810" does not appear in /home/jenkins/minikube-integration/19106-2452/kubeconfig

                                                
                                                
** /stderr **
status_test.go:76: (dbg) Run:  out/minikube-linux-arm64 status -p insufficient-storage-662810 --output=json --layout=cluster
status_test.go:76: (dbg) Non-zero exit: out/minikube-linux-arm64 status -p insufficient-storage-662810 --output=json --layout=cluster: exit status 7 (285.77943ms)

                                                
                                                
-- stdout --
	{"Name":"insufficient-storage-662810","StatusCode":507,"StatusName":"InsufficientStorage","StatusDetail":"/var is almost out of disk space","BinaryVersion":"v1.33.1","Components":{"kubeconfig":{"Name":"kubeconfig","StatusCode":500,"StatusName":"Error"}},"Nodes":[{"Name":"insufficient-storage-662810","StatusCode":507,"StatusName":"InsufficientStorage","Components":{"apiserver":{"Name":"apiserver","StatusCode":405,"StatusName":"Stopped"},"kubelet":{"Name":"kubelet","StatusCode":405,"StatusName":"Stopped"}}}]}

                                                
                                                
-- /stdout --
** stderr ** 
	E0620 17:42:58.052487  207891 status.go:417] kubeconfig endpoint: get endpoint: "insufficient-storage-662810" does not appear in /home/jenkins/minikube-integration/19106-2452/kubeconfig
	E0620 17:42:58.062899  207891 status.go:560] unable to read event log: stat: stat /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/insufficient-storage-662810/events.json: no such file or directory

                                                
                                                
** /stderr **
helpers_test.go:175: Cleaning up "insufficient-storage-662810" profile ...
helpers_test.go:178: (dbg) Run:  out/minikube-linux-arm64 delete -p insufficient-storage-662810
helpers_test.go:178: (dbg) Done: out/minikube-linux-arm64 delete -p insufficient-storage-662810: (1.685091839s)
--- PASS: TestInsufficientStorage (10.73s)
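
The storage check above is driven by the MINIKUBE_TEST_STORAGE_CAPACITY=100 and MINIKUBE_TEST_AVAILABLE_STORAGE=19 values echoed in the start output (presumably exported by the test harness); with them in effect, start fails with exit 26 (RSRC_DOCKER_STORAGE) and status reports InsufficientStorage. A minimal sketch of the two probes:
  $ out/minikube-linux-arm64 start -p insufficient-storage-662810 --memory=2048 --output=json --wait=true --driver=docker --container-runtime=docker   # exit 26
  $ out/minikube-linux-arm64 status -p insufficient-storage-662810 --output=json --layout=cluster                                                      # StatusName "InsufficientStorage", exit 7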

                                                
                                    
TestRunningBinaryUpgrade (83.01s)

                                                
                                                
=== RUN   TestRunningBinaryUpgrade
=== PAUSE TestRunningBinaryUpgrade

                                                
                                                

                                                
                                                
=== CONT  TestRunningBinaryUpgrade
version_upgrade_test.go:120: (dbg) Run:  /tmp/minikube-v1.26.0.1186412006 start -p running-upgrade-872368 --memory=2200 --vm-driver=docker  --container-runtime=docker
version_upgrade_test.go:120: (dbg) Done: /tmp/minikube-v1.26.0.1186412006 start -p running-upgrade-872368 --memory=2200 --vm-driver=docker  --container-runtime=docker: (44.298797315s)
version_upgrade_test.go:130: (dbg) Run:  out/minikube-linux-arm64 start -p running-upgrade-872368 --memory=2200 --alsologtostderr -v=1 --driver=docker  --container-runtime=docker
version_upgrade_test.go:130: (dbg) Done: out/minikube-linux-arm64 start -p running-upgrade-872368 --memory=2200 --alsologtostderr -v=1 --driver=docker  --container-runtime=docker: (35.563299399s)
helpers_test.go:175: Cleaning up "running-upgrade-872368" profile ...
helpers_test.go:178: (dbg) Run:  out/minikube-linux-arm64 delete -p running-upgrade-872368
E0620 17:51:33.077955    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/functional-493344/client.crt: no such file or directory
helpers_test.go:178: (dbg) Done: out/minikube-linux-arm64 delete -p running-upgrade-872368: (2.225340483s)
--- PASS: TestRunningBinaryUpgrade (83.01s)

                                                
                                    
TestKubernetesUpgrade (366.86s)

                                                
                                                
=== RUN   TestKubernetesUpgrade
=== PAUSE TestKubernetesUpgrade

                                                
                                                

                                                
                                                
=== CONT  TestKubernetesUpgrade
version_upgrade_test.go:222: (dbg) Run:  out/minikube-linux-arm64 start -p kubernetes-upgrade-487063 --memory=2200 --kubernetes-version=v1.20.0 --alsologtostderr -v=1 --driver=docker  --container-runtime=docker
E0620 17:49:36.125299    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/functional-493344/client.crt: no such file or directory
version_upgrade_test.go:222: (dbg) Done: out/minikube-linux-arm64 start -p kubernetes-upgrade-487063 --memory=2200 --kubernetes-version=v1.20.0 --alsologtostderr -v=1 --driver=docker  --container-runtime=docker: (59.019368633s)
version_upgrade_test.go:227: (dbg) Run:  out/minikube-linux-arm64 stop -p kubernetes-upgrade-487063
version_upgrade_test.go:227: (dbg) Done: out/minikube-linux-arm64 stop -p kubernetes-upgrade-487063: (1.457103487s)
version_upgrade_test.go:232: (dbg) Run:  out/minikube-linux-arm64 -p kubernetes-upgrade-487063 status --format={{.Host}}
E0620 17:50:18.964926    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/skaffold-187542/client.crt: no such file or directory
version_upgrade_test.go:232: (dbg) Non-zero exit: out/minikube-linux-arm64 -p kubernetes-upgrade-487063 status --format={{.Host}}: exit status 7 (90.92913ms)

                                                
                                                
-- stdout --
	Stopped

                                                
                                                
-- /stdout --
version_upgrade_test.go:234: status error: exit status 7 (may be ok)
version_upgrade_test.go:243: (dbg) Run:  out/minikube-linux-arm64 start -p kubernetes-upgrade-487063 --memory=2200 --kubernetes-version=v1.30.2 --alsologtostderr -v=1 --driver=docker  --container-runtime=docker
E0620 17:50:36.958109    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/addons-705802/client.crt: no such file or directory
version_upgrade_test.go:243: (dbg) Done: out/minikube-linux-arm64 start -p kubernetes-upgrade-487063 --memory=2200 --kubernetes-version=v1.30.2 --alsologtostderr -v=1 --driver=docker  --container-runtime=docker: (4m42.513196246s)
version_upgrade_test.go:248: (dbg) Run:  kubectl --context kubernetes-upgrade-487063 version --output=json
version_upgrade_test.go:267: Attempting to downgrade Kubernetes (should fail)
version_upgrade_test.go:269: (dbg) Run:  out/minikube-linux-arm64 start -p kubernetes-upgrade-487063 --memory=2200 --kubernetes-version=v1.20.0 --driver=docker  --container-runtime=docker
version_upgrade_test.go:269: (dbg) Non-zero exit: out/minikube-linux-arm64 start -p kubernetes-upgrade-487063 --memory=2200 --kubernetes-version=v1.20.0 --driver=docker  --container-runtime=docker: exit status 106 (79.022209ms)

                                                
                                                
-- stdout --
	* [kubernetes-upgrade-487063] minikube v1.33.1 on Ubuntu 20.04 (arm64)
	  - MINIKUBE_LOCATION=19106
	  - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
	  - KUBECONFIG=/home/jenkins/minikube-integration/19106-2452/kubeconfig
	  - MINIKUBE_HOME=/home/jenkins/minikube-integration/19106-2452/.minikube
	  - MINIKUBE_BIN=out/minikube-linux-arm64
	  - MINIKUBE_FORCE_SYSTEMD=
	
	

                                                
                                                
-- /stdout --
** stderr ** 
	X Exiting due to K8S_DOWNGRADE_UNSUPPORTED: Unable to safely downgrade existing Kubernetes v1.30.2 cluster to v1.20.0
	* Suggestion: 
	
	    1) Recreate the cluster with Kubernetes 1.20.0, by running:
	    
	    minikube delete -p kubernetes-upgrade-487063
	    minikube start -p kubernetes-upgrade-487063 --kubernetes-version=v1.20.0
	    
	    2) Create a second cluster with Kubernetes 1.20.0, by running:
	    
	    minikube start -p kubernetes-upgrade-4870632 --kubernetes-version=v1.20.0
	    
	    3) Use the existing cluster at version Kubernetes 1.30.2, by running:
	    
	    minikube start -p kubernetes-upgrade-487063 --kubernetes-version=v1.30.2
	    

                                                
                                                
** /stderr **
version_upgrade_test.go:273: Attempting restart after unsuccessful downgrade
version_upgrade_test.go:275: (dbg) Run:  out/minikube-linux-arm64 start -p kubernetes-upgrade-487063 --memory=2200 --kubernetes-version=v1.30.2 --alsologtostderr -v=1 --driver=docker  --container-runtime=docker
version_upgrade_test.go:275: (dbg) Done: out/minikube-linux-arm64 start -p kubernetes-upgrade-487063 --memory=2200 --kubernetes-version=v1.30.2 --alsologtostderr -v=1 --driver=docker  --container-runtime=docker: (20.925964933s)
helpers_test.go:175: Cleaning up "kubernetes-upgrade-487063" profile ...
helpers_test.go:178: (dbg) Run:  out/minikube-linux-arm64 delete -p kubernetes-upgrade-487063
helpers_test.go:178: (dbg) Done: out/minikube-linux-arm64 delete -p kubernetes-upgrade-487063: (2.677398721s)
--- PASS: TestKubernetesUpgrade (366.86s)
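
The upgrade path above reduces to: install an older Kubernetes, stop, restart with a newer --kubernetes-version, and confirm that asking for a downgrade is refused with exit 106 (K8S_DOWNGRADE_UNSUPPORTED). A minimal sketch with this run's versions and profile (verbose logging flags omitted):
  $ out/minikube-linux-arm64 start -p kubernetes-upgrade-487063 --memory=2200 --kubernetes-version=v1.20.0 --driver=docker --container-runtime=docker
  $ out/minikube-linux-arm64 stop -p kubernetes-upgrade-487063
  $ out/minikube-linux-arm64 start -p kubernetes-upgrade-487063 --memory=2200 --kubernetes-version=v1.30.2 --driver=docker --container-runtime=docker   # upgrade in place
  $ out/minikube-linux-arm64 start -p kubernetes-upgrade-487063 --memory=2200 --kubernetes-version=v1.20.0 --driver=docker --container-runtime=docker   # refused: exit 106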

                                                
                                    
TestMissingContainerUpgrade (119.33s)

                                                
                                                
=== RUN   TestMissingContainerUpgrade
=== PAUSE TestMissingContainerUpgrade

                                                
                                                

                                                
                                                
=== CONT  TestMissingContainerUpgrade
version_upgrade_test.go:309: (dbg) Run:  /tmp/minikube-v1.26.0.1974610598 start -p missing-upgrade-172861 --memory=2200 --driver=docker  --container-runtime=docker
E0620 17:48:16.083787    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/skaffold-187542/client.crt: no such file or directory
version_upgrade_test.go:309: (dbg) Done: /tmp/minikube-v1.26.0.1974610598 start -p missing-upgrade-172861 --memory=2200 --driver=docker  --container-runtime=docker: (40.105278729s)
version_upgrade_test.go:318: (dbg) Run:  docker stop missing-upgrade-172861
version_upgrade_test.go:318: (dbg) Done: docker stop missing-upgrade-172861: (10.437390458s)
version_upgrade_test.go:323: (dbg) Run:  docker rm missing-upgrade-172861
version_upgrade_test.go:329: (dbg) Run:  out/minikube-linux-arm64 start -p missing-upgrade-172861 --memory=2200 --alsologtostderr -v=1 --driver=docker  --container-runtime=docker
version_upgrade_test.go:329: (dbg) Done: out/minikube-linux-arm64 start -p missing-upgrade-172861 --memory=2200 --alsologtostderr -v=1 --driver=docker  --container-runtime=docker: (1m5.4588521s)
helpers_test.go:175: Cleaning up "missing-upgrade-172861" profile ...
helpers_test.go:178: (dbg) Run:  out/minikube-linux-arm64 delete -p missing-upgrade-172861
helpers_test.go:178: (dbg) Done: out/minikube-linux-arm64 delete -p missing-upgrade-172861: (2.660970944s)
--- PASS: TestMissingContainerUpgrade (119.33s)
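
The missing-container scenario above simulates an out-of-band removal of the node container: start with an older minikube release, stop and remove the container behind minikube's back, then let the newer binary recreate it. A minimal sketch; /tmp/minikube-v1.26.0.1974610598 is the temporary old binary used in this run:
  $ /tmp/minikube-v1.26.0.1974610598 start -p missing-upgrade-172861 --memory=2200 --driver=docker --container-runtime=docker
  $ docker stop missing-upgrade-172861
  $ docker rm missing-upgrade-172861
  $ out/minikube-linux-arm64 start -p missing-upgrade-172861 --memory=2200 --driver=docker --container-runtime=docker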

                                                
                                    
TestNoKubernetes/serial/StartNoK8sWithVersion (0.11s)

                                                
                                                
=== RUN   TestNoKubernetes/serial/StartNoK8sWithVersion
no_kubernetes_test.go:83: (dbg) Run:  out/minikube-linux-arm64 start -p NoKubernetes-487577 --no-kubernetes --kubernetes-version=1.20 --driver=docker  --container-runtime=docker
no_kubernetes_test.go:83: (dbg) Non-zero exit: out/minikube-linux-arm64 start -p NoKubernetes-487577 --no-kubernetes --kubernetes-version=1.20 --driver=docker  --container-runtime=docker: exit status 14 (114.289058ms)

                                                
                                                
-- stdout --
	* [NoKubernetes-487577] minikube v1.33.1 on Ubuntu 20.04 (arm64)
	  - MINIKUBE_LOCATION=19106
	  - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
	  - KUBECONFIG=/home/jenkins/minikube-integration/19106-2452/kubeconfig
	  - MINIKUBE_HOME=/home/jenkins/minikube-integration/19106-2452/.minikube
	  - MINIKUBE_BIN=out/minikube-linux-arm64
	  - MINIKUBE_FORCE_SYSTEMD=
	
	

                                                
                                                
-- /stdout --
** stderr ** 
	X Exiting due to MK_USAGE: cannot specify --kubernetes-version with --no-kubernetes,
	to unset a global config run:
	
	$ minikube config unset kubernetes-version

                                                
                                                
** /stderr **
--- PASS: TestNoKubernetes/serial/StartNoK8sWithVersion (0.11s)
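
As the MK_USAGE message indicates, --no-kubernetes and --kubernetes-version are mutually exclusive; dropping the version pin (or running minikube config unset kubernetes-version, as suggested) lets a no-Kubernetes start proceed. A minimal sketch with this run's profile:
  $ out/minikube-linux-arm64 start -p NoKubernetes-487577 --no-kubernetes --kubernetes-version=1.20 --driver=docker --container-runtime=docker   # exit 14 (MK_USAGE)
  $ out/minikube-linux-arm64 start -p NoKubernetes-487577 --no-kubernetes --driver=docker --container-runtime=docker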

                                                
                                    
TestNoKubernetes/serial/StartWithK8s (45.34s)

                                                
                                                
=== RUN   TestNoKubernetes/serial/StartWithK8s
no_kubernetes_test.go:95: (dbg) Run:  out/minikube-linux-arm64 start -p NoKubernetes-487577 --driver=docker  --container-runtime=docker
E0620 17:43:40.005499    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/addons-705802/client.crt: no such file or directory
no_kubernetes_test.go:95: (dbg) Done: out/minikube-linux-arm64 start -p NoKubernetes-487577 --driver=docker  --container-runtime=docker: (44.889863694s)
no_kubernetes_test.go:200: (dbg) Run:  out/minikube-linux-arm64 -p NoKubernetes-487577 status -o json
--- PASS: TestNoKubernetes/serial/StartWithK8s (45.34s)

                                                
                                    
TestNoKubernetes/serial/StartWithStopK8s (17.22s)

                                                
                                                
=== RUN   TestNoKubernetes/serial/StartWithStopK8s
no_kubernetes_test.go:112: (dbg) Run:  out/minikube-linux-arm64 start -p NoKubernetes-487577 --no-kubernetes --driver=docker  --container-runtime=docker
no_kubernetes_test.go:112: (dbg) Done: out/minikube-linux-arm64 start -p NoKubernetes-487577 --no-kubernetes --driver=docker  --container-runtime=docker: (14.805417033s)
no_kubernetes_test.go:200: (dbg) Run:  out/minikube-linux-arm64 -p NoKubernetes-487577 status -o json
no_kubernetes_test.go:200: (dbg) Non-zero exit: out/minikube-linux-arm64 -p NoKubernetes-487577 status -o json: exit status 2 (578.106789ms)

                                                
                                                
-- stdout --
	{"Name":"NoKubernetes-487577","Host":"Running","Kubelet":"Stopped","APIServer":"Stopped","Kubeconfig":"Configured","Worker":false}

                                                
                                                
-- /stdout --
no_kubernetes_test.go:124: (dbg) Run:  out/minikube-linux-arm64 delete -p NoKubernetes-487577
no_kubernetes_test.go:124: (dbg) Done: out/minikube-linux-arm64 delete -p NoKubernetes-487577: (1.834043607s)
--- PASS: TestNoKubernetes/serial/StartWithStopK8s (17.22s)

                                                
                                    
TestNoKubernetes/serial/Start (11.15s)

                                                
                                                
=== RUN   TestNoKubernetes/serial/Start
no_kubernetes_test.go:136: (dbg) Run:  out/minikube-linux-arm64 start -p NoKubernetes-487577 --no-kubernetes --driver=docker  --container-runtime=docker
no_kubernetes_test.go:136: (dbg) Done: out/minikube-linux-arm64 start -p NoKubernetes-487577 --no-kubernetes --driver=docker  --container-runtime=docker: (11.151307389s)
--- PASS: TestNoKubernetes/serial/Start (11.15s)

                                                
                                    
TestNoKubernetes/serial/VerifyK8sNotRunning (0.31s)

                                                
                                                
=== RUN   TestNoKubernetes/serial/VerifyK8sNotRunning
no_kubernetes_test.go:147: (dbg) Run:  out/minikube-linux-arm64 ssh -p NoKubernetes-487577 "sudo systemctl is-active --quiet service kubelet"
no_kubernetes_test.go:147: (dbg) Non-zero exit: out/minikube-linux-arm64 ssh -p NoKubernetes-487577 "sudo systemctl is-active --quiet service kubelet": exit status 1 (310.764379ms)

                                                
                                                
** stderr ** 
	ssh: Process exited with status 3

                                                
                                                
** /stderr **
--- PASS: TestNoKubernetes/serial/VerifyK8sNotRunning (0.31s)

                                                
                                    
TestNoKubernetes/serial/ProfileList (0.85s)

                                                
                                                
=== RUN   TestNoKubernetes/serial/ProfileList
no_kubernetes_test.go:169: (dbg) Run:  out/minikube-linux-arm64 profile list
no_kubernetes_test.go:179: (dbg) Run:  out/minikube-linux-arm64 profile list --output=json
--- PASS: TestNoKubernetes/serial/ProfileList (0.85s)

                                                
                                    
TestNoKubernetes/serial/Stop (1.25s)

                                                
                                                
=== RUN   TestNoKubernetes/serial/Stop
no_kubernetes_test.go:158: (dbg) Run:  out/minikube-linux-arm64 stop -p NoKubernetes-487577
no_kubernetes_test.go:158: (dbg) Done: out/minikube-linux-arm64 stop -p NoKubernetes-487577: (1.246987639s)
--- PASS: TestNoKubernetes/serial/Stop (1.25s)

                                                
                                    
TestNoKubernetes/serial/StartNoArgs (8.44s)

                                                
                                                
=== RUN   TestNoKubernetes/serial/StartNoArgs
no_kubernetes_test.go:191: (dbg) Run:  out/minikube-linux-arm64 start -p NoKubernetes-487577 --driver=docker  --container-runtime=docker
no_kubernetes_test.go:191: (dbg) Done: out/minikube-linux-arm64 start -p NoKubernetes-487577 --driver=docker  --container-runtime=docker: (8.438320593s)
--- PASS: TestNoKubernetes/serial/StartNoArgs (8.44s)

                                                
                                    
x
+
TestNoKubernetes/serial/VerifyK8sNotRunningSecond (0.32s)

                                                
                                                
=== RUN   TestNoKubernetes/serial/VerifyK8sNotRunningSecond
no_kubernetes_test.go:147: (dbg) Run:  out/minikube-linux-arm64 ssh -p NoKubernetes-487577 "sudo systemctl is-active --quiet service kubelet"
no_kubernetes_test.go:147: (dbg) Non-zero exit: out/minikube-linux-arm64 ssh -p NoKubernetes-487577 "sudo systemctl is-active --quiet service kubelet": exit status 1 (321.684743ms)

                                                
                                                
** stderr ** 
	ssh: Process exited with status 3

                                                
                                                
** /stderr **
--- PASS: TestNoKubernetes/serial/VerifyK8sNotRunningSecond (0.32s)

                                                
                                    
x
+
TestStoppedBinaryUpgrade/Setup (0.64s)

                                                
                                                
=== RUN   TestStoppedBinaryUpgrade/Setup
--- PASS: TestStoppedBinaryUpgrade/Setup (0.64s)

                                                
                                    
x
+
TestStoppedBinaryUpgrade/Upgrade (110.04s)

                                                
                                                
=== RUN   TestStoppedBinaryUpgrade/Upgrade
version_upgrade_test.go:183: (dbg) Run:  /tmp/minikube-v1.26.0.2379395265 start -p stopped-upgrade-496770 --memory=2200 --vm-driver=docker  --container-runtime=docker
E0620 17:46:33.079384    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/functional-493344/client.crt: no such file or directory
version_upgrade_test.go:183: (dbg) Done: /tmp/minikube-v1.26.0.2379395265 start -p stopped-upgrade-496770 --memory=2200 --vm-driver=docker  --container-runtime=docker: (1m6.732294306s)
version_upgrade_test.go:192: (dbg) Run:  /tmp/minikube-v1.26.0.2379395265 -p stopped-upgrade-496770 stop
version_upgrade_test.go:192: (dbg) Done: /tmp/minikube-v1.26.0.2379395265 -p stopped-upgrade-496770 stop: (10.87232668s)
version_upgrade_test.go:198: (dbg) Run:  out/minikube-linux-arm64 start -p stopped-upgrade-496770 --memory=2200 --alsologtostderr -v=1 --driver=docker  --container-runtime=docker
E0620 17:47:35.119426    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/skaffold-187542/client.crt: no such file or directory
E0620 17:47:35.125054    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/skaffold-187542/client.crt: no such file or directory
E0620 17:47:35.135335    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/skaffold-187542/client.crt: no such file or directory
E0620 17:47:35.155965    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/skaffold-187542/client.crt: no such file or directory
E0620 17:47:35.196307    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/skaffold-187542/client.crt: no such file or directory
E0620 17:47:35.276876    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/skaffold-187542/client.crt: no such file or directory
E0620 17:47:35.437648    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/skaffold-187542/client.crt: no such file or directory
E0620 17:47:35.758796    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/skaffold-187542/client.crt: no such file or directory
E0620 17:47:36.399159    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/skaffold-187542/client.crt: no such file or directory
E0620 17:47:37.679694    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/skaffold-187542/client.crt: no such file or directory
E0620 17:47:40.240450    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/skaffold-187542/client.crt: no such file or directory
E0620 17:47:45.361207    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/skaffold-187542/client.crt: no such file or directory
E0620 17:47:55.601499    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/skaffold-187542/client.crt: no such file or directory
version_upgrade_test.go:198: (dbg) Done: out/minikube-linux-arm64 start -p stopped-upgrade-496770 --memory=2200 --alsologtostderr -v=1 --driver=docker  --container-runtime=docker: (32.438374264s)
--- PASS: TestStoppedBinaryUpgrade/Upgrade (110.04s)
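
The upgrade path exercised here is: bring the profile up with an old release binary, stop it, then start the same profile with the binary under test. A minimal sketch of that sequence; the old binary path and flags are taken from the log above, and whether that path is still present on the runner is an assumption.

package main

import (
	"log"
	"os"
	"os/exec"
)

// run executes a command, streams its output, and fails fast on error.
func run(name string, args ...string) {
	cmd := exec.Command(name, args...)
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	if err := cmd.Run(); err != nil {
		log.Fatalf("%s %v: %v", name, args, err)
	}
}

func main() {
	const profile = "stopped-upgrade-496770"
	oldBinary := "/tmp/minikube-v1.26.0.2379395265" // path from the log above; assumed still on disk
	newBinary := "out/minikube-linux-arm64"

	// 1. Bring the profile up with the old release.
	run(oldBinary, "start", "-p", profile, "--memory=2200", "--vm-driver=docker", "--container-runtime=docker")
	// 2. Stop it with the same old binary.
	run(oldBinary, "-p", profile, "stop")
	// 3. Start the stopped profile with the binary under test; this is the upgrade step.
	run(newBinary, "start", "-p", profile, "--memory=2200", "--driver=docker", "--container-runtime=docker")
}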

                                                
                                    
x
+
TestStoppedBinaryUpgrade/MinikubeLogs (1.34s)

                                                
                                                
=== RUN   TestStoppedBinaryUpgrade/MinikubeLogs
version_upgrade_test.go:206: (dbg) Run:  out/minikube-linux-arm64 logs -p stopped-upgrade-496770
version_upgrade_test.go:206: (dbg) Done: out/minikube-linux-arm64 logs -p stopped-upgrade-496770: (1.342089118s)
--- PASS: TestStoppedBinaryUpgrade/MinikubeLogs (1.34s)

                                                
                                    
x
+
TestPause/serial/Start (88.64s)

                                                
                                                
=== RUN   TestPause/serial/Start
pause_test.go:80: (dbg) Run:  out/minikube-linux-arm64 start -p pause-062744 --memory=2048 --install-addons=false --wait=all --driver=docker  --container-runtime=docker
E0620 17:52:35.118734    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/skaffold-187542/client.crt: no such file or directory
pause_test.go:80: (dbg) Done: out/minikube-linux-arm64 start -p pause-062744 --memory=2048 --install-addons=false --wait=all --driver=docker  --container-runtime=docker: (1m28.635725876s)
--- PASS: TestPause/serial/Start (88.64s)

                                                
                                    
x
+
TestPause/serial/SecondStartNoReconfiguration (34.65s)

                                                
                                                
=== RUN   TestPause/serial/SecondStartNoReconfiguration
pause_test.go:92: (dbg) Run:  out/minikube-linux-arm64 start -p pause-062744 --alsologtostderr -v=1 --driver=docker  --container-runtime=docker
E0620 17:53:02.805787    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/skaffold-187542/client.crt: no such file or directory
pause_test.go:92: (dbg) Done: out/minikube-linux-arm64 start -p pause-062744 --alsologtostderr -v=1 --driver=docker  --container-runtime=docker: (34.620712824s)
--- PASS: TestPause/serial/SecondStartNoReconfiguration (34.65s)

                                                
                                    
x
+
TestPause/serial/Pause (0.61s)

                                                
                                                
=== RUN   TestPause/serial/Pause
pause_test.go:110: (dbg) Run:  out/minikube-linux-arm64 pause -p pause-062744 --alsologtostderr -v=5
--- PASS: TestPause/serial/Pause (0.61s)

                                                
                                    
x
+
TestPause/serial/VerifyStatus (0.31s)

                                                
                                                
=== RUN   TestPause/serial/VerifyStatus
status_test.go:76: (dbg) Run:  out/minikube-linux-arm64 status -p pause-062744 --output=json --layout=cluster
status_test.go:76: (dbg) Non-zero exit: out/minikube-linux-arm64 status -p pause-062744 --output=json --layout=cluster: exit status 2 (308.80891ms)

                                                
                                                
-- stdout --
	{"Name":"pause-062744","StatusCode":418,"StatusName":"Paused","Step":"Done","StepDetail":"* Paused 12 containers in: kube-system, kubernetes-dashboard, storage-gluster, istio-operator","BinaryVersion":"v1.33.1","Components":{"kubeconfig":{"Name":"kubeconfig","StatusCode":200,"StatusName":"OK"}},"Nodes":[{"Name":"pause-062744","StatusCode":200,"StatusName":"OK","Components":{"apiserver":{"Name":"apiserver","StatusCode":418,"StatusName":"Paused"},"kubelet":{"Name":"kubelet","StatusCode":405,"StatusName":"Stopped"}}}]}

                                                
                                                
-- /stdout --
--- PASS: TestPause/serial/VerifyStatus (0.31s)
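
The --layout=cluster status uses HTTP-style codes per component (418 Paused, 405 Stopped, 200 OK), so the non-zero exit above is expected for a paused profile. A minimal Go sketch that decodes a trimmed copy of the stdout shown and checks the apiserver is Paused while kubeconfig stays OK; the structs mirror only the fields visible in that output.

package main

import (
	"encoding/json"
	"fmt"
	"log"
)

// component, node and clusterState cover only the fields visible in the
// --layout=cluster output above.
type component struct {
	Name       string `json:"Name"`
	StatusCode int    `json:"StatusCode"`
	StatusName string `json:"StatusName"`
}

type node struct {
	Name       string               `json:"Name"`
	Components map[string]component `json:"Components"`
}

type clusterState struct {
	Name       string               `json:"Name"`
	StatusName string               `json:"StatusName"`
	Components map[string]component `json:"Components"`
	Nodes      []node               `json:"Nodes"`
}

func main() {
	// Trimmed copy of the stdout captured above.
	raw := `{"Name":"pause-062744","StatusCode":418,"StatusName":"Paused",` +
		`"Components":{"kubeconfig":{"Name":"kubeconfig","StatusCode":200,"StatusName":"OK"}},` +
		`"Nodes":[{"Name":"pause-062744","StatusCode":200,` +
		`"Components":{"apiserver":{"Name":"apiserver","StatusCode":418,"StatusName":"Paused"},` +
		`"kubelet":{"Name":"kubelet","StatusCode":405,"StatusName":"Stopped"}}}]}`

	var st clusterState
	if err := json.Unmarshal([]byte(raw), &st); err != nil {
		log.Fatalf("decode layout: %v", err)
	}
	if st.StatusName != "Paused" {
		log.Fatalf("expected cluster to report Paused, got %q", st.StatusName)
	}
	for _, n := range st.Nodes {
		fmt.Printf("node %s: apiserver=%s kubelet=%s\n",
			n.Name, n.Components["apiserver"].StatusName, n.Components["kubelet"].StatusName)
	}
}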

                                                
                                    
x
+
TestPause/serial/Unpause (0.61s)

                                                
                                                
=== RUN   TestPause/serial/Unpause
pause_test.go:121: (dbg) Run:  out/minikube-linux-arm64 unpause -p pause-062744 --alsologtostderr -v=5
--- PASS: TestPause/serial/Unpause (0.61s)

                                                
                                    
x
+
TestPause/serial/PauseAgain (0.92s)

                                                
                                                
=== RUN   TestPause/serial/PauseAgain
pause_test.go:110: (dbg) Run:  out/minikube-linux-arm64 pause -p pause-062744 --alsologtostderr -v=5
--- PASS: TestPause/serial/PauseAgain (0.92s)

                                                
                                    
x
+
TestPause/serial/DeletePaused (2.35s)

                                                
                                                
=== RUN   TestPause/serial/DeletePaused
pause_test.go:132: (dbg) Run:  out/minikube-linux-arm64 delete -p pause-062744 --alsologtostderr -v=5
pause_test.go:132: (dbg) Done: out/minikube-linux-arm64 delete -p pause-062744 --alsologtostderr -v=5: (2.348520096s)
--- PASS: TestPause/serial/DeletePaused (2.35s)

                                                
                                    
x
+
TestPause/serial/VerifyDeletedResources (14.05s)

                                                
                                                
=== RUN   TestPause/serial/VerifyDeletedResources
pause_test.go:142: (dbg) Run:  out/minikube-linux-arm64 profile list --output json
pause_test.go:142: (dbg) Done: out/minikube-linux-arm64 profile list --output json: (13.994526024s)
pause_test.go:168: (dbg) Run:  docker ps -a
pause_test.go:173: (dbg) Run:  docker volume inspect pause-062744
pause_test.go:173: (dbg) Non-zero exit: docker volume inspect pause-062744: exit status 1 (15.643834ms)

                                                
                                                
-- stdout --
	[]

                                                
                                                
-- /stdout --
** stderr ** 
	Error response from daemon: get pause-062744: no such volume

                                                
                                                
** /stderr **
pause_test.go:178: (dbg) Run:  docker network ls
--- PASS: TestPause/serial/VerifyDeletedResources (14.05s)
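
VerifyDeletedResources treats the failing docker volume inspect as the success signal: once the profile is deleted, the lookup must error with "no such volume". A minimal sketch of the same check, assuming the docker CLI is on PATH:

package main

import (
	"bytes"
	"fmt"
	"log"
	"os/exec"
	"strings"
)

func main() {
	const profile = "pause-062744"

	// After `minikube delete`, the profile's named volume must be gone, so
	// `docker volume inspect` is expected to fail.
	cmd := exec.Command("docker", "volume", "inspect", profile)
	var stderr bytes.Buffer
	cmd.Stderr = &stderr

	if err := cmd.Run(); err == nil {
		log.Fatalf("volume %s still exists; delete left resources behind", profile)
	}
	if !strings.Contains(strings.ToLower(stderr.String()), "no such volume") {
		log.Fatalf("unexpected inspect error: %s", stderr.String())
	}
	fmt.Printf("volume %s removed as expected\n", profile)
}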

                                                
                                    
x
+
TestNetworkPlugins/group/auto/Start (87.87s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/auto/Start
net_test.go:112: (dbg) Run:  out/minikube-linux-arm64 start -p auto-978988 --memory=3072 --alsologtostderr --wait=true --wait-timeout=15m --driver=docker  --container-runtime=docker
net_test.go:112: (dbg) Done: out/minikube-linux-arm64 start -p auto-978988 --memory=3072 --alsologtostderr --wait=true --wait-timeout=15m --driver=docker  --container-runtime=docker: (1m27.873585514s)
--- PASS: TestNetworkPlugins/group/auto/Start (87.87s)

                                                
                                    
x
+
TestNetworkPlugins/group/auto/KubeletFlags (0.39s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/auto/KubeletFlags
net_test.go:133: (dbg) Run:  out/minikube-linux-arm64 ssh -p auto-978988 "pgrep -a kubelet"
--- PASS: TestNetworkPlugins/group/auto/KubeletFlags (0.39s)

                                                
                                    
x
+
TestNetworkPlugins/group/auto/NetCatPod (13.35s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/auto/NetCatPod
net_test.go:149: (dbg) Run:  kubectl --context auto-978988 replace --force -f testdata/netcat-deployment.yaml
net_test.go:163: (dbg) TestNetworkPlugins/group/auto/NetCatPod: waiting 15m0s for pods matching "app=netcat" in namespace "default" ...
helpers_test.go:344: "netcat-6bc787d567-95w6p" [1046a7f2-8b55-467c-b3e1-fc820635bfc0] Pending / Ready:ContainersNotReady (containers with unready status: [dnsutils]) / ContainersReady:ContainersNotReady (containers with unready status: [dnsutils])
helpers_test.go:344: "netcat-6bc787d567-95w6p" [1046a7f2-8b55-467c-b3e1-fc820635bfc0] Running
E0620 17:55:36.958007    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/addons-705802/client.crt: no such file or directory
net_test.go:163: (dbg) TestNetworkPlugins/group/auto/NetCatPod: app=netcat healthy within 13.006391538s
--- PASS: TestNetworkPlugins/group/auto/NetCatPod (13.35s)
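
The NetCatPod step applies testdata/netcat-deployment.yaml and then polls until a pod labelled app=netcat is Running and Ready. A roughly equivalent wait can be driven with kubectl; this is a minimal sketch assuming the auto-978988 context still exists (kubectl wait needs the pod object to exist already, whereas the harness also waits for it to appear).

package main

import (
	"log"
	"os"
	"os/exec"
)

func main() {
	// Block until the netcat pod is Ready, using the same label selector
	// and the same 15m budget as the test above.
	cmd := exec.Command("kubectl", "--context", "auto-978988",
		"wait", "--for=condition=ready", "pod",
		"-l", "app=netcat", "-n", "default", "--timeout=15m")
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	if err := cmd.Run(); err != nil {
		log.Fatalf("netcat pod never became ready: %v", err)
	}
}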

                                                
                                    
x
+
TestNetworkPlugins/group/kindnet/Start (65.56s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/kindnet/Start
net_test.go:112: (dbg) Run:  out/minikube-linux-arm64 start -p kindnet-978988 --memory=3072 --alsologtostderr --wait=true --wait-timeout=15m --cni=kindnet --driver=docker  --container-runtime=docker
net_test.go:112: (dbg) Done: out/minikube-linux-arm64 start -p kindnet-978988 --memory=3072 --alsologtostderr --wait=true --wait-timeout=15m --cni=kindnet --driver=docker  --container-runtime=docker: (1m5.556670163s)
--- PASS: TestNetworkPlugins/group/kindnet/Start (65.56s)

                                                
                                    
x
+
TestNetworkPlugins/group/auto/DNS (0.34s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/auto/DNS
net_test.go:175: (dbg) Run:  kubectl --context auto-978988 exec deployment/netcat -- nslookup kubernetes.default
--- PASS: TestNetworkPlugins/group/auto/DNS (0.34s)

                                                
                                    
x
+
TestNetworkPlugins/group/auto/Localhost (0.21s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/auto/Localhost
net_test.go:194: (dbg) Run:  kubectl --context auto-978988 exec deployment/netcat -- /bin/sh -c "nc -w 5 -i 5 -z localhost 8080"
--- PASS: TestNetworkPlugins/group/auto/Localhost (0.21s)

                                                
                                    
x
+
TestNetworkPlugins/group/auto/HairPin (0.23s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/auto/HairPin
net_test.go:264: (dbg) Run:  kubectl --context auto-978988 exec deployment/netcat -- /bin/sh -c "nc -w 5 -i 5 -z netcat 8080"
--- PASS: TestNetworkPlugins/group/auto/HairPin (0.23s)
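
The DNS, Localhost and HairPin steps above (repeated for every CNI group below) are all single execs inside the netcat deployment: nslookup for cluster DNS, nc against localhost for the in-pod port, and nc against the service name for hairpin traffic back through the Service. A minimal sketch bundling the three probes, assuming the auto-978988 context:

package main

import (
	"fmt"
	"log"
	"os/exec"
)

// probe runs a single shell command inside the netcat deployment and fails loudly.
func probe(name string, shellCmd string) {
	cmd := exec.Command("kubectl", "--context", "auto-978988",
		"exec", "deployment/netcat", "--", "/bin/sh", "-c", shellCmd)
	if out, err := cmd.CombinedOutput(); err != nil {
		log.Fatalf("%s probe failed: %v\n%s", name, err, out)
	}
	fmt.Printf("%s probe ok\n", name)
}

func main() {
	// Cluster DNS: the default Service must resolve from inside the pod.
	probe("dns", "nslookup kubernetes.default")
	// Localhost: the pod can reach its own listener on 8080.
	probe("localhost", "nc -w 5 -i 5 -z localhost 8080")
	// Hairpin: traffic to the pod's own Service name loops back successfully.
	probe("hairpin", "nc -w 5 -i 5 -z netcat 8080")
}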

                                                
                                    
x
+
TestNetworkPlugins/group/calico/Start (80.96s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/calico/Start
net_test.go:112: (dbg) Run:  out/minikube-linux-arm64 start -p calico-978988 --memory=3072 --alsologtostderr --wait=true --wait-timeout=15m --cni=calico --driver=docker  --container-runtime=docker
net_test.go:112: (dbg) Done: out/minikube-linux-arm64 start -p calico-978988 --memory=3072 --alsologtostderr --wait=true --wait-timeout=15m --cni=calico --driver=docker  --container-runtime=docker: (1m20.963579734s)
--- PASS: TestNetworkPlugins/group/calico/Start (80.96s)

                                                
                                    
x
+
TestNetworkPlugins/group/kindnet/ControllerPod (6.01s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/kindnet/ControllerPod
net_test.go:120: (dbg) TestNetworkPlugins/group/kindnet/ControllerPod: waiting 10m0s for pods matching "app=kindnet" in namespace "kube-system" ...
helpers_test.go:344: "kindnet-z4srd" [9ac34960-8322-4d27-a682-51d8999d92e4] Running
E0620 17:56:33.078298    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/functional-493344/client.crt: no such file or directory
net_test.go:120: (dbg) TestNetworkPlugins/group/kindnet/ControllerPod: app=kindnet healthy within 6.003917521s
--- PASS: TestNetworkPlugins/group/kindnet/ControllerPod (6.01s)
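
ControllerPod waits for the CNI's own daemon pod to become healthy (app=kindnet in kube-system here; the calico and flannel groups later use k8s-app=calico-node and app=flannel). A minimal polling sketch under the same 10-minute budget, assuming the kindnet-978988 context:

package main

import (
	"fmt"
	"log"
	"os/exec"
	"strings"
	"time"
)

// allRunning reports whether every listed pod phase is Running.
func allRunning(phases []string) bool {
	for _, p := range phases {
		if p != "Running" {
			return false
		}
	}
	return true
}

func main() {
	deadline := time.Now().Add(10 * time.Minute)
	for {
		// List the phases of all pods matching the CNI's label selector.
		out, err := exec.Command("kubectl", "--context", "kindnet-978988",
			"-n", "kube-system", "get", "pods", "-l", "app=kindnet",
			"-o", "jsonpath={.items[*].status.phase}").Output()
		if err != nil {
			log.Fatalf("listing kindnet pods: %v", err)
		}
		phases := strings.Fields(string(out))
		if len(phases) > 0 && allRunning(phases) {
			fmt.Println("kindnet controller pod(s) running:", phases)
			return
		}
		if time.Now().After(deadline) {
			log.Fatalf("timed out waiting for kindnet pods, phases: %v", phases)
		}
		time.Sleep(5 * time.Second)
	}
}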

                                                
                                    
x
+
TestNetworkPlugins/group/kindnet/KubeletFlags (0.37s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/kindnet/KubeletFlags
net_test.go:133: (dbg) Run:  out/minikube-linux-arm64 ssh -p kindnet-978988 "pgrep -a kubelet"
--- PASS: TestNetworkPlugins/group/kindnet/KubeletFlags (0.37s)

                                                
                                    
x
+
TestNetworkPlugins/group/kindnet/NetCatPod (9.33s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/kindnet/NetCatPod
net_test.go:149: (dbg) Run:  kubectl --context kindnet-978988 replace --force -f testdata/netcat-deployment.yaml
net_test.go:163: (dbg) TestNetworkPlugins/group/kindnet/NetCatPod: waiting 15m0s for pods matching "app=netcat" in namespace "default" ...
helpers_test.go:344: "netcat-6bc787d567-nnrjc" [97d6bb0d-2c83-42d8-9ba1-d35c9b221ea3] Pending / Ready:ContainersNotReady (containers with unready status: [dnsutils]) / ContainersReady:ContainersNotReady (containers with unready status: [dnsutils])
helpers_test.go:344: "netcat-6bc787d567-nnrjc" [97d6bb0d-2c83-42d8-9ba1-d35c9b221ea3] Running
net_test.go:163: (dbg) TestNetworkPlugins/group/kindnet/NetCatPod: app=netcat healthy within 9.003756277s
--- PASS: TestNetworkPlugins/group/kindnet/NetCatPod (9.33s)

                                                
                                    
x
+
TestNetworkPlugins/group/kindnet/DNS (0.2s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/kindnet/DNS
net_test.go:175: (dbg) Run:  kubectl --context kindnet-978988 exec deployment/netcat -- nslookup kubernetes.default
--- PASS: TestNetworkPlugins/group/kindnet/DNS (0.20s)

                                                
                                    
x
+
TestNetworkPlugins/group/kindnet/Localhost (0.21s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/kindnet/Localhost
net_test.go:194: (dbg) Run:  kubectl --context kindnet-978988 exec deployment/netcat -- /bin/sh -c "nc -w 5 -i 5 -z localhost 8080"
--- PASS: TestNetworkPlugins/group/kindnet/Localhost (0.21s)

                                                
                                    
x
+
TestNetworkPlugins/group/kindnet/HairPin (0.19s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/kindnet/HairPin
net_test.go:264: (dbg) Run:  kubectl --context kindnet-978988 exec deployment/netcat -- /bin/sh -c "nc -w 5 -i 5 -z netcat 8080"
--- PASS: TestNetworkPlugins/group/kindnet/HairPin (0.19s)

                                                
                                    
x
+
TestNetworkPlugins/group/custom-flannel/Start (65.63s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/custom-flannel/Start
net_test.go:112: (dbg) Run:  out/minikube-linux-arm64 start -p custom-flannel-978988 --memory=3072 --alsologtostderr --wait=true --wait-timeout=15m --cni=testdata/kube-flannel.yaml --driver=docker  --container-runtime=docker
net_test.go:112: (dbg) Done: out/minikube-linux-arm64 start -p custom-flannel-978988 --memory=3072 --alsologtostderr --wait=true --wait-timeout=15m --cni=testdata/kube-flannel.yaml --driver=docker  --container-runtime=docker: (1m5.631291386s)
--- PASS: TestNetworkPlugins/group/custom-flannel/Start (65.63s)

                                                
                                    
x
+
TestNetworkPlugins/group/calico/ControllerPod (6.01s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/calico/ControllerPod
net_test.go:120: (dbg) TestNetworkPlugins/group/calico/ControllerPod: waiting 10m0s for pods matching "k8s-app=calico-node" in namespace "kube-system" ...
helpers_test.go:344: "calico-node-69zvl" [9aa62fc1-3c1d-4ec1-b867-2bfdf05fcdaf] Running
net_test.go:120: (dbg) TestNetworkPlugins/group/calico/ControllerPod: k8s-app=calico-node healthy within 6.007313685s
--- PASS: TestNetworkPlugins/group/calico/ControllerPod (6.01s)

                                                
                                    
x
+
TestNetworkPlugins/group/calico/KubeletFlags (0.38s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/calico/KubeletFlags
net_test.go:133: (dbg) Run:  out/minikube-linux-arm64 ssh -p calico-978988 "pgrep -a kubelet"
--- PASS: TestNetworkPlugins/group/calico/KubeletFlags (0.38s)

                                                
                                    
x
+
TestNetworkPlugins/group/calico/NetCatPod (11.38s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/calico/NetCatPod
net_test.go:149: (dbg) Run:  kubectl --context calico-978988 replace --force -f testdata/netcat-deployment.yaml
net_test.go:163: (dbg) TestNetworkPlugins/group/calico/NetCatPod: waiting 15m0s for pods matching "app=netcat" in namespace "default" ...
helpers_test.go:344: "netcat-6bc787d567-nchnk" [d7acc44a-0f2e-46dc-b453-3aaafb2386ac] Pending / Ready:ContainersNotReady (containers with unready status: [dnsutils]) / ContainersReady:ContainersNotReady (containers with unready status: [dnsutils])
E0620 17:57:35.118388    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/skaffold-187542/client.crt: no such file or directory
helpers_test.go:344: "netcat-6bc787d567-nchnk" [d7acc44a-0f2e-46dc-b453-3aaafb2386ac] Running
net_test.go:163: (dbg) TestNetworkPlugins/group/calico/NetCatPod: app=netcat healthy within 11.004402278s
--- PASS: TestNetworkPlugins/group/calico/NetCatPod (11.38s)

                                                
                                    
x
+
TestNetworkPlugins/group/calico/DNS (0.29s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/calico/DNS
net_test.go:175: (dbg) Run:  kubectl --context calico-978988 exec deployment/netcat -- nslookup kubernetes.default
--- PASS: TestNetworkPlugins/group/calico/DNS (0.29s)

                                                
                                    
x
+
TestNetworkPlugins/group/calico/Localhost (0.25s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/calico/Localhost
net_test.go:194: (dbg) Run:  kubectl --context calico-978988 exec deployment/netcat -- /bin/sh -c "nc -w 5 -i 5 -z localhost 8080"
--- PASS: TestNetworkPlugins/group/calico/Localhost (0.25s)

                                                
                                    
x
+
TestNetworkPlugins/group/calico/HairPin (0.28s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/calico/HairPin
net_test.go:264: (dbg) Run:  kubectl --context calico-978988 exec deployment/netcat -- /bin/sh -c "nc -w 5 -i 5 -z netcat 8080"
--- PASS: TestNetworkPlugins/group/calico/HairPin (0.28s)

                                                
                                    
x
+
TestNetworkPlugins/group/false/Start (93.64s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/false/Start
net_test.go:112: (dbg) Run:  out/minikube-linux-arm64 start -p false-978988 --memory=3072 --alsologtostderr --wait=true --wait-timeout=15m --cni=false --driver=docker  --container-runtime=docker
net_test.go:112: (dbg) Done: out/minikube-linux-arm64 start -p false-978988 --memory=3072 --alsologtostderr --wait=true --wait-timeout=15m --cni=false --driver=docker  --container-runtime=docker: (1m33.635066799s)
--- PASS: TestNetworkPlugins/group/false/Start (93.64s)

                                                
                                    
x
+
TestNetworkPlugins/group/custom-flannel/KubeletFlags (0.36s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/custom-flannel/KubeletFlags
net_test.go:133: (dbg) Run:  out/minikube-linux-arm64 ssh -p custom-flannel-978988 "pgrep -a kubelet"
--- PASS: TestNetworkPlugins/group/custom-flannel/KubeletFlags (0.36s)

                                                
                                    
x
+
TestNetworkPlugins/group/custom-flannel/NetCatPod (11.33s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/custom-flannel/NetCatPod
net_test.go:149: (dbg) Run:  kubectl --context custom-flannel-978988 replace --force -f testdata/netcat-deployment.yaml
net_test.go:163: (dbg) TestNetworkPlugins/group/custom-flannel/NetCatPod: waiting 15m0s for pods matching "app=netcat" in namespace "default" ...
helpers_test.go:344: "netcat-6bc787d567-sbx2w" [21a41935-778e-4647-ad95-a0fc4382e6b8] Pending / Ready:ContainersNotReady (containers with unready status: [dnsutils]) / ContainersReady:ContainersNotReady (containers with unready status: [dnsutils])
helpers_test.go:344: "netcat-6bc787d567-sbx2w" [21a41935-778e-4647-ad95-a0fc4382e6b8] Running
net_test.go:163: (dbg) TestNetworkPlugins/group/custom-flannel/NetCatPod: app=netcat healthy within 11.003748082s
--- PASS: TestNetworkPlugins/group/custom-flannel/NetCatPod (11.33s)

                                                
                                    
x
+
TestNetworkPlugins/group/custom-flannel/DNS (0.26s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/custom-flannel/DNS
net_test.go:175: (dbg) Run:  kubectl --context custom-flannel-978988 exec deployment/netcat -- nslookup kubernetes.default
--- PASS: TestNetworkPlugins/group/custom-flannel/DNS (0.26s)

                                                
                                    
x
+
TestNetworkPlugins/group/custom-flannel/Localhost (0.24s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/custom-flannel/Localhost
net_test.go:194: (dbg) Run:  kubectl --context custom-flannel-978988 exec deployment/netcat -- /bin/sh -c "nc -w 5 -i 5 -z localhost 8080"
--- PASS: TestNetworkPlugins/group/custom-flannel/Localhost (0.24s)

                                                
                                    
x
+
TestNetworkPlugins/group/custom-flannel/HairPin (0.2s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/custom-flannel/HairPin
net_test.go:264: (dbg) Run:  kubectl --context custom-flannel-978988 exec deployment/netcat -- /bin/sh -c "nc -w 5 -i 5 -z netcat 8080"
--- PASS: TestNetworkPlugins/group/custom-flannel/HairPin (0.20s)

                                                
                                    
x
+
TestNetworkPlugins/group/enable-default-cni/Start (49.62s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/enable-default-cni/Start
net_test.go:112: (dbg) Run:  out/minikube-linux-arm64 start -p enable-default-cni-978988 --memory=3072 --alsologtostderr --wait=true --wait-timeout=15m --enable-default-cni=true --driver=docker  --container-runtime=docker
net_test.go:112: (dbg) Done: out/minikube-linux-arm64 start -p enable-default-cni-978988 --memory=3072 --alsologtostderr --wait=true --wait-timeout=15m --enable-default-cni=true --driver=docker  --container-runtime=docker: (49.621681232s)
--- PASS: TestNetworkPlugins/group/enable-default-cni/Start (49.62s)

                                                
                                    
x
+
TestNetworkPlugins/group/false/KubeletFlags (0.4s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/false/KubeletFlags
net_test.go:133: (dbg) Run:  out/minikube-linux-arm64 ssh -p false-978988 "pgrep -a kubelet"
--- PASS: TestNetworkPlugins/group/false/KubeletFlags (0.40s)

                                                
                                    
x
+
TestNetworkPlugins/group/false/NetCatPod (11.5s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/false/NetCatPod
net_test.go:149: (dbg) Run:  kubectl --context false-978988 replace --force -f testdata/netcat-deployment.yaml
net_test.go:163: (dbg) TestNetworkPlugins/group/false/NetCatPod: waiting 15m0s for pods matching "app=netcat" in namespace "default" ...
helpers_test.go:344: "netcat-6bc787d567-j7m7t" [576ffac7-eb8d-463a-9e84-fea4a6728161] Pending / Ready:ContainersNotReady (containers with unready status: [dnsutils]) / ContainersReady:ContainersNotReady (containers with unready status: [dnsutils])
helpers_test.go:344: "netcat-6bc787d567-j7m7t" [576ffac7-eb8d-463a-9e84-fea4a6728161] Running
net_test.go:163: (dbg) TestNetworkPlugins/group/false/NetCatPod: app=netcat healthy within 11.003704311s
--- PASS: TestNetworkPlugins/group/false/NetCatPod (11.50s)

                                                
                                    
x
+
TestNetworkPlugins/group/enable-default-cni/KubeletFlags (0.3s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/enable-default-cni/KubeletFlags
net_test.go:133: (dbg) Run:  out/minikube-linux-arm64 ssh -p enable-default-cni-978988 "pgrep -a kubelet"
--- PASS: TestNetworkPlugins/group/enable-default-cni/KubeletFlags (0.30s)

                                                
                                    
x
+
TestNetworkPlugins/group/enable-default-cni/NetCatPod (12.33s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/enable-default-cni/NetCatPod
net_test.go:149: (dbg) Run:  kubectl --context enable-default-cni-978988 replace --force -f testdata/netcat-deployment.yaml
net_test.go:163: (dbg) TestNetworkPlugins/group/enable-default-cni/NetCatPod: waiting 15m0s for pods matching "app=netcat" in namespace "default" ...
helpers_test.go:344: "netcat-6bc787d567-h7lfx" [7b479ed1-4a46-4a55-bf7d-1dd55fae5ba2] Pending / Ready:ContainersNotReady (containers with unready status: [dnsutils]) / ContainersReady:ContainersNotReady (containers with unready status: [dnsutils])
helpers_test.go:344: "netcat-6bc787d567-h7lfx" [7b479ed1-4a46-4a55-bf7d-1dd55fae5ba2] Running
net_test.go:163: (dbg) TestNetworkPlugins/group/enable-default-cni/NetCatPod: app=netcat healthy within 12.00413046s
--- PASS: TestNetworkPlugins/group/enable-default-cni/NetCatPod (12.33s)

                                                
                                    
x
+
TestNetworkPlugins/group/false/DNS (0.18s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/false/DNS
net_test.go:175: (dbg) Run:  kubectl --context false-978988 exec deployment/netcat -- nslookup kubernetes.default
--- PASS: TestNetworkPlugins/group/false/DNS (0.18s)

                                                
                                    
x
+
TestNetworkPlugins/group/false/Localhost (0.16s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/false/Localhost
net_test.go:194: (dbg) Run:  kubectl --context false-978988 exec deployment/netcat -- /bin/sh -c "nc -w 5 -i 5 -z localhost 8080"
--- PASS: TestNetworkPlugins/group/false/Localhost (0.16s)

                                                
                                    
x
+
TestNetworkPlugins/group/false/HairPin (0.16s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/false/HairPin
net_test.go:264: (dbg) Run:  kubectl --context false-978988 exec deployment/netcat -- /bin/sh -c "nc -w 5 -i 5 -z netcat 8080"
--- PASS: TestNetworkPlugins/group/false/HairPin (0.16s)

                                                
                                    
x
+
TestNetworkPlugins/group/enable-default-cni/DNS (0.32s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/enable-default-cni/DNS
net_test.go:175: (dbg) Run:  kubectl --context enable-default-cni-978988 exec deployment/netcat -- nslookup kubernetes.default
--- PASS: TestNetworkPlugins/group/enable-default-cni/DNS (0.32s)

                                                
                                    
x
+
TestNetworkPlugins/group/enable-default-cni/Localhost (0.27s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/enable-default-cni/Localhost
net_test.go:194: (dbg) Run:  kubectl --context enable-default-cni-978988 exec deployment/netcat -- /bin/sh -c "nc -w 5 -i 5 -z localhost 8080"
--- PASS: TestNetworkPlugins/group/enable-default-cni/Localhost (0.27s)

                                                
                                    
x
+
TestNetworkPlugins/group/enable-default-cni/HairPin (0.21s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/enable-default-cni/HairPin
net_test.go:264: (dbg) Run:  kubectl --context enable-default-cni-978988 exec deployment/netcat -- /bin/sh -c "nc -w 5 -i 5 -z netcat 8080"
--- PASS: TestNetworkPlugins/group/enable-default-cni/HairPin (0.21s)

                                                
                                    
x
+
TestNetworkPlugins/group/flannel/Start (66.56s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/flannel/Start
net_test.go:112: (dbg) Run:  out/minikube-linux-arm64 start -p flannel-978988 --memory=3072 --alsologtostderr --wait=true --wait-timeout=15m --cni=flannel --driver=docker  --container-runtime=docker
E0620 18:00:20.006654    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/addons-705802/client.crt: no such file or directory
net_test.go:112: (dbg) Done: out/minikube-linux-arm64 start -p flannel-978988 --memory=3072 --alsologtostderr --wait=true --wait-timeout=15m --cni=flannel --driver=docker  --container-runtime=docker: (1m6.558210444s)
--- PASS: TestNetworkPlugins/group/flannel/Start (66.56s)

                                                
                                    
x
+
TestNetworkPlugins/group/bridge/Start (56.59s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/bridge/Start
net_test.go:112: (dbg) Run:  out/minikube-linux-arm64 start -p bridge-978988 --memory=3072 --alsologtostderr --wait=true --wait-timeout=15m --cni=bridge --driver=docker  --container-runtime=docker
E0620 18:00:23.924727    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/auto-978988/client.crt: no such file or directory
E0620 18:00:23.930125    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/auto-978988/client.crt: no such file or directory
E0620 18:00:23.940450    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/auto-978988/client.crt: no such file or directory
E0620 18:00:23.966048    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/auto-978988/client.crt: no such file or directory
E0620 18:00:24.007305    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/auto-978988/client.crt: no such file or directory
E0620 18:00:24.088449    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/auto-978988/client.crt: no such file or directory
E0620 18:00:24.249219    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/auto-978988/client.crt: no such file or directory
E0620 18:00:24.569891    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/auto-978988/client.crt: no such file or directory
E0620 18:00:25.210167    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/auto-978988/client.crt: no such file or directory
E0620 18:00:26.491091    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/auto-978988/client.crt: no such file or directory
E0620 18:00:29.051267    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/auto-978988/client.crt: no such file or directory
E0620 18:00:34.172342    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/auto-978988/client.crt: no such file or directory
E0620 18:00:36.957521    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/addons-705802/client.crt: no such file or directory
E0620 18:00:44.412971    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/auto-978988/client.crt: no such file or directory
E0620 18:01:04.893437    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/auto-978988/client.crt: no such file or directory
net_test.go:112: (dbg) Done: out/minikube-linux-arm64 start -p bridge-978988 --memory=3072 --alsologtostderr --wait=true --wait-timeout=15m --cni=bridge --driver=docker  --container-runtime=docker: (56.591200822s)
--- PASS: TestNetworkPlugins/group/bridge/Start (56.59s)

                                                
                                    
x
+
TestNetworkPlugins/group/bridge/KubeletFlags (0.44s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/bridge/KubeletFlags
net_test.go:133: (dbg) Run:  out/minikube-linux-arm64 ssh -p bridge-978988 "pgrep -a kubelet"
--- PASS: TestNetworkPlugins/group/bridge/KubeletFlags (0.44s)

                                                
                                    
x
+
TestNetworkPlugins/group/bridge/NetCatPod (11.4s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/bridge/NetCatPod
net_test.go:149: (dbg) Run:  kubectl --context bridge-978988 replace --force -f testdata/netcat-deployment.yaml
net_test.go:163: (dbg) TestNetworkPlugins/group/bridge/NetCatPod: waiting 15m0s for pods matching "app=netcat" in namespace "default" ...
helpers_test.go:344: "netcat-6bc787d567-trqgq" [57691d48-b88d-4821-b421-5a7656085e21] Pending / Ready:ContainersNotReady (containers with unready status: [dnsutils]) / ContainersReady:ContainersNotReady (containers with unready status: [dnsutils])
helpers_test.go:344: "netcat-6bc787d567-trqgq" [57691d48-b88d-4821-b421-5a7656085e21] Running
net_test.go:163: (dbg) TestNetworkPlugins/group/bridge/NetCatPod: app=netcat healthy within 11.004582105s
--- PASS: TestNetworkPlugins/group/bridge/NetCatPod (11.40s)

                                                
                                    
x
+
TestNetworkPlugins/group/flannel/ControllerPod (6.01s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/flannel/ControllerPod
net_test.go:120: (dbg) TestNetworkPlugins/group/flannel/ControllerPod: waiting 10m0s for pods matching "app=flannel" in namespace "kube-flannel" ...
helpers_test.go:344: "kube-flannel-ds-bh6p8" [7d592cd5-f239-4e06-b0c6-59686c2cc2e7] Running
net_test.go:120: (dbg) TestNetworkPlugins/group/flannel/ControllerPod: app=flannel healthy within 6.004331637s
--- PASS: TestNetworkPlugins/group/flannel/ControllerPod (6.01s)

                                                
                                    
x
+
TestNetworkPlugins/group/flannel/KubeletFlags (0.29s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/flannel/KubeletFlags
net_test.go:133: (dbg) Run:  out/minikube-linux-arm64 ssh -p flannel-978988 "pgrep -a kubelet"
--- PASS: TestNetworkPlugins/group/flannel/KubeletFlags (0.29s)

                                                
                                    
x
+
TestNetworkPlugins/group/flannel/NetCatPod (11.27s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/flannel/NetCatPod
net_test.go:149: (dbg) Run:  kubectl --context flannel-978988 replace --force -f testdata/netcat-deployment.yaml
net_test.go:163: (dbg) TestNetworkPlugins/group/flannel/NetCatPod: waiting 15m0s for pods matching "app=netcat" in namespace "default" ...
helpers_test.go:344: "netcat-6bc787d567-6lwls" [f5c54282-4f15-4344-a47a-780d626d3a82] Pending / Ready:ContainersNotReady (containers with unready status: [dnsutils]) / ContainersReady:ContainersNotReady (containers with unready status: [dnsutils])
E0620 18:01:30.853750    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/kindnet-978988/client.crt: no such file or directory
E0620 18:01:30.859074    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/kindnet-978988/client.crt: no such file or directory
E0620 18:01:30.869403    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/kindnet-978988/client.crt: no such file or directory
E0620 18:01:30.889754    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/kindnet-978988/client.crt: no such file or directory
E0620 18:01:30.930726    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/kindnet-978988/client.crt: no such file or directory
E0620 18:01:31.010979    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/kindnet-978988/client.crt: no such file or directory
E0620 18:01:31.171323    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/kindnet-978988/client.crt: no such file or directory
E0620 18:01:31.492097    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/kindnet-978988/client.crt: no such file or directory
helpers_test.go:344: "netcat-6bc787d567-6lwls" [f5c54282-4f15-4344-a47a-780d626d3a82] Running
E0620 18:01:35.973869    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/kindnet-978988/client.crt: no such file or directory
net_test.go:163: (dbg) TestNetworkPlugins/group/flannel/NetCatPod: app=netcat healthy within 11.003550286s
--- PASS: TestNetworkPlugins/group/flannel/NetCatPod (11.27s)

                                                
                                    
x
+
TestNetworkPlugins/group/bridge/DNS (0.24s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/bridge/DNS
net_test.go:175: (dbg) Run:  kubectl --context bridge-978988 exec deployment/netcat -- nslookup kubernetes.default
E0620 18:01:32.132967    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/kindnet-978988/client.crt: no such file or directory
--- PASS: TestNetworkPlugins/group/bridge/DNS (0.24s)

                                                
                                    
x
+
TestNetworkPlugins/group/bridge/Localhost (0.24s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/bridge/Localhost
net_test.go:194: (dbg) Run:  kubectl --context bridge-978988 exec deployment/netcat -- /bin/sh -c "nc -w 5 -i 5 -z localhost 8080"
--- PASS: TestNetworkPlugins/group/bridge/Localhost (0.24s)

                                                
                                    
x
+
TestNetworkPlugins/group/bridge/HairPin (0.23s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/bridge/HairPin
net_test.go:264: (dbg) Run:  kubectl --context bridge-978988 exec deployment/netcat -- /bin/sh -c "nc -w 5 -i 5 -z netcat 8080"
--- PASS: TestNetworkPlugins/group/bridge/HairPin (0.23s)

                                                
                                    
x
+
TestNetworkPlugins/group/flannel/DNS (0.27s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/flannel/DNS
net_test.go:175: (dbg) Run:  kubectl --context flannel-978988 exec deployment/netcat -- nslookup kubernetes.default
--- PASS: TestNetworkPlugins/group/flannel/DNS (0.27s)

                                                
                                    
x
+
TestNetworkPlugins/group/flannel/Localhost (0.24s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/flannel/Localhost
net_test.go:194: (dbg) Run:  kubectl --context flannel-978988 exec deployment/netcat -- /bin/sh -c "nc -w 5 -i 5 -z localhost 8080"
E0620 18:01:41.094464    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/kindnet-978988/client.crt: no such file or directory
--- PASS: TestNetworkPlugins/group/flannel/Localhost (0.24s)

                                                
                                    
x
+
TestNetworkPlugins/group/flannel/HairPin (0.36s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/flannel/HairPin
net_test.go:264: (dbg) Run:  kubectl --context flannel-978988 exec deployment/netcat -- /bin/sh -c "nc -w 5 -i 5 -z netcat 8080"
--- PASS: TestNetworkPlugins/group/flannel/HairPin (0.36s)

                                                
                                    
x
+
TestNetworkPlugins/group/kubenet/Start (94.77s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/kubenet/Start
net_test.go:112: (dbg) Run:  out/minikube-linux-arm64 start -p kubenet-978988 --memory=3072 --alsologtostderr --wait=true --wait-timeout=15m --network-plugin=kubenet --driver=docker  --container-runtime=docker
net_test.go:112: (dbg) Done: out/minikube-linux-arm64 start -p kubenet-978988 --memory=3072 --alsologtostderr --wait=true --wait-timeout=15m --network-plugin=kubenet --driver=docker  --container-runtime=docker: (1m34.764904153s)
--- PASS: TestNetworkPlugins/group/kubenet/Start (94.77s)

                                                
                                    
x
+
TestStartStop/group/old-k8s-version/serial/FirstStart (151.84s)

                                                
                                                
=== RUN   TestStartStop/group/old-k8s-version/serial/FirstStart
start_stop_delete_test.go:186: (dbg) Run:  out/minikube-linux-arm64 start -p old-k8s-version-577369 --memory=2200 --alsologtostderr --wait=true --kvm-network=default --kvm-qemu-uri=qemu:///system --disable-driver-mounts --keep-context=false --driver=docker  --container-runtime=docker --kubernetes-version=v1.20.0
E0620 18:02:11.815546    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/kindnet-978988/client.crt: no such file or directory
E0620 18:02:22.821703    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/calico-978988/client.crt: no such file or directory
E0620 18:02:22.826888    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/calico-978988/client.crt: no such file or directory
E0620 18:02:22.839045    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/calico-978988/client.crt: no such file or directory
E0620 18:02:22.859327    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/calico-978988/client.crt: no such file or directory
E0620 18:02:22.899539    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/calico-978988/client.crt: no such file or directory
E0620 18:02:22.979780    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/calico-978988/client.crt: no such file or directory
E0620 18:02:23.140308    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/calico-978988/client.crt: no such file or directory
E0620 18:02:23.461011    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/calico-978988/client.crt: no such file or directory
E0620 18:02:24.101771    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/calico-978988/client.crt: no such file or directory
E0620 18:02:25.382022    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/calico-978988/client.crt: no such file or directory
E0620 18:02:27.942959    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/calico-978988/client.crt: no such file or directory
E0620 18:02:33.063712    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/calico-978988/client.crt: no such file or directory
E0620 18:02:35.118422    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/skaffold-187542/client.crt: no such file or directory
E0620 18:02:43.304366    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/calico-978988/client.crt: no such file or directory
E0620 18:02:52.775728    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/kindnet-978988/client.crt: no such file or directory
E0620 18:03:03.784853    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/calico-978988/client.crt: no such file or directory
E0620 18:03:07.775118    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/auto-978988/client.crt: no such file or directory
E0620 18:03:19.978907    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/custom-flannel-978988/client.crt: no such file or directory
E0620 18:03:19.984232    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/custom-flannel-978988/client.crt: no such file or directory
E0620 18:03:19.994544    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/custom-flannel-978988/client.crt: no such file or directory
E0620 18:03:20.014952    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/custom-flannel-978988/client.crt: no such file or directory
E0620 18:03:20.055313    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/custom-flannel-978988/client.crt: no such file or directory
E0620 18:03:20.135621    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/custom-flannel-978988/client.crt: no such file or directory
E0620 18:03:20.296009    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/custom-flannel-978988/client.crt: no such file or directory
E0620 18:03:20.616547    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/custom-flannel-978988/client.crt: no such file or directory
E0620 18:03:21.257672    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/custom-flannel-978988/client.crt: no such file or directory
E0620 18:03:22.537909    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/custom-flannel-978988/client.crt: no such file or directory
E0620 18:03:25.098230    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/custom-flannel-978988/client.crt: no such file or directory
start_stop_delete_test.go:186: (dbg) Done: out/minikube-linux-arm64 start -p old-k8s-version-577369 --memory=2200 --alsologtostderr --wait=true --kvm-network=default --kvm-qemu-uri=qemu:///system --disable-driver-mounts --keep-context=false --driver=docker  --container-runtime=docker --kubernetes-version=v1.20.0: (2m31.841714125s)
--- PASS: TestStartStop/group/old-k8s-version/serial/FirstStart (151.84s)

                                                
                                    
x
+
TestNetworkPlugins/group/kubenet/KubeletFlags (0.29s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/kubenet/KubeletFlags
net_test.go:133: (dbg) Run:  out/minikube-linux-arm64 ssh -p kubenet-978988 "pgrep -a kubelet"
--- PASS: TestNetworkPlugins/group/kubenet/KubeletFlags (0.29s)

                                                
                                    
x
+
TestNetworkPlugins/group/kubenet/NetCatPod (11.32s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/kubenet/NetCatPod
net_test.go:149: (dbg) Run:  kubectl --context kubenet-978988 replace --force -f testdata/netcat-deployment.yaml
net_test.go:163: (dbg) TestNetworkPlugins/group/kubenet/NetCatPod: waiting 15m0s for pods matching "app=netcat" in namespace "default" ...
helpers_test.go:344: "netcat-6bc787d567-b68c9" [628c1138-99f9-413b-be82-d6b5d1ab5fb9] Pending / Ready:ContainersNotReady (containers with unready status: [dnsutils]) / ContainersReady:ContainersNotReady (containers with unready status: [dnsutils])
E0620 18:03:30.219272    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/custom-flannel-978988/client.crt: no such file or directory
helpers_test.go:344: "netcat-6bc787d567-b68c9" [628c1138-99f9-413b-be82-d6b5d1ab5fb9] Running
E0620 18:03:40.460306    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/custom-flannel-978988/client.crt: no such file or directory
net_test.go:163: (dbg) TestNetworkPlugins/group/kubenet/NetCatPod: app=netcat healthy within 11.004117388s
--- PASS: TestNetworkPlugins/group/kubenet/NetCatPod (11.32s)
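A minimal Go sketch of the readiness wait above, using kubectl wait instead of the harness's own polling helper. The context name (kubenet-978988), the label app=netcat, and the 15m0s budget are taken from the log; everything else is illustrative, not the test's actual code.

package main

import (
	"fmt"
	"os/exec"
)

func main() {
	// Block until every pod labelled app=netcat reports Ready, or the
	// timeout expires; 15m0s mirrors the harness's wait budget above.
	cmd := exec.Command("kubectl",
		"--context", "kubenet-978988",
		"wait", "--for=condition=Ready",
		"pod", "-l", "app=netcat",
		"--timeout=15m0s",
	)
	out, err := cmd.CombinedOutput()
	fmt.Printf("%s", out)
	if err != nil {
		fmt.Println("pods did not become Ready within the timeout:", err)
	}
}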

                                                
                                    
x
+
TestNetworkPlugins/group/kubenet/DNS (0.18s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/kubenet/DNS
net_test.go:175: (dbg) Run:  kubectl --context kubenet-978988 exec deployment/netcat -- nslookup kubernetes.default
--- PASS: TestNetworkPlugins/group/kubenet/DNS (0.18s)

                                                
                                    
x
+
TestNetworkPlugins/group/kubenet/Localhost (0.16s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/kubenet/Localhost
net_test.go:194: (dbg) Run:  kubectl --context kubenet-978988 exec deployment/netcat -- /bin/sh -c "nc -w 5 -i 5 -z localhost 8080"
--- PASS: TestNetworkPlugins/group/kubenet/Localhost (0.16s)

                                                
                                    
x
+
TestNetworkPlugins/group/kubenet/HairPin (0.16s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/kubenet/HairPin
net_test.go:264: (dbg) Run:  kubectl --context kubenet-978988 exec deployment/netcat -- /bin/sh -c "nc -w 5 -i 5 -z netcat 8080"
--- PASS: TestNetworkPlugins/group/kubenet/HairPin (0.16s)
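The three checks above (DNS, Localhost, HairPin) all reduce to one-off probes exec'd inside the netcat deployment. A rough Go equivalent, not the harness's actual code, reusing the context name and the exact shell commands shown in the log:

package main

import (
	"fmt"
	"os/exec"
)

// probe runs a shell command inside the netcat deployment and reports failure.
func probe(name, shellCmd string) error {
	cmd := exec.Command("kubectl", "--context", "kubenet-978988",
		"exec", "deployment/netcat", "--", "/bin/sh", "-c", shellCmd)
	if out, err := cmd.CombinedOutput(); err != nil {
		return fmt.Errorf("%s failed: %v\n%s", name, err, out)
	}
	return nil
}

func main() {
	probes := []struct{ name, sh string }{
		// cluster DNS resolves the default kubernetes Service
		{"dns", "nslookup kubernetes.default"},
		// the pod can reach a port it serves via localhost
		{"localhost", "nc -w 5 -i 5 -z localhost 8080"},
		// hairpin: the pod reaches itself through its own Service name
		{"hairpin", "nc -w 5 -i 5 -z netcat 8080"},
	}
	for _, p := range probes {
		if err := probe(p.name, p.sh); err != nil {
			fmt.Println(err)
			continue
		}
		fmt.Println(p.name, "ok")
	}
}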
E0620 18:18:19.978209    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/custom-flannel-978988/client.crt: no such file or directory

                                                
                                    
x
+
TestStartStop/group/embed-certs/serial/FirstStart (50.26s)

                                                
                                                
=== RUN   TestStartStop/group/embed-certs/serial/FirstStart
start_stop_delete_test.go:186: (dbg) Run:  out/minikube-linux-arm64 start -p embed-certs-221669 --memory=2200 --alsologtostderr --wait=true --embed-certs --driver=docker  --container-runtime=docker --kubernetes-version=v1.30.2
E0620 18:04:14.696604    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/kindnet-978988/client.crt: no such file or directory
start_stop_delete_test.go:186: (dbg) Done: out/minikube-linux-arm64 start -p embed-certs-221669 --memory=2200 --alsologtostderr --wait=true --embed-certs --driver=docker  --container-runtime=docker --kubernetes-version=v1.30.2: (50.259207808s)
--- PASS: TestStartStop/group/embed-certs/serial/FirstStart (50.26s)
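With --embed-certs, minikube writes the client certificate and key into kubeconfig as inline data rather than as file paths under the profile directory. A small Go sketch that confirms the embedded data is present; the profile name comes from the log, the check itself is illustrative and is not what the harness runs:

package main

import (
	"fmt"
	"os/exec"
	"strings"
)

func main() {
	// client-certificate-data is only populated when the cert is embedded;
	// a path-based kubeconfig would carry client-certificate instead.
	out, err := exec.Command("kubectl", "config", "view", "--raw",
		"-o", `jsonpath={.users[?(@.name=="embed-certs-221669")].user.client-certificate-data}`,
	).Output()
	if err != nil {
		fmt.Println("kubectl config view failed:", err)
		return
	}
	data := strings.TrimSpace(string(out))
	if data == "" {
		fmt.Println("no embedded client certificate found for embed-certs-221669")
		return
	}
	fmt.Printf("client certificate is embedded in kubeconfig (%d base64 bytes)\n", len(data))
}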

                                                
                                    
x
+
TestStartStop/group/old-k8s-version/serial/DeployApp (8.74s)

                                                
                                                
=== RUN   TestStartStop/group/old-k8s-version/serial/DeployApp
start_stop_delete_test.go:196: (dbg) Run:  kubectl --context old-k8s-version-577369 create -f testdata/busybox.yaml
start_stop_delete_test.go:196: (dbg) TestStartStop/group/old-k8s-version/serial/DeployApp: waiting 8m0s for pods matching "integration-test=busybox" in namespace "default" ...
helpers_test.go:344: "busybox" [56c2ba6f-5817-4a4b-893d-273392802e8d] Pending
E0620 18:04:40.792304    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/false-978988/client.crt: no such file or directory
E0620 18:04:40.797532    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/false-978988/client.crt: no such file or directory
E0620 18:04:40.807749    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/false-978988/client.crt: no such file or directory
E0620 18:04:40.827992    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/false-978988/client.crt: no such file or directory
E0620 18:04:40.868411    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/false-978988/client.crt: no such file or directory
E0620 18:04:40.949704    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/false-978988/client.crt: no such file or directory
E0620 18:04:41.109957    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/false-978988/client.crt: no such file or directory
helpers_test.go:344: "busybox" [56c2ba6f-5817-4a4b-893d-273392802e8d] Pending / Ready:ContainersNotReady (containers with unready status: [busybox]) / ContainersReady:ContainersNotReady (containers with unready status: [busybox])
E0620 18:04:41.430124    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/false-978988/client.crt: no such file or directory
E0620 18:04:41.901385    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/custom-flannel-978988/client.crt: no such file or directory
E0620 18:04:42.070960    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/false-978988/client.crt: no such file or directory
helpers_test.go:344: "busybox" [56c2ba6f-5817-4a4b-893d-273392802e8d] Running
E0620 18:04:43.351996    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/false-978988/client.crt: no such file or directory
E0620 18:04:45.912526    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/false-978988/client.crt: no such file or directory
E0620 18:04:46.051022    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/enable-default-cni-978988/client.crt: no such file or directory
E0620 18:04:46.056242    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/enable-default-cni-978988/client.crt: no such file or directory
E0620 18:04:46.066456    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/enable-default-cni-978988/client.crt: no such file or directory
E0620 18:04:46.086848    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/enable-default-cni-978988/client.crt: no such file or directory
E0620 18:04:46.127143    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/enable-default-cni-978988/client.crt: no such file or directory
E0620 18:04:46.207965    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/enable-default-cni-978988/client.crt: no such file or directory
E0620 18:04:46.368257    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/enable-default-cni-978988/client.crt: no such file or directory
E0620 18:04:46.689083    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/enable-default-cni-978988/client.crt: no such file or directory
E0620 18:04:47.329867    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/enable-default-cni-978988/client.crt: no such file or directory
start_stop_delete_test.go:196: (dbg) TestStartStop/group/old-k8s-version/serial/DeployApp: integration-test=busybox healthy within 8.00420936s
start_stop_delete_test.go:196: (dbg) Run:  kubectl --context old-k8s-version-577369 exec busybox -- /bin/sh -c "ulimit -n"
--- PASS: TestStartStop/group/old-k8s-version/serial/DeployApp (8.74s)
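The final step of DeployApp above execs ulimit -n inside the busybox pod. A Go sketch of that read, with the context and pod name taken from the log; parsing and reporting are illustrative:

package main

import (
	"fmt"
	"os/exec"
	"strconv"
	"strings"
)

func main() {
	// Ask the running busybox pod for its soft open-file limit.
	out, err := exec.Command("kubectl", "--context", "old-k8s-version-577369",
		"exec", "busybox", "--", "/bin/sh", "-c", "ulimit -n").Output()
	if err != nil {
		fmt.Println("exec failed:", err)
		return
	}
	n, err := strconv.Atoi(strings.TrimSpace(string(out)))
	if err != nil {
		fmt.Println("unexpected ulimit output:", string(out))
		return
	}
	fmt.Println("open-file soft limit in pod:", n)
}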

                                                
                                    
x
+
TestStartStop/group/old-k8s-version/serial/EnableAddonWhileActive (1.89s)

                                                
                                                
=== RUN   TestStartStop/group/old-k8s-version/serial/EnableAddonWhileActive
start_stop_delete_test.go:205: (dbg) Run:  out/minikube-linux-arm64 addons enable metrics-server -p old-k8s-version-577369 --images=MetricsServer=registry.k8s.io/echoserver:1.4 --registries=MetricsServer=fake.domain
E0620 18:04:48.610696    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/enable-default-cni-978988/client.crt: no such file or directory
start_stop_delete_test.go:205: (dbg) Done: out/minikube-linux-arm64 addons enable metrics-server -p old-k8s-version-577369 --images=MetricsServer=registry.k8s.io/echoserver:1.4 --registries=MetricsServer=fake.domain: (1.699994468s)
start_stop_delete_test.go:215: (dbg) Run:  kubectl --context old-k8s-version-577369 describe deploy/metrics-server -n kube-system
--- PASS: TestStartStop/group/old-k8s-version/serial/EnableAddonWhileActive (1.89s)
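EnableAddonWhileActive turns on metrics-server with overridden image and registry flags and then inspects the resulting Deployment. A Go sketch of that flow; the flag values and profile name come from the log, while the jsonpath read-back is an illustrative substitute for the harness's kubectl describe step:

package main

import (
	"fmt"
	"os/exec"
)

// run executes a command and returns its combined output.
func run(name string, args ...string) (string, error) {
	out, err := exec.Command(name, args...).CombinedOutput()
	return string(out), err
}

func main() {
	if out, err := run("out/minikube-linux-arm64", "addons", "enable", "metrics-server",
		"-p", "old-k8s-version-577369",
		"--images=MetricsServer=registry.k8s.io/echoserver:1.4",
		"--registries=MetricsServer=fake.domain"); err != nil {
		fmt.Println("enable failed:", err, out)
		return
	}
	// Read back the container image actually rendered into the Deployment,
	// so the image/registry override can be verified.
	img, err := run("kubectl", "--context", "old-k8s-version-577369",
		"-n", "kube-system", "get", "deploy", "metrics-server",
		"-o", "jsonpath={.spec.template.spec.containers[0].image}")
	if err != nil {
		fmt.Println("reading deployment image failed:", err, img)
		return
	}
	fmt.Println("metrics-server image:", img)
}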

                                                
                                    
x
+
TestStartStop/group/old-k8s-version/serial/Stop (11.27s)

                                                
                                                
=== RUN   TestStartStop/group/old-k8s-version/serial/Stop
start_stop_delete_test.go:228: (dbg) Run:  out/minikube-linux-arm64 stop -p old-k8s-version-577369 --alsologtostderr -v=3
E0620 18:04:51.033403    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/false-978988/client.crt: no such file or directory
E0620 18:04:51.170955    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/enable-default-cni-978988/client.crt: no such file or directory
start_stop_delete_test.go:228: (dbg) Done: out/minikube-linux-arm64 stop -p old-k8s-version-577369 --alsologtostderr -v=3: (11.266403142s)
--- PASS: TestStartStop/group/old-k8s-version/serial/Stop (11.27s)

                                                
                                    
x
+
TestStartStop/group/embed-certs/serial/DeployApp (8.36s)

                                                
                                                
=== RUN   TestStartStop/group/embed-certs/serial/DeployApp
start_stop_delete_test.go:196: (dbg) Run:  kubectl --context embed-certs-221669 create -f testdata/busybox.yaml
start_stop_delete_test.go:196: (dbg) TestStartStop/group/embed-certs/serial/DeployApp: waiting 8m0s for pods matching "integration-test=busybox" in namespace "default" ...
helpers_test.go:344: "busybox" [70edfcdf-38e4-4cae-977d-e435d219727b] Pending
helpers_test.go:344: "busybox" [70edfcdf-38e4-4cae-977d-e435d219727b] Pending / Ready:ContainersNotReady (containers with unready status: [busybox]) / ContainersReady:ContainersNotReady (containers with unready status: [busybox])
helpers_test.go:344: "busybox" [70edfcdf-38e4-4cae-977d-e435d219727b] Running
E0620 18:04:56.291352    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/enable-default-cni-978988/client.crt: no such file or directory
start_stop_delete_test.go:196: (dbg) TestStartStop/group/embed-certs/serial/DeployApp: integration-test=busybox healthy within 8.003663741s
start_stop_delete_test.go:196: (dbg) Run:  kubectl --context embed-certs-221669 exec busybox -- /bin/sh -c "ulimit -n"
--- PASS: TestStartStop/group/embed-certs/serial/DeployApp (8.36s)

                                                
                                    
x
+
TestStartStop/group/embed-certs/serial/EnableAddonWhileActive (1.56s)

                                                
                                                
=== RUN   TestStartStop/group/embed-certs/serial/EnableAddonWhileActive
start_stop_delete_test.go:205: (dbg) Run:  out/minikube-linux-arm64 addons enable metrics-server -p embed-certs-221669 --images=MetricsServer=registry.k8s.io/echoserver:1.4 --registries=MetricsServer=fake.domain
E0620 18:05:01.274581    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/false-978988/client.crt: no such file or directory
start_stop_delete_test.go:205: (dbg) Done: out/minikube-linux-arm64 addons enable metrics-server -p embed-certs-221669 --images=MetricsServer=registry.k8s.io/echoserver:1.4 --registries=MetricsServer=fake.domain: (1.39748553s)
start_stop_delete_test.go:215: (dbg) Run:  kubectl --context embed-certs-221669 describe deploy/metrics-server -n kube-system
--- PASS: TestStartStop/group/embed-certs/serial/EnableAddonWhileActive (1.56s)

                                                
                                    
x
+
TestStartStop/group/old-k8s-version/serial/EnableAddonAfterStop (0.26s)

                                                
                                                
=== RUN   TestStartStop/group/old-k8s-version/serial/EnableAddonAfterStop
start_stop_delete_test.go:239: (dbg) Run:  out/minikube-linux-arm64 status --format={{.Host}} -p old-k8s-version-577369 -n old-k8s-version-577369
start_stop_delete_test.go:239: (dbg) Non-zero exit: out/minikube-linux-arm64 status --format={{.Host}} -p old-k8s-version-577369 -n old-k8s-version-577369: exit status 7 (105.435249ms)

                                                
                                                
-- stdout --
	Stopped

                                                
                                                
-- /stdout --
start_stop_delete_test.go:239: status error: exit status 7 (may be ok)
start_stop_delete_test.go:246: (dbg) Run:  out/minikube-linux-arm64 addons enable dashboard -p old-k8s-version-577369 --images=MetricsScraper=registry.k8s.io/echoserver:1.4
--- PASS: TestStartStop/group/old-k8s-version/serial/EnableAddonAfterStop (0.26s)
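In the block above, minikube status exits non-zero on a stopped cluster (exit status 7 in this run), and the harness treats that as "may be ok" before enabling the dashboard addon. A Go sketch of tolerating that specific exit code; the profile name and exit code are from the log, the handling is illustrative:

package main

import (
	"errors"
	"fmt"
	"os/exec"
)

func main() {
	cmd := exec.Command("out/minikube-linux-arm64", "status",
		"--format={{.Host}}", "-p", "old-k8s-version-577369",
		"-n", "old-k8s-version-577369")
	out, err := cmd.Output()
	fmt.Printf("host state: %s\n", out)

	// Exit status 7 is what this run reports for a stopped host, so it is
	// accepted here instead of being treated as a hard failure.
	var exitErr *exec.ExitError
	if errors.As(err, &exitErr) && exitErr.ExitCode() == 7 {
		fmt.Println("exit status 7: host reported as stopped, continuing")
		return
	}
	if err != nil {
		fmt.Println("unexpected status error:", err)
	}
}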

                                                
                                    
x
+
TestStartStop/group/embed-certs/serial/Stop (11.36s)

                                                
                                                
=== RUN   TestStartStop/group/embed-certs/serial/Stop
start_stop_delete_test.go:228: (dbg) Run:  out/minikube-linux-arm64 stop -p embed-certs-221669 --alsologtostderr -v=3
E0620 18:05:06.532326    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/enable-default-cni-978988/client.crt: no such file or directory
E0620 18:05:06.665641    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/calico-978988/client.crt: no such file or directory
start_stop_delete_test.go:228: (dbg) Done: out/minikube-linux-arm64 stop -p embed-certs-221669 --alsologtostderr -v=3: (11.361156004s)
--- PASS: TestStartStop/group/embed-certs/serial/Stop (11.36s)

                                                
                                    
x
+
TestStartStop/group/embed-certs/serial/EnableAddonAfterStop (0.29s)

                                                
                                                
=== RUN   TestStartStop/group/embed-certs/serial/EnableAddonAfterStop
start_stop_delete_test.go:239: (dbg) Run:  out/minikube-linux-arm64 status --format={{.Host}} -p embed-certs-221669 -n embed-certs-221669
start_stop_delete_test.go:239: (dbg) Non-zero exit: out/minikube-linux-arm64 status --format={{.Host}} -p embed-certs-221669 -n embed-certs-221669: exit status 7 (119.083737ms)

                                                
                                                
-- stdout --
	Stopped

                                                
                                                
-- /stdout --
start_stop_delete_test.go:239: status error: exit status 7 (may be ok)
start_stop_delete_test.go:246: (dbg) Run:  out/minikube-linux-arm64 addons enable dashboard -p embed-certs-221669 --images=MetricsScraper=registry.k8s.io/echoserver:1.4
--- PASS: TestStartStop/group/embed-certs/serial/EnableAddonAfterStop (0.29s)

                                                
                                    
x
+
TestStartStop/group/embed-certs/serial/SecondStart (293.43s)

                                                
                                                
=== RUN   TestStartStop/group/embed-certs/serial/SecondStart
start_stop_delete_test.go:256: (dbg) Run:  out/minikube-linux-arm64 start -p embed-certs-221669 --memory=2200 --alsologtostderr --wait=true --embed-certs --driver=docker  --container-runtime=docker --kubernetes-version=v1.30.2
E0620 18:05:21.754947    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/false-978988/client.crt: no such file or directory
E0620 18:05:23.921193    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/auto-978988/client.crt: no such file or directory
E0620 18:05:27.013100    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/enable-default-cni-978988/client.crt: no such file or directory
E0620 18:05:36.957991    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/addons-705802/client.crt: no such file or directory
E0620 18:05:51.616267    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/auto-978988/client.crt: no such file or directory
E0620 18:06:02.715145    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/false-978988/client.crt: no such file or directory
E0620 18:06:03.822448    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/custom-flannel-978988/client.crt: no such file or directory
E0620 18:06:07.974083    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/enable-default-cni-978988/client.crt: no such file or directory
E0620 18:06:16.126029    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/functional-493344/client.crt: no such file or directory
E0620 18:06:20.893041    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/bridge-978988/client.crt: no such file or directory
E0620 18:06:20.898295    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/bridge-978988/client.crt: no such file or directory
E0620 18:06:20.908494    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/bridge-978988/client.crt: no such file or directory
E0620 18:06:20.928773    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/bridge-978988/client.crt: no such file or directory
E0620 18:06:20.969000    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/bridge-978988/client.crt: no such file or directory
E0620 18:06:21.049300    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/bridge-978988/client.crt: no such file or directory
E0620 18:06:21.209699    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/bridge-978988/client.crt: no such file or directory
E0620 18:06:21.530213    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/bridge-978988/client.crt: no such file or directory
E0620 18:06:22.171080    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/bridge-978988/client.crt: no such file or directory
E0620 18:06:23.034090    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/flannel-978988/client.crt: no such file or directory
E0620 18:06:23.039321    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/flannel-978988/client.crt: no such file or directory
E0620 18:06:23.049584    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/flannel-978988/client.crt: no such file or directory
E0620 18:06:23.069924    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/flannel-978988/client.crt: no such file or directory
E0620 18:06:23.110318    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/flannel-978988/client.crt: no such file or directory
E0620 18:06:23.190665    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/flannel-978988/client.crt: no such file or directory
E0620 18:06:23.351088    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/flannel-978988/client.crt: no such file or directory
E0620 18:06:23.451406    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/bridge-978988/client.crt: no such file or directory
E0620 18:06:23.672028    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/flannel-978988/client.crt: no such file or directory
E0620 18:06:24.313044    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/flannel-978988/client.crt: no such file or directory
E0620 18:06:25.593189    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/flannel-978988/client.crt: no such file or directory
E0620 18:06:26.012141    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/bridge-978988/client.crt: no such file or directory
E0620 18:06:28.153424    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/flannel-978988/client.crt: no such file or directory
E0620 18:06:30.853480    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/kindnet-978988/client.crt: no such file or directory
E0620 18:06:31.132507    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/bridge-978988/client.crt: no such file or directory
E0620 18:06:33.078690    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/functional-493344/client.crt: no such file or directory
E0620 18:06:33.274011    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/flannel-978988/client.crt: no such file or directory
E0620 18:06:41.373346    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/bridge-978988/client.crt: no such file or directory
E0620 18:06:43.514576    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/flannel-978988/client.crt: no such file or directory
E0620 18:06:58.537640    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/kindnet-978988/client.crt: no such file or directory
E0620 18:07:01.853511    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/bridge-978988/client.crt: no such file or directory
E0620 18:07:03.994761    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/flannel-978988/client.crt: no such file or directory
E0620 18:07:22.820908    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/calico-978988/client.crt: no such file or directory
E0620 18:07:24.635341    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/false-978988/client.crt: no such file or directory
E0620 18:07:29.894911    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/enable-default-cni-978988/client.crt: no such file or directory
E0620 18:07:35.118917    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/skaffold-187542/client.crt: no such file or directory
E0620 18:07:42.813914    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/bridge-978988/client.crt: no such file or directory
E0620 18:07:44.955857    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/flannel-978988/client.crt: no such file or directory
E0620 18:07:50.506103    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/calico-978988/client.crt: no such file or directory
E0620 18:08:19.978870    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/custom-flannel-978988/client.crt: no such file or directory
E0620 18:08:30.191407    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/kubenet-978988/client.crt: no such file or directory
E0620 18:08:30.196792    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/kubenet-978988/client.crt: no such file or directory
E0620 18:08:30.207131    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/kubenet-978988/client.crt: no such file or directory
E0620 18:08:30.227383    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/kubenet-978988/client.crt: no such file or directory
E0620 18:08:30.267646    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/kubenet-978988/client.crt: no such file or directory
E0620 18:08:30.347964    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/kubenet-978988/client.crt: no such file or directory
E0620 18:08:30.508073    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/kubenet-978988/client.crt: no such file or directory
E0620 18:08:30.828628    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/kubenet-978988/client.crt: no such file or directory
E0620 18:08:31.469790    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/kubenet-978988/client.crt: no such file or directory
E0620 18:08:32.750101    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/kubenet-978988/client.crt: no such file or directory
E0620 18:08:35.310254    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/kubenet-978988/client.crt: no such file or directory
E0620 18:08:40.430821    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/kubenet-978988/client.crt: no such file or directory
E0620 18:08:47.662689    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/custom-flannel-978988/client.crt: no such file or directory
E0620 18:08:50.671300    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/kubenet-978988/client.crt: no such file or directory
E0620 18:09:04.734113    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/bridge-978988/client.crt: no such file or directory
E0620 18:09:06.876449    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/flannel-978988/client.crt: no such file or directory
E0620 18:09:11.151960    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/kubenet-978988/client.crt: no such file or directory
E0620 18:09:40.792316    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/false-978988/client.crt: no such file or directory
E0620 18:09:46.051220    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/enable-default-cni-978988/client.crt: no such file or directory
E0620 18:09:52.112461    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/kubenet-978988/client.crt: no such file or directory
start_stop_delete_test.go:256: (dbg) Done: out/minikube-linux-arm64 start -p embed-certs-221669 --memory=2200 --alsologtostderr --wait=true --embed-certs --driver=docker  --container-runtime=docker --kubernetes-version=v1.30.2: (4m53.078425405s)
start_stop_delete_test.go:262: (dbg) Run:  out/minikube-linux-arm64 status --format={{.Host}} -p embed-certs-221669 -n embed-certs-221669
--- PASS: TestStartStop/group/embed-certs/serial/SecondStart (293.43s)

                                                
                                    
x
+
TestStartStop/group/embed-certs/serial/UserAppExistsAfterStop (6.01s)

                                                
                                                
=== RUN   TestStartStop/group/embed-certs/serial/UserAppExistsAfterStop
start_stop_delete_test.go:274: (dbg) TestStartStop/group/embed-certs/serial/UserAppExistsAfterStop: waiting 9m0s for pods matching "k8s-app=kubernetes-dashboard" in namespace "kubernetes-dashboard" ...
helpers_test.go:344: "kubernetes-dashboard-779776cb65-v662w" [900672fc-d783-411f-a711-505c1c22b8dc] Running
E0620 18:10:08.475850    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/false-978988/client.crt: no such file or directory
E0620 18:10:13.735828    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/enable-default-cni-978988/client.crt: no such file or directory
start_stop_delete_test.go:274: (dbg) TestStartStop/group/embed-certs/serial/UserAppExistsAfterStop: k8s-app=kubernetes-dashboard healthy within 6.003791792s
--- PASS: TestStartStop/group/embed-certs/serial/UserAppExistsAfterStop (6.01s)

                                                
                                    
x
+
TestStartStop/group/embed-certs/serial/AddonExistsAfterStop (5.10s)

                                                
                                                
=== RUN   TestStartStop/group/embed-certs/serial/AddonExistsAfterStop
start_stop_delete_test.go:287: (dbg) TestStartStop/group/embed-certs/serial/AddonExistsAfterStop: waiting 9m0s for pods matching "k8s-app=kubernetes-dashboard" in namespace "kubernetes-dashboard" ...
helpers_test.go:344: "kubernetes-dashboard-779776cb65-v662w" [900672fc-d783-411f-a711-505c1c22b8dc] Running
start_stop_delete_test.go:287: (dbg) TestStartStop/group/embed-certs/serial/AddonExistsAfterStop: k8s-app=kubernetes-dashboard healthy within 5.004142014s
start_stop_delete_test.go:291: (dbg) Run:  kubectl --context embed-certs-221669 describe deploy/dashboard-metrics-scraper -n kubernetes-dashboard
--- PASS: TestStartStop/group/embed-certs/serial/AddonExistsAfterStop (5.10s)

                                                
                                    
x
+
TestStartStop/group/embed-certs/serial/VerifyKubernetesImages (0.27s)

                                                
                                                
=== RUN   TestStartStop/group/embed-certs/serial/VerifyKubernetesImages
start_stop_delete_test.go:304: (dbg) Run:  out/minikube-linux-arm64 -p embed-certs-221669 image list --format=json
start_stop_delete_test.go:304: Found non-minikube image: gcr.io/k8s-minikube/busybox:1.28.4-glibc
--- PASS: TestStartStop/group/embed-certs/serial/VerifyKubernetesImages (0.27s)

                                                
                                    
x
+
TestStartStop/group/embed-certs/serial/Pause (2.91s)

                                                
                                                
=== RUN   TestStartStop/group/embed-certs/serial/Pause
start_stop_delete_test.go:311: (dbg) Run:  out/minikube-linux-arm64 pause -p embed-certs-221669 --alsologtostderr -v=1
start_stop_delete_test.go:311: (dbg) Run:  out/minikube-linux-arm64 status --format={{.APIServer}} -p embed-certs-221669 -n embed-certs-221669
start_stop_delete_test.go:311: (dbg) Non-zero exit: out/minikube-linux-arm64 status --format={{.APIServer}} -p embed-certs-221669 -n embed-certs-221669: exit status 2 (318.168656ms)

                                                
                                                
-- stdout --
	Paused

                                                
                                                
-- /stdout --
start_stop_delete_test.go:311: status error: exit status 2 (may be ok)
start_stop_delete_test.go:311: (dbg) Run:  out/minikube-linux-arm64 status --format={{.Kubelet}} -p embed-certs-221669 -n embed-certs-221669
start_stop_delete_test.go:311: (dbg) Non-zero exit: out/minikube-linux-arm64 status --format={{.Kubelet}} -p embed-certs-221669 -n embed-certs-221669: exit status 2 (319.126683ms)

                                                
                                                
-- stdout --
	Stopped

                                                
                                                
-- /stdout --
start_stop_delete_test.go:311: status error: exit status 2 (may be ok)
start_stop_delete_test.go:311: (dbg) Run:  out/minikube-linux-arm64 unpause -p embed-certs-221669 --alsologtostderr -v=1
start_stop_delete_test.go:311: (dbg) Run:  out/minikube-linux-arm64 status --format={{.APIServer}} -p embed-certs-221669 -n embed-certs-221669
start_stop_delete_test.go:311: (dbg) Run:  out/minikube-linux-arm64 status --format={{.Kubelet}} -p embed-certs-221669 -n embed-certs-221669
--- PASS: TestStartStop/group/embed-certs/serial/Pause (2.91s)
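The Pause test above pauses the profile, reads component status (where minikube status exits 2 for a paused or stopped component, as shown in the log), then unpauses and reads again. A Go sketch of that cycle; profile name and the tolerated exit code are from the log, error handling is deliberately minimal:

package main

import (
	"errors"
	"fmt"
	"os/exec"
)

// status runs minikube status with the given Go-template format and returns
// the output plus the process exit code (0 when the command succeeds).
func status(format string) (string, int) {
	cmd := exec.Command("out/minikube-linux-arm64", "status",
		"--format="+format, "-p", "embed-certs-221669", "-n", "embed-certs-221669")
	out, err := cmd.Output()
	var exitErr *exec.ExitError
	if errors.As(err, &exitErr) {
		return string(out), exitErr.ExitCode()
	}
	return string(out), 0
}

func main() {
	// Errors from pause/unpause are ignored for brevity in this sketch.
	exec.Command("out/minikube-linux-arm64", "pause", "-p", "embed-certs-221669").Run()
	for _, f := range []string{"{{.APIServer}}", "{{.Kubelet}}"} {
		out, code := status(f)
		// exit status 2 is expected while paused: the component is not running
		fmt.Printf("%s -> %q (exit %d)\n", f, out, code)
	}
	exec.Command("out/minikube-linux-arm64", "unpause", "-p", "embed-certs-221669").Run()
}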

                                                
                                    
x
+
TestStartStop/group/no-preload/serial/FirstStart (99.00s)

                                                
                                                
=== RUN   TestStartStop/group/no-preload/serial/FirstStart
start_stop_delete_test.go:186: (dbg) Run:  out/minikube-linux-arm64 start -p no-preload-581163 --memory=2200 --alsologtostderr --wait=true --preload=false --driver=docker  --container-runtime=docker --kubernetes-version=v1.30.2
E0620 18:10:36.957335    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/addons-705802/client.crt: no such file or directory
E0620 18:11:14.033600    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/kubenet-978988/client.crt: no such file or directory
start_stop_delete_test.go:186: (dbg) Done: out/minikube-linux-arm64 start -p no-preload-581163 --memory=2200 --alsologtostderr --wait=true --preload=false --driver=docker  --container-runtime=docker --kubernetes-version=v1.30.2: (1m39.002270862s)
--- PASS: TestStartStop/group/no-preload/serial/FirstStart (99.00s)

                                                
                                    
x
+
TestStartStop/group/old-k8s-version/serial/UserAppExistsAfterStop (6.01s)

                                                
                                                
=== RUN   TestStartStop/group/old-k8s-version/serial/UserAppExistsAfterStop
start_stop_delete_test.go:274: (dbg) TestStartStop/group/old-k8s-version/serial/UserAppExistsAfterStop: waiting 9m0s for pods matching "k8s-app=kubernetes-dashboard" in namespace "kubernetes-dashboard" ...
helpers_test.go:344: "kubernetes-dashboard-cd95d586-l4fmb" [846474e9-0fd0-42ae-b4d2-e70c7570b97a] Running
E0620 18:11:20.892656    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/bridge-978988/client.crt: no such file or directory
E0620 18:11:23.034117    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/flannel-978988/client.crt: no such file or directory
start_stop_delete_test.go:274: (dbg) TestStartStop/group/old-k8s-version/serial/UserAppExistsAfterStop: k8s-app=kubernetes-dashboard healthy within 6.004971793s
--- PASS: TestStartStop/group/old-k8s-version/serial/UserAppExistsAfterStop (6.01s)

                                                
                                    
x
+
TestStartStop/group/old-k8s-version/serial/AddonExistsAfterStop (5.12s)

                                                
                                                
=== RUN   TestStartStop/group/old-k8s-version/serial/AddonExistsAfterStop
start_stop_delete_test.go:287: (dbg) TestStartStop/group/old-k8s-version/serial/AddonExistsAfterStop: waiting 9m0s for pods matching "k8s-app=kubernetes-dashboard" in namespace "kubernetes-dashboard" ...
helpers_test.go:344: "kubernetes-dashboard-cd95d586-l4fmb" [846474e9-0fd0-42ae-b4d2-e70c7570b97a] Running
start_stop_delete_test.go:287: (dbg) TestStartStop/group/old-k8s-version/serial/AddonExistsAfterStop: k8s-app=kubernetes-dashboard healthy within 5.01036778s
start_stop_delete_test.go:291: (dbg) Run:  kubectl --context old-k8s-version-577369 describe deploy/dashboard-metrics-scraper -n kubernetes-dashboard
--- PASS: TestStartStop/group/old-k8s-version/serial/AddonExistsAfterStop (5.12s)

                                                
                                    
x
+
TestStartStop/group/old-k8s-version/serial/VerifyKubernetesImages (0.25s)

                                                
                                                
=== RUN   TestStartStop/group/old-k8s-version/serial/VerifyKubernetesImages
start_stop_delete_test.go:304: (dbg) Run:  out/minikube-linux-arm64 -p old-k8s-version-577369 image list --format=json
start_stop_delete_test.go:304: Found non-minikube image: gcr.io/k8s-minikube/busybox:1.28.4-glibc
--- PASS: TestStartStop/group/old-k8s-version/serial/VerifyKubernetesImages (0.25s)

                                                
                                    
x
+
TestStartStop/group/old-k8s-version/serial/Pause (2.91s)

                                                
                                                
=== RUN   TestStartStop/group/old-k8s-version/serial/Pause
start_stop_delete_test.go:311: (dbg) Run:  out/minikube-linux-arm64 pause -p old-k8s-version-577369 --alsologtostderr -v=1
start_stop_delete_test.go:311: (dbg) Run:  out/minikube-linux-arm64 status --format={{.APIServer}} -p old-k8s-version-577369 -n old-k8s-version-577369
start_stop_delete_test.go:311: (dbg) Non-zero exit: out/minikube-linux-arm64 status --format={{.APIServer}} -p old-k8s-version-577369 -n old-k8s-version-577369: exit status 2 (310.616189ms)

                                                
                                                
-- stdout --
	Paused

                                                
                                                
-- /stdout --
start_stop_delete_test.go:311: status error: exit status 2 (may be ok)
start_stop_delete_test.go:311: (dbg) Run:  out/minikube-linux-arm64 status --format={{.Kubelet}} -p old-k8s-version-577369 -n old-k8s-version-577369
E0620 18:11:30.853991    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/kindnet-978988/client.crt: no such file or directory
start_stop_delete_test.go:311: (dbg) Non-zero exit: out/minikube-linux-arm64 status --format={{.Kubelet}} -p old-k8s-version-577369 -n old-k8s-version-577369: exit status 2 (324.577681ms)

                                                
                                                
-- stdout --
	Stopped

                                                
                                                
-- /stdout --
start_stop_delete_test.go:311: status error: exit status 2 (may be ok)
start_stop_delete_test.go:311: (dbg) Run:  out/minikube-linux-arm64 unpause -p old-k8s-version-577369 --alsologtostderr -v=1
start_stop_delete_test.go:311: (dbg) Run:  out/minikube-linux-arm64 status --format={{.APIServer}} -p old-k8s-version-577369 -n old-k8s-version-577369
start_stop_delete_test.go:311: (dbg) Run:  out/minikube-linux-arm64 status --format={{.Kubelet}} -p old-k8s-version-577369 -n old-k8s-version-577369
--- PASS: TestStartStop/group/old-k8s-version/serial/Pause (2.91s)

                                                
                                    
x
+
TestStartStop/group/default-k8s-diff-port/serial/FirstStart (85.84s)

                                                
                                                
=== RUN   TestStartStop/group/default-k8s-diff-port/serial/FirstStart
start_stop_delete_test.go:186: (dbg) Run:  out/minikube-linux-arm64 start -p default-k8s-diff-port-441994 --memory=2200 --alsologtostderr --wait=true --apiserver-port=8444 --driver=docker  --container-runtime=docker --kubernetes-version=v1.30.2
E0620 18:11:48.575019    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/bridge-978988/client.crt: no such file or directory
E0620 18:11:50.716798    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/flannel-978988/client.crt: no such file or directory
start_stop_delete_test.go:186: (dbg) Done: out/minikube-linux-arm64 start -p default-k8s-diff-port-441994 --memory=2200 --alsologtostderr --wait=true --apiserver-port=8444 --driver=docker  --container-runtime=docker --kubernetes-version=v1.30.2: (1m25.839560049s)
--- PASS: TestStartStop/group/default-k8s-diff-port/serial/FirstStart (85.84s)
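The profile above is started with --apiserver-port=8444, so on this Linux docker-driver run the kubeconfig entry for the cluster should point at a server URL ending in :8444 (with other drivers or environments the port can be remapped, so this is only a rough check). A Go sketch of reading that back; profile name and port come from the log:

package main

import (
	"fmt"
	"os/exec"
	"strings"
)

func main() {
	out, err := exec.Command("kubectl", "config", "view",
		"-o", `jsonpath={.clusters[?(@.name=="default-k8s-diff-port-441994")].cluster.server}`,
	).Output()
	if err != nil {
		fmt.Println("kubectl config view failed:", err)
		return
	}
	server := strings.TrimSpace(string(out))
	// With the docker driver on Linux the endpoint is expected to carry the
	// custom port; other setups may remap it, so both outcomes are reported.
	if strings.HasSuffix(server, ":8444") {
		fmt.Println("API server is exposed on the custom port:", server)
	} else {
		fmt.Println("API server address (port possibly remapped):", server)
	}
}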

                                                
                                    
x
+
TestStartStop/group/no-preload/serial/DeployApp (8.44s)

                                                
                                                
=== RUN   TestStartStop/group/no-preload/serial/DeployApp
start_stop_delete_test.go:196: (dbg) Run:  kubectl --context no-preload-581163 create -f testdata/busybox.yaml
start_stop_delete_test.go:196: (dbg) TestStartStop/group/no-preload/serial/DeployApp: waiting 8m0s for pods matching "integration-test=busybox" in namespace "default" ...
helpers_test.go:344: "busybox" [8889d2e0-256e-4dd6-a3cd-4373a8deeed6] Pending
helpers_test.go:344: "busybox" [8889d2e0-256e-4dd6-a3cd-4373a8deeed6] Pending / Ready:ContainersNotReady (containers with unready status: [busybox]) / ContainersReady:ContainersNotReady (containers with unready status: [busybox])
helpers_test.go:344: "busybox" [8889d2e0-256e-4dd6-a3cd-4373a8deeed6] Running
start_stop_delete_test.go:196: (dbg) TestStartStop/group/no-preload/serial/DeployApp: integration-test=busybox healthy within 8.004069468s
start_stop_delete_test.go:196: (dbg) Run:  kubectl --context no-preload-581163 exec busybox -- /bin/sh -c "ulimit -n"
--- PASS: TestStartStop/group/no-preload/serial/DeployApp (8.44s)

                                                
                                    
x
+
TestStartStop/group/no-preload/serial/EnableAddonWhileActive (1.13s)

                                                
                                                
=== RUN   TestStartStop/group/no-preload/serial/EnableAddonWhileActive
start_stop_delete_test.go:205: (dbg) Run:  out/minikube-linux-arm64 addons enable metrics-server -p no-preload-581163 --images=MetricsServer=registry.k8s.io/echoserver:1.4 --registries=MetricsServer=fake.domain
start_stop_delete_test.go:205: (dbg) Done: out/minikube-linux-arm64 addons enable metrics-server -p no-preload-581163 --images=MetricsServer=registry.k8s.io/echoserver:1.4 --registries=MetricsServer=fake.domain: (1.014511958s)
start_stop_delete_test.go:215: (dbg) Run:  kubectl --context no-preload-581163 describe deploy/metrics-server -n kube-system
--- PASS: TestStartStop/group/no-preload/serial/EnableAddonWhileActive (1.13s)

                                                
                                    
x
+
TestStartStop/group/no-preload/serial/Stop (10.87s)

                                                
                                                
=== RUN   TestStartStop/group/no-preload/serial/Stop
start_stop_delete_test.go:228: (dbg) Run:  out/minikube-linux-arm64 stop -p no-preload-581163 --alsologtostderr -v=3
E0620 18:12:22.821705    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/calico-978988/client.crt: no such file or directory
start_stop_delete_test.go:228: (dbg) Done: out/minikube-linux-arm64 stop -p no-preload-581163 --alsologtostderr -v=3: (10.865767242s)
--- PASS: TestStartStop/group/no-preload/serial/Stop (10.87s)

                                                
                                    
x
+
TestStartStop/group/no-preload/serial/EnableAddonAfterStop (0.2s)

                                                
                                                
=== RUN   TestStartStop/group/no-preload/serial/EnableAddonAfterStop
start_stop_delete_test.go:239: (dbg) Run:  out/minikube-linux-arm64 status --format={{.Host}} -p no-preload-581163 -n no-preload-581163
start_stop_delete_test.go:239: (dbg) Non-zero exit: out/minikube-linux-arm64 status --format={{.Host}} -p no-preload-581163 -n no-preload-581163: exit status 7 (70.688487ms)

                                                
                                                
-- stdout --
	Stopped

                                                
                                                
-- /stdout --
start_stop_delete_test.go:239: status error: exit status 7 (may be ok)
start_stop_delete_test.go:246: (dbg) Run:  out/minikube-linux-arm64 addons enable dashboard -p no-preload-581163 --images=MetricsScraper=registry.k8s.io/echoserver:1.4
--- PASS: TestStartStop/group/no-preload/serial/EnableAddonAfterStop (0.20s)

                                                
                                    
x
+
TestStartStop/group/no-preload/serial/SecondStart (266.48s)

                                                
                                                
=== RUN   TestStartStop/group/no-preload/serial/SecondStart
start_stop_delete_test.go:256: (dbg) Run:  out/minikube-linux-arm64 start -p no-preload-581163 --memory=2200 --alsologtostderr --wait=true --preload=false --driver=docker  --container-runtime=docker --kubernetes-version=v1.30.2
E0620 18:12:35.118436    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/skaffold-187542/client.crt: no such file or directory
start_stop_delete_test.go:256: (dbg) Done: out/minikube-linux-arm64 start -p no-preload-581163 --memory=2200 --alsologtostderr --wait=true --preload=false --driver=docker  --container-runtime=docker --kubernetes-version=v1.30.2: (4m26.076577378s)
start_stop_delete_test.go:262: (dbg) Run:  out/minikube-linux-arm64 status --format={{.Host}} -p no-preload-581163 -n no-preload-581163
--- PASS: TestStartStop/group/no-preload/serial/SecondStart (266.48s)

                                                
                                    
x
+
TestStartStop/group/default-k8s-diff-port/serial/DeployApp (8.42s)

                                                
                                                
=== RUN   TestStartStop/group/default-k8s-diff-port/serial/DeployApp
start_stop_delete_test.go:196: (dbg) Run:  kubectl --context default-k8s-diff-port-441994 create -f testdata/busybox.yaml
start_stop_delete_test.go:196: (dbg) TestStartStop/group/default-k8s-diff-port/serial/DeployApp: waiting 8m0s for pods matching "integration-test=busybox" in namespace "default" ...
helpers_test.go:344: "busybox" [7a04542f-c619-4e6f-a4e9-c33a5f69aedd] Pending
helpers_test.go:344: "busybox" [7a04542f-c619-4e6f-a4e9-c33a5f69aedd] Pending / Ready:ContainersNotReady (containers with unready status: [busybox]) / ContainersReady:ContainersNotReady (containers with unready status: [busybox])
helpers_test.go:344: "busybox" [7a04542f-c619-4e6f-a4e9-c33a5f69aedd] Running
start_stop_delete_test.go:196: (dbg) TestStartStop/group/default-k8s-diff-port/serial/DeployApp: integration-test=busybox healthy within 8.003522536s
start_stop_delete_test.go:196: (dbg) Run:  kubectl --context default-k8s-diff-port-441994 exec busybox -- /bin/sh -c "ulimit -n"
--- PASS: TestStartStop/group/default-k8s-diff-port/serial/DeployApp (8.42s)

                                                
                                    
x
+
TestStartStop/group/default-k8s-diff-port/serial/EnableAddonWhileActive (1.16s)

                                                
                                                
=== RUN   TestStartStop/group/default-k8s-diff-port/serial/EnableAddonWhileActive
start_stop_delete_test.go:205: (dbg) Run:  out/minikube-linux-arm64 addons enable metrics-server -p default-k8s-diff-port-441994 --images=MetricsServer=registry.k8s.io/echoserver:1.4 --registries=MetricsServer=fake.domain
start_stop_delete_test.go:205: (dbg) Done: out/minikube-linux-arm64 addons enable metrics-server -p default-k8s-diff-port-441994 --images=MetricsServer=registry.k8s.io/echoserver:1.4 --registries=MetricsServer=fake.domain: (1.030822846s)
start_stop_delete_test.go:215: (dbg) Run:  kubectl --context default-k8s-diff-port-441994 describe deploy/metrics-server -n kube-system
--- PASS: TestStartStop/group/default-k8s-diff-port/serial/EnableAddonWhileActive (1.16s)

                                                
                                    
x
+
TestStartStop/group/default-k8s-diff-port/serial/Stop (10.87s)

                                                
                                                
=== RUN   TestStartStop/group/default-k8s-diff-port/serial/Stop
start_stop_delete_test.go:228: (dbg) Run:  out/minikube-linux-arm64 stop -p default-k8s-diff-port-441994 --alsologtostderr -v=3
E0620 18:13:19.978731    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/custom-flannel-978988/client.crt: no such file or directory
start_stop_delete_test.go:228: (dbg) Done: out/minikube-linux-arm64 stop -p default-k8s-diff-port-441994 --alsologtostderr -v=3: (10.866323785s)
--- PASS: TestStartStop/group/default-k8s-diff-port/serial/Stop (10.87s)

                                                
                                    
x
+
TestStartStop/group/default-k8s-diff-port/serial/EnableAddonAfterStop (0.19s)

                                                
                                                
=== RUN   TestStartStop/group/default-k8s-diff-port/serial/EnableAddonAfterStop
start_stop_delete_test.go:239: (dbg) Run:  out/minikube-linux-arm64 status --format={{.Host}} -p default-k8s-diff-port-441994 -n default-k8s-diff-port-441994
start_stop_delete_test.go:239: (dbg) Non-zero exit: out/minikube-linux-arm64 status --format={{.Host}} -p default-k8s-diff-port-441994 -n default-k8s-diff-port-441994: exit status 7 (70.027894ms)

                                                
                                                
-- stdout --
	Stopped

                                                
                                                
-- /stdout --
start_stop_delete_test.go:239: status error: exit status 7 (may be ok)
start_stop_delete_test.go:246: (dbg) Run:  out/minikube-linux-arm64 addons enable dashboard -p default-k8s-diff-port-441994 --images=MetricsScraper=registry.k8s.io/echoserver:1.4
--- PASS: TestStartStop/group/default-k8s-diff-port/serial/EnableAddonAfterStop (0.19s)

                                                
                                    
x
+
TestStartStop/group/default-k8s-diff-port/serial/SecondStart (267.47s)

                                                
                                                
=== RUN   TestStartStop/group/default-k8s-diff-port/serial/SecondStart
start_stop_delete_test.go:256: (dbg) Run:  out/minikube-linux-arm64 start -p default-k8s-diff-port-441994 --memory=2200 --alsologtostderr --wait=true --apiserver-port=8444 --driver=docker  --container-runtime=docker --kubernetes-version=v1.30.2
E0620 18:13:30.190891    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/kubenet-978988/client.crt: no such file or directory
E0620 18:13:57.874409    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/kubenet-978988/client.crt: no such file or directory
E0620 18:14:40.178453    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/old-k8s-version-577369/client.crt: no such file or directory
E0620 18:14:40.184210    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/old-k8s-version-577369/client.crt: no such file or directory
E0620 18:14:40.194699    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/old-k8s-version-577369/client.crt: no such file or directory
E0620 18:14:40.214961    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/old-k8s-version-577369/client.crt: no such file or directory
E0620 18:14:40.255252    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/old-k8s-version-577369/client.crt: no such file or directory
E0620 18:14:40.335543    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/old-k8s-version-577369/client.crt: no such file or directory
E0620 18:14:40.495762    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/old-k8s-version-577369/client.crt: no such file or directory
E0620 18:14:40.792185    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/false-978988/client.crt: no such file or directory
E0620 18:14:40.816286    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/old-k8s-version-577369/client.crt: no such file or directory
E0620 18:14:41.456783    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/old-k8s-version-577369/client.crt: no such file or directory
E0620 18:14:42.736950    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/old-k8s-version-577369/client.crt: no such file or directory
E0620 18:14:45.298068    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/old-k8s-version-577369/client.crt: no such file or directory
E0620 18:14:46.050311    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/enable-default-cni-978988/client.crt: no such file or directory
E0620 18:14:50.418787    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/old-k8s-version-577369/client.crt: no such file or directory
E0620 18:15:00.659507    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/old-k8s-version-577369/client.crt: no such file or directory
E0620 18:15:21.140367    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/old-k8s-version-577369/client.crt: no such file or directory
E0620 18:15:23.921455    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/auto-978988/client.crt: no such file or directory
E0620 18:15:36.958099    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/addons-705802/client.crt: no such file or directory
E0620 18:16:02.100597    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/old-k8s-version-577369/client.crt: no such file or directory
E0620 18:16:20.892256    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/bridge-978988/client.crt: no such file or directory
E0620 18:16:23.033752    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/flannel-978988/client.crt: no such file or directory
E0620 18:16:30.854063    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/kindnet-978988/client.crt: no such file or directory
E0620 18:16:33.078179    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/functional-493344/client.crt: no such file or directory
E0620 18:16:46.976860    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/auto-978988/client.crt: no such file or directory
start_stop_delete_test.go:256: (dbg) Done: out/minikube-linux-arm64 start -p default-k8s-diff-port-441994 --memory=2200 --alsologtostderr --wait=true --apiserver-port=8444 --driver=docker  --container-runtime=docker --kubernetes-version=v1.30.2: (4m27.068753222s)
start_stop_delete_test.go:262: (dbg) Run:  out/minikube-linux-arm64 status --format={{.Host}} -p default-k8s-diff-port-441994 -n default-k8s-diff-port-441994
--- PASS: TestStartStop/group/default-k8s-diff-port/serial/SecondStart (267.47s)

                                                
                                    
x
+
TestStartStop/group/no-preload/serial/UserAppExistsAfterStop (6.01s)

                                                
                                                
=== RUN   TestStartStop/group/no-preload/serial/UserAppExistsAfterStop
start_stop_delete_test.go:274: (dbg) TestStartStop/group/no-preload/serial/UserAppExistsAfterStop: waiting 9m0s for pods matching "k8s-app=kubernetes-dashboard" in namespace "kubernetes-dashboard" ...
helpers_test.go:344: "kubernetes-dashboard-779776cb65-dkvxm" [bd75fa15-fd22-491c-9696-395274b2c143] Running
start_stop_delete_test.go:274: (dbg) TestStartStop/group/no-preload/serial/UserAppExistsAfterStop: k8s-app=kubernetes-dashboard healthy within 6.00390285s
--- PASS: TestStartStop/group/no-preload/serial/UserAppExistsAfterStop (6.01s)

                                                
                                    
x
+
TestStartStop/group/no-preload/serial/AddonExistsAfterStop (5.11s)

                                                
                                                
=== RUN   TestStartStop/group/no-preload/serial/AddonExistsAfterStop
start_stop_delete_test.go:287: (dbg) TestStartStop/group/no-preload/serial/AddonExistsAfterStop: waiting 9m0s for pods matching "k8s-app=kubernetes-dashboard" in namespace "kubernetes-dashboard" ...
helpers_test.go:344: "kubernetes-dashboard-779776cb65-dkvxm" [bd75fa15-fd22-491c-9696-395274b2c143] Running
E0620 18:17:00.016313    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/addons-705802/client.crt: no such file or directory
start_stop_delete_test.go:287: (dbg) TestStartStop/group/no-preload/serial/AddonExistsAfterStop: k8s-app=kubernetes-dashboard healthy within 5.003884792s
start_stop_delete_test.go:291: (dbg) Run:  kubectl --context no-preload-581163 describe deploy/dashboard-metrics-scraper -n kubernetes-dashboard
--- PASS: TestStartStop/group/no-preload/serial/AddonExistsAfterStop (5.11s)

                                                
                                    
x
+
TestStartStop/group/no-preload/serial/VerifyKubernetesImages (0.26s)

                                                
                                                
=== RUN   TestStartStop/group/no-preload/serial/VerifyKubernetesImages
start_stop_delete_test.go:304: (dbg) Run:  out/minikube-linux-arm64 -p no-preload-581163 image list --format=json
start_stop_delete_test.go:304: Found non-minikube image: gcr.io/k8s-minikube/busybox:1.28.4-glibc
--- PASS: TestStartStop/group/no-preload/serial/VerifyKubernetesImages (0.26s)

                                                
                                    
x
+
TestStartStop/group/no-preload/serial/Pause (3.17s)

                                                
                                                
=== RUN   TestStartStop/group/no-preload/serial/Pause
start_stop_delete_test.go:311: (dbg) Run:  out/minikube-linux-arm64 pause -p no-preload-581163 --alsologtostderr -v=1
start_stop_delete_test.go:311: (dbg) Run:  out/minikube-linux-arm64 status --format={{.APIServer}} -p no-preload-581163 -n no-preload-581163
start_stop_delete_test.go:311: (dbg) Non-zero exit: out/minikube-linux-arm64 status --format={{.APIServer}} -p no-preload-581163 -n no-preload-581163: exit status 2 (353.930319ms)

                                                
                                                
-- stdout --
	Paused

                                                
                                                
-- /stdout --
start_stop_delete_test.go:311: status error: exit status 2 (may be ok)
start_stop_delete_test.go:311: (dbg) Run:  out/minikube-linux-arm64 status --format={{.Kubelet}} -p no-preload-581163 -n no-preload-581163
start_stop_delete_test.go:311: (dbg) Non-zero exit: out/minikube-linux-arm64 status --format={{.Kubelet}} -p no-preload-581163 -n no-preload-581163: exit status 2 (373.463394ms)

                                                
                                                
-- stdout --
	Stopped

                                                
                                                
-- /stdout --
start_stop_delete_test.go:311: status error: exit status 2 (may be ok)
start_stop_delete_test.go:311: (dbg) Run:  out/minikube-linux-arm64 unpause -p no-preload-581163 --alsologtostderr -v=1
start_stop_delete_test.go:311: (dbg) Run:  out/minikube-linux-arm64 status --format={{.APIServer}} -p no-preload-581163 -n no-preload-581163
start_stop_delete_test.go:311: (dbg) Run:  out/minikube-linux-arm64 status --format={{.Kubelet}} -p no-preload-581163 -n no-preload-581163
--- PASS: TestStartStop/group/no-preload/serial/Pause (3.17s)

                                                
                                    
x
+
TestStartStop/group/newest-cni/serial/FirstStart (47.31s)

                                                
                                                
=== RUN   TestStartStop/group/newest-cni/serial/FirstStart
start_stop_delete_test.go:186: (dbg) Run:  out/minikube-linux-arm64 start -p newest-cni-038983 --memory=2200 --alsologtostderr --wait=apiserver,system_pods,default_sa --feature-gates ServerSideApply=true --network-plugin=cni --extra-config=kubeadm.pod-network-cidr=10.42.0.0/16 --driver=docker  --container-runtime=docker --kubernetes-version=v1.30.2
E0620 18:17:22.821586    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/calico-978988/client.crt: no such file or directory
E0620 18:17:24.021074    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/old-k8s-version-577369/client.crt: no such file or directory
E0620 18:17:35.118809    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/skaffold-187542/client.crt: no such file or directory
start_stop_delete_test.go:186: (dbg) Done: out/minikube-linux-arm64 start -p newest-cni-038983 --memory=2200 --alsologtostderr --wait=apiserver,system_pods,default_sa --feature-gates ServerSideApply=true --network-plugin=cni --extra-config=kubeadm.pod-network-cidr=10.42.0.0/16 --driver=docker  --container-runtime=docker --kubernetes-version=v1.30.2: (47.307593891s)
--- PASS: TestStartStop/group/newest-cni/serial/FirstStart (47.31s)

                                                
                                    
x
+
TestStartStop/group/default-k8s-diff-port/serial/UserAppExistsAfterStop (6.01s)

                                                
                                                
=== RUN   TestStartStop/group/default-k8s-diff-port/serial/UserAppExistsAfterStop
start_stop_delete_test.go:274: (dbg) TestStartStop/group/default-k8s-diff-port/serial/UserAppExistsAfterStop: waiting 9m0s for pods matching "k8s-app=kubernetes-dashboard" in namespace "kubernetes-dashboard" ...
helpers_test.go:344: "kubernetes-dashboard-779776cb65-9vzvz" [dedf3d04-49de-4827-bc66-68d27eeb81e9] Running
E0620 18:17:53.898152    7784 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/kindnet-978988/client.crt: no such file or directory
start_stop_delete_test.go:274: (dbg) TestStartStop/group/default-k8s-diff-port/serial/UserAppExistsAfterStop: k8s-app=kubernetes-dashboard healthy within 6.004508339s
--- PASS: TestStartStop/group/default-k8s-diff-port/serial/UserAppExistsAfterStop (6.01s)

                                                
                                    
x
+
TestStartStop/group/default-k8s-diff-port/serial/AddonExistsAfterStop (5.24s)

                                                
                                                
=== RUN   TestStartStop/group/default-k8s-diff-port/serial/AddonExistsAfterStop
start_stop_delete_test.go:287: (dbg) TestStartStop/group/default-k8s-diff-port/serial/AddonExistsAfterStop: waiting 9m0s for pods matching "k8s-app=kubernetes-dashboard" in namespace "kubernetes-dashboard" ...
helpers_test.go:344: "kubernetes-dashboard-779776cb65-9vzvz" [dedf3d04-49de-4827-bc66-68d27eeb81e9] Running
start_stop_delete_test.go:287: (dbg) TestStartStop/group/default-k8s-diff-port/serial/AddonExistsAfterStop: k8s-app=kubernetes-dashboard healthy within 5.025802567s
start_stop_delete_test.go:291: (dbg) Run:  kubectl --context default-k8s-diff-port-441994 describe deploy/dashboard-metrics-scraper -n kubernetes-dashboard
--- PASS: TestStartStop/group/default-k8s-diff-port/serial/AddonExistsAfterStop (5.24s)

                                                
                                    
x
+
TestStartStop/group/newest-cni/serial/DeployApp (0s)

                                                
                                                
=== RUN   TestStartStop/group/newest-cni/serial/DeployApp
--- PASS: TestStartStop/group/newest-cni/serial/DeployApp (0.00s)

                                                
                                    
x
+
TestStartStop/group/newest-cni/serial/EnableAddonWhileActive (1.15s)

                                                
                                                
=== RUN   TestStartStop/group/newest-cni/serial/EnableAddonWhileActive
start_stop_delete_test.go:205: (dbg) Run:  out/minikube-linux-arm64 addons enable metrics-server -p newest-cni-038983 --images=MetricsServer=registry.k8s.io/echoserver:1.4 --registries=MetricsServer=fake.domain
start_stop_delete_test.go:205: (dbg) Done: out/minikube-linux-arm64 addons enable metrics-server -p newest-cni-038983 --images=MetricsServer=registry.k8s.io/echoserver:1.4 --registries=MetricsServer=fake.domain: (1.146703205s)
start_stop_delete_test.go:211: WARNING: cni mode requires additional setup before pods can schedule :(
--- PASS: TestStartStop/group/newest-cni/serial/EnableAddonWhileActive (1.15s)

                                                
                                    
x
+
TestStartStop/group/newest-cni/serial/Stop (5.95s)

                                                
                                                
=== RUN   TestStartStop/group/newest-cni/serial/Stop
start_stop_delete_test.go:228: (dbg) Run:  out/minikube-linux-arm64 stop -p newest-cni-038983 --alsologtostderr -v=3
start_stop_delete_test.go:228: (dbg) Done: out/minikube-linux-arm64 stop -p newest-cni-038983 --alsologtostderr -v=3: (5.949555629s)
--- PASS: TestStartStop/group/newest-cni/serial/Stop (5.95s)

                                                
                                    
x
+
TestStartStop/group/default-k8s-diff-port/serial/VerifyKubernetesImages (0.26s)

                                                
                                                
=== RUN   TestStartStop/group/default-k8s-diff-port/serial/VerifyKubernetesImages
start_stop_delete_test.go:304: (dbg) Run:  out/minikube-linux-arm64 -p default-k8s-diff-port-441994 image list --format=json
start_stop_delete_test.go:304: Found non-minikube image: gcr.io/k8s-minikube/busybox:1.28.4-glibc
--- PASS: TestStartStop/group/default-k8s-diff-port/serial/VerifyKubernetesImages (0.26s)

                                                
                                    
x
+
TestStartStop/group/default-k8s-diff-port/serial/Pause (3.38s)

                                                
                                                
=== RUN   TestStartStop/group/default-k8s-diff-port/serial/Pause
start_stop_delete_test.go:311: (dbg) Run:  out/minikube-linux-arm64 pause -p default-k8s-diff-port-441994 --alsologtostderr -v=1
start_stop_delete_test.go:311: (dbg) Run:  out/minikube-linux-arm64 status --format={{.APIServer}} -p default-k8s-diff-port-441994 -n default-k8s-diff-port-441994
start_stop_delete_test.go:311: (dbg) Non-zero exit: out/minikube-linux-arm64 status --format={{.APIServer}} -p default-k8s-diff-port-441994 -n default-k8s-diff-port-441994: exit status 2 (319.569764ms)

                                                
                                                
-- stdout --
	Paused

                                                
                                                
-- /stdout --
start_stop_delete_test.go:311: status error: exit status 2 (may be ok)
start_stop_delete_test.go:311: (dbg) Run:  out/minikube-linux-arm64 status --format={{.Kubelet}} -p default-k8s-diff-port-441994 -n default-k8s-diff-port-441994
start_stop_delete_test.go:311: (dbg) Non-zero exit: out/minikube-linux-arm64 status --format={{.Kubelet}} -p default-k8s-diff-port-441994 -n default-k8s-diff-port-441994: exit status 2 (332.125079ms)

                                                
                                                
-- stdout --
	Stopped

                                                
                                                
-- /stdout --
start_stop_delete_test.go:311: status error: exit status 2 (may be ok)
start_stop_delete_test.go:311: (dbg) Run:  out/minikube-linux-arm64 unpause -p default-k8s-diff-port-441994 --alsologtostderr -v=1
start_stop_delete_test.go:311: (dbg) Run:  out/minikube-linux-arm64 status --format={{.APIServer}} -p default-k8s-diff-port-441994 -n default-k8s-diff-port-441994
start_stop_delete_test.go:311: (dbg) Run:  out/minikube-linux-arm64 status --format={{.Kubelet}} -p default-k8s-diff-port-441994 -n default-k8s-diff-port-441994
--- PASS: TestStartStop/group/default-k8s-diff-port/serial/Pause (3.38s)

                                                
                                    
x
+
TestStartStop/group/newest-cni/serial/EnableAddonAfterStop (0.47s)

                                                
                                                
=== RUN   TestStartStop/group/newest-cni/serial/EnableAddonAfterStop
start_stop_delete_test.go:239: (dbg) Run:  out/minikube-linux-arm64 status --format={{.Host}} -p newest-cni-038983 -n newest-cni-038983
start_stop_delete_test.go:239: (dbg) Non-zero exit: out/minikube-linux-arm64 status --format={{.Host}} -p newest-cni-038983 -n newest-cni-038983: exit status 7 (234.576124ms)

                                                
                                                
-- stdout --
	Stopped

                                                
                                                
-- /stdout --
start_stop_delete_test.go:239: status error: exit status 7 (may be ok)
start_stop_delete_test.go:246: (dbg) Run:  out/minikube-linux-arm64 addons enable dashboard -p newest-cni-038983 --images=MetricsScraper=registry.k8s.io/echoserver:1.4
--- PASS: TestStartStop/group/newest-cni/serial/EnableAddonAfterStop (0.47s)

                                                
                                    
x
+
TestStartStop/group/newest-cni/serial/SecondStart (18.32s)

                                                
                                                
=== RUN   TestStartStop/group/newest-cni/serial/SecondStart
start_stop_delete_test.go:256: (dbg) Run:  out/minikube-linux-arm64 start -p newest-cni-038983 --memory=2200 --alsologtostderr --wait=apiserver,system_pods,default_sa --feature-gates ServerSideApply=true --network-plugin=cni --extra-config=kubeadm.pod-network-cidr=10.42.0.0/16 --driver=docker  --container-runtime=docker --kubernetes-version=v1.30.2
start_stop_delete_test.go:256: (dbg) Done: out/minikube-linux-arm64 start -p newest-cni-038983 --memory=2200 --alsologtostderr --wait=apiserver,system_pods,default_sa --feature-gates ServerSideApply=true --network-plugin=cni --extra-config=kubeadm.pod-network-cidr=10.42.0.0/16 --driver=docker  --container-runtime=docker --kubernetes-version=v1.30.2: (17.950633768s)
start_stop_delete_test.go:262: (dbg) Run:  out/minikube-linux-arm64 status --format={{.Host}} -p newest-cni-038983 -n newest-cni-038983
--- PASS: TestStartStop/group/newest-cni/serial/SecondStart (18.32s)

                                                
                                    
x
+
TestStartStop/group/newest-cni/serial/UserAppExistsAfterStop (0s)

                                                
                                                
=== RUN   TestStartStop/group/newest-cni/serial/UserAppExistsAfterStop
start_stop_delete_test.go:273: WARNING: cni mode requires additional setup before pods can schedule :(
--- PASS: TestStartStop/group/newest-cni/serial/UserAppExistsAfterStop (0.00s)

                                                
                                    
x
+
TestStartStop/group/newest-cni/serial/AddonExistsAfterStop (0s)

                                                
                                                
=== RUN   TestStartStop/group/newest-cni/serial/AddonExistsAfterStop
start_stop_delete_test.go:284: WARNING: cni mode requires additional setup before pods can schedule :(
--- PASS: TestStartStop/group/newest-cni/serial/AddonExistsAfterStop (0.00s)

                                                
                                    
x
+
TestStartStop/group/newest-cni/serial/VerifyKubernetesImages (0.27s)

                                                
                                                
=== RUN   TestStartStop/group/newest-cni/serial/VerifyKubernetesImages
start_stop_delete_test.go:304: (dbg) Run:  out/minikube-linux-arm64 -p newest-cni-038983 image list --format=json
--- PASS: TestStartStop/group/newest-cni/serial/VerifyKubernetesImages (0.27s)

                                                
                                    
x
+
TestStartStop/group/newest-cni/serial/Pause (2.61s)

                                                
                                                
=== RUN   TestStartStop/group/newest-cni/serial/Pause
start_stop_delete_test.go:311: (dbg) Run:  out/minikube-linux-arm64 pause -p newest-cni-038983 --alsologtostderr -v=1
start_stop_delete_test.go:311: (dbg) Run:  out/minikube-linux-arm64 status --format={{.APIServer}} -p newest-cni-038983 -n newest-cni-038983
start_stop_delete_test.go:311: (dbg) Non-zero exit: out/minikube-linux-arm64 status --format={{.APIServer}} -p newest-cni-038983 -n newest-cni-038983: exit status 2 (299.848033ms)

                                                
                                                
-- stdout --
	Paused

                                                
                                                
-- /stdout --
start_stop_delete_test.go:311: status error: exit status 2 (may be ok)
start_stop_delete_test.go:311: (dbg) Run:  out/minikube-linux-arm64 status --format={{.Kubelet}} -p newest-cni-038983 -n newest-cni-038983
start_stop_delete_test.go:311: (dbg) Non-zero exit: out/minikube-linux-arm64 status --format={{.Kubelet}} -p newest-cni-038983 -n newest-cni-038983: exit status 2 (305.507512ms)

                                                
                                                
-- stdout --
	Stopped

                                                
                                                
-- /stdout --
start_stop_delete_test.go:311: status error: exit status 2 (may be ok)
start_stop_delete_test.go:311: (dbg) Run:  out/minikube-linux-arm64 unpause -p newest-cni-038983 --alsologtostderr -v=1
start_stop_delete_test.go:311: (dbg) Run:  out/minikube-linux-arm64 status --format={{.APIServer}} -p newest-cni-038983 -n newest-cni-038983
start_stop_delete_test.go:311: (dbg) Run:  out/minikube-linux-arm64 status --format={{.Kubelet}} -p newest-cni-038983 -n newest-cni-038983
--- PASS: TestStartStop/group/newest-cni/serial/Pause (2.61s)

                                                
                                    

Test skip (24/343)

x
+
TestDownloadOnly/v1.20.0/cached-images (0s)

                                                
                                                
=== RUN   TestDownloadOnly/v1.20.0/cached-images
aaa_download_only_test.go:129: Preload exists, images won't be cached
--- SKIP: TestDownloadOnly/v1.20.0/cached-images (0.00s)

                                                
                                    
x
+
TestDownloadOnly/v1.20.0/binaries (0s)

                                                
                                                
=== RUN   TestDownloadOnly/v1.20.0/binaries
aaa_download_only_test.go:151: Preload exists, binaries are present within.
--- SKIP: TestDownloadOnly/v1.20.0/binaries (0.00s)

                                                
                                    
x
+
TestDownloadOnly/v1.20.0/kubectl (0s)

                                                
                                                
=== RUN   TestDownloadOnly/v1.20.0/kubectl
aaa_download_only_test.go:167: Test for darwin and windows
--- SKIP: TestDownloadOnly/v1.20.0/kubectl (0.00s)

                                                
                                    
x
+
TestDownloadOnly/v1.30.2/cached-images (0s)

                                                
                                                
=== RUN   TestDownloadOnly/v1.30.2/cached-images
aaa_download_only_test.go:129: Preload exists, images won't be cached
--- SKIP: TestDownloadOnly/v1.30.2/cached-images (0.00s)

                                                
                                    
x
+
TestDownloadOnly/v1.30.2/binaries (0s)

                                                
                                                
=== RUN   TestDownloadOnly/v1.30.2/binaries
aaa_download_only_test.go:151: Preload exists, binaries are present within.
--- SKIP: TestDownloadOnly/v1.30.2/binaries (0.00s)

                                                
                                    
x
+
TestDownloadOnly/v1.30.2/kubectl (0s)

                                                
                                                
=== RUN   TestDownloadOnly/v1.30.2/kubectl
aaa_download_only_test.go:167: Test for darwin and windows
--- SKIP: TestDownloadOnly/v1.30.2/kubectl (0.00s)

                                                
                                    
x
+
TestDownloadOnlyKic (0.54s)

                                                
                                                
=== RUN   TestDownloadOnlyKic
aaa_download_only_test.go:232: (dbg) Run:  out/minikube-linux-arm64 start --download-only -p download-docker-388182 --alsologtostderr --driver=docker  --container-runtime=docker
aaa_download_only_test.go:244: Skip for arm64 platform. See https://github.com/kubernetes/minikube/issues/10144
helpers_test.go:175: Cleaning up "download-docker-388182" profile ...
helpers_test.go:178: (dbg) Run:  out/minikube-linux-arm64 delete -p download-docker-388182
--- SKIP: TestDownloadOnlyKic (0.54s)

                                                
                                    
x
+
TestAddons/parallel/HelmTiller (0s)

                                                
                                                
=== RUN   TestAddons/parallel/HelmTiller
=== PAUSE TestAddons/parallel/HelmTiller

                                                
                                                

                                                
                                                
=== CONT  TestAddons/parallel/HelmTiller
addons_test.go:446: skip Helm test on arm64
--- SKIP: TestAddons/parallel/HelmTiller (0.00s)

                                                
                                    
x
+
TestAddons/parallel/Olm (0s)

                                                
                                                
=== RUN   TestAddons/parallel/Olm
=== PAUSE TestAddons/parallel/Olm

                                                
                                                

                                                
                                                
=== CONT  TestAddons/parallel/Olm
addons_test.go:500: Skipping OLM addon test until https://github.com/operator-framework/operator-lifecycle-manager/issues/2534 is resolved
--- SKIP: TestAddons/parallel/Olm (0.00s)

                                                
                                    
x
+
TestDockerEnvContainerd (0s)

                                                
                                                
=== RUN   TestDockerEnvContainerd
docker_test.go:170: running with docker true linux arm64
docker_test.go:172: skipping: TestDockerEnvContainerd can only be run with the containerd runtime on Docker driver
--- SKIP: TestDockerEnvContainerd (0.00s)

                                                
                                    
x
+
TestKVMDriverInstallOrUpdate (0s)

                                                
                                                
=== RUN   TestKVMDriverInstallOrUpdate
driver_install_or_update_test.go:45: Skip if arm64. See https://github.com/kubernetes/minikube/issues/10144
--- SKIP: TestKVMDriverInstallOrUpdate (0.00s)

                                                
                                    
x
+
TestHyperKitDriverInstallOrUpdate (0s)

                                                
                                                
=== RUN   TestHyperKitDriverInstallOrUpdate
driver_install_or_update_test.go:105: Skip if not darwin.
--- SKIP: TestHyperKitDriverInstallOrUpdate (0.00s)

                                                
                                    
x
+
TestHyperkitDriverSkipUpgrade (0s)

                                                
                                                
=== RUN   TestHyperkitDriverSkipUpgrade
driver_install_or_update_test.go:169: Skip if not darwin.
--- SKIP: TestHyperkitDriverSkipUpgrade (0.00s)

                                                
                                    
x
+
TestFunctional/parallel/MySQL (0s)

                                                
                                                
=== RUN   TestFunctional/parallel/MySQL
=== PAUSE TestFunctional/parallel/MySQL

                                                
                                                

                                                
                                                
=== CONT  TestFunctional/parallel/MySQL
functional_test.go:1783: arm64 is not supported by mysql. Skip the test. See https://github.com/kubernetes/minikube/issues/10144
--- SKIP: TestFunctional/parallel/MySQL (0.00s)

                                                
                                    
x
+
TestFunctional/parallel/PodmanEnv (0s)

                                                
                                                
=== RUN   TestFunctional/parallel/PodmanEnv
=== PAUSE TestFunctional/parallel/PodmanEnv

                                                
                                                

                                                
                                                
=== CONT  TestFunctional/parallel/PodmanEnv
functional_test.go:546: only validate podman env with docker container runtime, currently testing docker
--- SKIP: TestFunctional/parallel/PodmanEnv (0.00s)

                                                
                                    
x
+
TestFunctional/parallel/TunnelCmd/serial/DNSResolutionByDig (0s)

                                                
                                                
=== RUN   TestFunctional/parallel/TunnelCmd/serial/DNSResolutionByDig
functional_test_tunnel_test.go:99: DNS forwarding is only supported for Hyperkit on Darwin, skipping test DNS forwarding
--- SKIP: TestFunctional/parallel/TunnelCmd/serial/DNSResolutionByDig (0.00s)

                                                
                                    
x
+
TestFunctional/parallel/TunnelCmd/serial/DNSResolutionByDscacheutil (0s)

                                                
                                                
=== RUN   TestFunctional/parallel/TunnelCmd/serial/DNSResolutionByDscacheutil
functional_test_tunnel_test.go:99: DNS forwarding is only supported for Hyperkit on Darwin, skipping test DNS forwarding
--- SKIP: TestFunctional/parallel/TunnelCmd/serial/DNSResolutionByDscacheutil (0.00s)

                                                
                                    
x
+
TestFunctional/parallel/TunnelCmd/serial/AccessThroughDNS (0s)

                                                
                                                
=== RUN   TestFunctional/parallel/TunnelCmd/serial/AccessThroughDNS
functional_test_tunnel_test.go:99: DNS forwarding is only supported for Hyperkit on Darwin, skipping test DNS forwarding
--- SKIP: TestFunctional/parallel/TunnelCmd/serial/AccessThroughDNS (0.00s)

                                                
                                    
x
+
TestGvisorAddon (0s)

                                                
                                                
=== RUN   TestGvisorAddon
gvisor_addon_test.go:34: skipping test because --gvisor=false
--- SKIP: TestGvisorAddon (0.00s)

                                                
                                    
x
+
TestImageBuild/serial/validateImageBuildWithBuildEnv (0s)

                                                
                                                
=== RUN   TestImageBuild/serial/validateImageBuildWithBuildEnv
image_test.go:114: skipping due to https://github.com/kubernetes/minikube/issues/12431
--- SKIP: TestImageBuild/serial/validateImageBuildWithBuildEnv (0.00s)

                                                
                                    
x
+
TestChangeNoneUser (0s)

                                                
                                                
=== RUN   TestChangeNoneUser
none_test.go:38: Test requires none driver and SUDO_USER env to not be empty
--- SKIP: TestChangeNoneUser (0.00s)

                                                
                                    
x
+
TestScheduledStopWindows (0s)

                                                
                                                
=== RUN   TestScheduledStopWindows
scheduled_stop_test.go:42: test only runs on windows
--- SKIP: TestScheduledStopWindows (0.00s)

                                                
                                    
x
+
TestNetworkPlugins/group/cilium (5.28s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/cilium
net_test.go:102: Skipping the test as it's interfering with other tests and is outdated
panic.go:626: 
----------------------- debugLogs start: cilium-978988 [pass: true] --------------------------------
>>> netcat: nslookup kubernetes.default:
Error in configuration: context was not found for specified context: cilium-978988

                                                
                                                

                                                
                                                
>>> netcat: nslookup debug kubernetes.default a-records:
Error in configuration: context was not found for specified context: cilium-978988

                                                
                                                

                                                
                                                
>>> netcat: dig search kubernetes.default:
Error in configuration: context was not found for specified context: cilium-978988

                                                
                                                

                                                
                                                
>>> netcat: dig @10.96.0.10 kubernetes.default.svc.cluster.local udp/53:
Error in configuration: context was not found for specified context: cilium-978988

                                                
                                                

                                                
                                                
>>> netcat: dig @10.96.0.10 kubernetes.default.svc.cluster.local tcp/53:
Error in configuration: context was not found for specified context: cilium-978988

                                                
                                                

                                                
                                                
>>> netcat: nc 10.96.0.10 udp/53:
Error in configuration: context was not found for specified context: cilium-978988

                                                
                                                

                                                
                                                
>>> netcat: nc 10.96.0.10 tcp/53:
Error in configuration: context was not found for specified context: cilium-978988

                                                
                                                

                                                
                                                
>>> netcat: /etc/nsswitch.conf:
Error in configuration: context was not found for specified context: cilium-978988

                                                
                                                

                                                
                                                
>>> netcat: /etc/hosts:
Error in configuration: context was not found for specified context: cilium-978988

                                                
                                                

                                                
                                                
>>> netcat: /etc/resolv.conf:
Error in configuration: context was not found for specified context: cilium-978988

                                                
                                                

                                                
                                                
>>> host: /etc/nsswitch.conf:
* Profile "cilium-978988" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-978988"

                                                
                                                

                                                
                                                
>>> host: /etc/hosts:
* Profile "cilium-978988" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-978988"

                                                
                                                

                                                
                                                
>>> host: /etc/resolv.conf:
* Profile "cilium-978988" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-978988"

                                                
                                                

                                                
                                                
>>> k8s: nodes, services, endpoints, daemon sets, deployments and pods, :
Error in configuration: context was not found for specified context: cilium-978988

                                                
                                                

                                                
                                                
>>> host: crictl pods:
* Profile "cilium-978988" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-978988"

                                                
                                                

                                                
                                                
>>> host: crictl containers:
* Profile "cilium-978988" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-978988"

                                                
                                                

                                                
                                                
>>> k8s: describe netcat deployment:
error: context "cilium-978988" does not exist

                                                
                                                

                                                
                                                
>>> k8s: describe netcat pod(s):
error: context "cilium-978988" does not exist

                                                
                                                

                                                
                                                
>>> k8s: netcat logs:
error: context "cilium-978988" does not exist

                                                
                                                

                                                
                                                
>>> k8s: describe coredns deployment:
error: context "cilium-978988" does not exist

                                                
                                                

                                                
                                                
>>> k8s: describe coredns pods:
error: context "cilium-978988" does not exist

                                                
                                                

                                                
                                                
>>> k8s: coredns logs:
error: context "cilium-978988" does not exist

                                                
                                                

                                                
                                                
>>> k8s: describe api server pod(s):
error: context "cilium-978988" does not exist

                                                
                                                

                                                
                                                
>>> k8s: api server logs:
error: context "cilium-978988" does not exist

                                                
                                                

                                                
                                                
>>> host: /etc/cni:
* Profile "cilium-978988" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-978988"

                                                
                                                

                                                
                                                
>>> host: ip a s:
* Profile "cilium-978988" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-978988"

                                                
                                                

                                                
                                                
>>> host: ip r s:
* Profile "cilium-978988" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-978988"

                                                
                                                

                                                
                                                
>>> host: iptables-save:
* Profile "cilium-978988" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-978988"

                                                
                                                

                                                
                                                
>>> host: iptables table nat:
* Profile "cilium-978988" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-978988"

                                                
                                                

                                                
                                                
>>> k8s: describe cilium daemon set:
Error in configuration: context was not found for specified context: cilium-978988

                                                
                                                

                                                
                                                
>>> k8s: describe cilium daemon set pod(s):
Error in configuration: context was not found for specified context: cilium-978988

                                                
                                                

                                                
                                                
>>> k8s: cilium daemon set container(s) logs (current):
error: context "cilium-978988" does not exist

                                                
                                                

                                                
                                                
>>> k8s: cilium daemon set container(s) logs (previous):
error: context "cilium-978988" does not exist

                                                
                                                

                                                
                                                
>>> k8s: describe cilium deployment:
Error in configuration: context was not found for specified context: cilium-978988

                                                
                                                

                                                
                                                
>>> k8s: describe cilium deployment pod(s):
Error in configuration: context was not found for specified context: cilium-978988

                                                
                                                

                                                
                                                
>>> k8s: cilium deployment container(s) logs (current):
error: context "cilium-978988" does not exist

                                                
                                                

                                                
                                                
>>> k8s: cilium deployment container(s) logs (previous):
error: context "cilium-978988" does not exist

                                                
                                                

                                                
                                                
>>> k8s: describe kube-proxy daemon set:
error: context "cilium-978988" does not exist

                                                
                                                

                                                
                                                
>>> k8s: describe kube-proxy pod(s):
error: context "cilium-978988" does not exist

                                                
                                                

                                                
                                                
>>> k8s: kube-proxy logs:
error: context "cilium-978988" does not exist

                                                
                                                

                                                
                                                
>>> host: kubelet daemon status:
* Profile "cilium-978988" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-978988"

                                                
                                                

                                                
                                                
>>> host: kubelet daemon config:
* Profile "cilium-978988" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-978988"

                                                
                                                

                                                
                                                
>>> k8s: kubelet logs:
* Profile "cilium-978988" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-978988"

                                                
                                                

                                                
                                                
>>> host: /etc/kubernetes/kubelet.conf:
* Profile "cilium-978988" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-978988"

                                                
                                                

                                                
                                                
>>> host: /var/lib/kubelet/config.yaml:
* Profile "cilium-978988" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-978988"

                                                
                                                

                                                
                                                
>>> k8s: kubectl config:
apiVersion: v1
clusters:
- cluster:
    certificate-authority: /home/jenkins/minikube-integration/19106-2452/.minikube/ca.crt
    extensions:
    - extension:
        last-update: Thu, 20 Jun 2024 17:43:43 UTC
        provider: minikube.sigs.k8s.io
        version: v1.33.1
      name: cluster_info
    server: https://192.168.85.2:8443
  name: NoKubernetes-487577
contexts:
- context:
    cluster: NoKubernetes-487577
    extensions:
    - extension:
        last-update: Thu, 20 Jun 2024 17:43:43 UTC
        provider: minikube.sigs.k8s.io
        version: v1.33.1
      name: context_info
    namespace: default
    user: NoKubernetes-487577
  name: NoKubernetes-487577
current-context: ""
kind: Config
preferences: {}
users:
- name: NoKubernetes-487577
  user:
    client-certificate: /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/NoKubernetes-487577/client.crt
    client-key: /home/jenkins/minikube-integration/19106-2452/.minikube/profiles/NoKubernetes-487577/client.key

                                                
                                                

                                                
                                                
>>> k8s: cms:
Error in configuration: context was not found for specified context: cilium-978988

                                                
                                                

                                                
                                                
>>> host: docker daemon status:
* Profile "cilium-978988" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-978988"

                                                
                                                

                                                
                                                
>>> host: docker daemon config:
* Profile "cilium-978988" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-978988"

                                                
                                                

                                                
                                                
>>> host: /etc/docker/daemon.json:
* Profile "cilium-978988" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-978988"

                                                
                                                

                                                
                                                
>>> host: docker system info:
* Profile "cilium-978988" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-978988"

                                                
                                                

                                                
                                                
>>> host: cri-docker daemon status:
* Profile "cilium-978988" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-978988"

                                                
                                                

                                                
                                                
>>> host: cri-docker daemon config:
* Profile "cilium-978988" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-978988"

                                                
                                                

                                                
                                                
>>> host: /etc/systemd/system/cri-docker.service.d/10-cni.conf:
* Profile "cilium-978988" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-978988"

                                                
                                                

                                                
                                                
>>> host: /usr/lib/systemd/system/cri-docker.service:
* Profile "cilium-978988" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-978988"

                                                
                                                

                                                
                                                
>>> host: cri-dockerd version:
* Profile "cilium-978988" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-978988"

                                                
                                                

                                                
                                                
>>> host: containerd daemon status:
* Profile "cilium-978988" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-978988"

                                                
                                                

                                                
                                                
>>> host: containerd daemon config:
* Profile "cilium-978988" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-978988"

                                                
                                                

                                                
                                                
>>> host: /lib/systemd/system/containerd.service:
* Profile "cilium-978988" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-978988"

                                                
                                                

                                                
                                                
>>> host: /etc/containerd/config.toml:
* Profile "cilium-978988" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-978988"

                                                
                                                

                                                
                                                
>>> host: containerd config dump:
* Profile "cilium-978988" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-978988"

                                                
                                                

                                                
                                                
>>> host: crio daemon status:
* Profile "cilium-978988" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-978988"

                                                
                                                

                                                
                                                
>>> host: crio daemon config:
* Profile "cilium-978988" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-978988"

                                                
                                                

                                                
                                                
>>> host: /etc/crio:
* Profile "cilium-978988" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-978988"

                                                
                                                

                                                
                                                
>>> host: crio config:
* Profile "cilium-978988" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-978988"

                                                
                                                
----------------------- debugLogs end: cilium-978988 [took: 5.048745603s] --------------------------------
helpers_test.go:175: Cleaning up "cilium-978988" profile ...
helpers_test.go:178: (dbg) Run:  out/minikube-linux-arm64 delete -p cilium-978988
--- SKIP: TestNetworkPlugins/group/cilium (5.28s)

                                                
                                    
x
+
TestStartStop/group/disable-driver-mounts (0.18s)

                                                
                                                
=== RUN   TestStartStop/group/disable-driver-mounts
=== PAUSE TestStartStop/group/disable-driver-mounts

                                                
                                                

                                                
                                                
=== CONT  TestStartStop/group/disable-driver-mounts
start_stop_delete_test.go:103: skipping TestStartStop/group/disable-driver-mounts - only runs on virtualbox
helpers_test.go:175: Cleaning up "disable-driver-mounts-710184" profile ...
helpers_test.go:178: (dbg) Run:  out/minikube-linux-arm64 delete -p disable-driver-mounts-710184
--- SKIP: TestStartStop/group/disable-driver-mounts (0.18s)

                                                
                                    