Test Report: Docker_Linux_crio 16969

e754f159ea363f9e33ad2331b33fc10ae6e501a8:2023-07-31:30375
Failed tests (7/304)

TestAddons/parallel/Ingress (166.41s)

=== RUN   TestAddons/parallel/Ingress
=== PAUSE TestAddons/parallel/Ingress
=== CONT  TestAddons/parallel/Ingress
addons_test.go:183: (dbg) Run:  kubectl --context addons-764200 wait --for=condition=ready --namespace=ingress-nginx pod --selector=app.kubernetes.io/component=controller --timeout=90s
addons_test.go:183: (dbg) Done: kubectl --context addons-764200 wait --for=condition=ready --namespace=ingress-nginx pod --selector=app.kubernetes.io/component=controller --timeout=90s: (12.540146202s)
addons_test.go:208: (dbg) Run:  kubectl --context addons-764200 replace --force -f testdata/nginx-ingress-v1.yaml
addons_test.go:221: (dbg) Run:  kubectl --context addons-764200 replace --force -f testdata/nginx-pod-svc.yaml
addons_test.go:226: (dbg) TestAddons/parallel/Ingress: waiting 8m0s for pods matching "run=nginx" in namespace "default" ...
helpers_test.go:344: "nginx" [21f1b8e8-7155-4721-aedd-cdffbc6e56c9] Pending / Ready:ContainersNotReady (containers with unready status: [nginx]) / ContainersReady:ContainersNotReady (containers with unready status: [nginx])
helpers_test.go:344: "nginx" [21f1b8e8-7155-4721-aedd-cdffbc6e56c9] Running
addons_test.go:226: (dbg) TestAddons/parallel/Ingress: run=nginx healthy within 11.009471676s
addons_test.go:238: (dbg) Run:  out/minikube-linux-amd64 -p addons-764200 ssh "curl -s http://127.0.0.1/ -H 'Host: nginx.example.com'"
addons_test.go:238: (dbg) Non-zero exit: out/minikube-linux-amd64 -p addons-764200 ssh "curl -s http://127.0.0.1/ -H 'Host: nginx.example.com'": exit status 1 (2m9.961084449s)

** stderr ** 
	ssh: Process exited with status 28
** /stderr **
addons_test.go:254: failed to get expected response from http://127.0.0.1/ within minikube: exit status 1
addons_test.go:262: (dbg) Run:  kubectl --context addons-764200 replace --force -f testdata/ingress-dns-example-v1.yaml
addons_test.go:267: (dbg) Run:  out/minikube-linux-amd64 -p addons-764200 ip
addons_test.go:273: (dbg) Run:  nslookup hello-john.test 192.168.49.2
addons_test.go:282: (dbg) Run:  out/minikube-linux-amd64 -p addons-764200 addons disable ingress-dns --alsologtostderr -v=1
addons_test.go:282: (dbg) Done: out/minikube-linux-amd64 -p addons-764200 addons disable ingress-dns --alsologtostderr -v=1: (1.532765531s)
addons_test.go:287: (dbg) Run:  out/minikube-linux-amd64 -p addons-764200 addons disable ingress --alsologtostderr -v=1
addons_test.go:287: (dbg) Done: out/minikube-linux-amd64 -p addons-764200 addons disable ingress --alsologtostderr -v=1: (7.580721697s)
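Note on the failure mode: "ssh: Process exited with status 28" reports the exit code of the command run inside the node, and curl exit code 28 means the operation timed out (CURLE_OPERATION_TIMEDOUT), so the request to the ingress on 127.0.0.1:80 stalled rather than being refused. A hedged sketch for re-issuing the probe by hand against this run's profile (the --max-time cap, -v verbosity, and trailing echo are diagnostic additions, not part of the test):

	# Re-run the test's curl inside the minikube node; \$? is escaped so the
	# exit code printed is the remote curl's, not the local shell's.
	out/minikube-linux-amd64 -p addons-764200 ssh \
	  "curl -sv --max-time 10 http://127.0.0.1/ -H 'Host: nginx.example.com'; echo remote-curl-exit=\$?"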
helpers_test.go:222: -----------------------post-mortem--------------------------------
helpers_test.go:230: ======>  post-mortem[TestAddons/parallel/Ingress]: docker inspect <======
helpers_test.go:231: (dbg) Run:  docker inspect addons-764200
helpers_test.go:235: (dbg) docker inspect addons-764200:

-- stdout --
	[
	    {
	        "Id": "72ab2ac326d0be794478a6051c2d75b49334eb840910e3cba2ded007d6593519",
	        "Created": "2023-07-31T10:34:17.036083956Z",
	        "Path": "/usr/local/bin/entrypoint",
	        "Args": [
	            "/sbin/init"
	        ],
	        "State": {
	            "Status": "running",
	            "Running": true,
	            "Paused": false,
	            "Restarting": false,
	            "OOMKilled": false,
	            "Dead": false,
	            "Pid": 14119,
	            "ExitCode": 0,
	            "Error": "",
	            "StartedAt": "2023-07-31T10:34:17.347587195Z",
	            "FinishedAt": "0001-01-01T00:00:00Z"
	        },
	        "Image": "sha256:c6cc01e6091959400f260dc442708e7c71630b58dab1f7c344cb00926bd84950",
	        "ResolvConfPath": "/var/lib/docker/containers/72ab2ac326d0be794478a6051c2d75b49334eb840910e3cba2ded007d6593519/resolv.conf",
	        "HostnamePath": "/var/lib/docker/containers/72ab2ac326d0be794478a6051c2d75b49334eb840910e3cba2ded007d6593519/hostname",
	        "HostsPath": "/var/lib/docker/containers/72ab2ac326d0be794478a6051c2d75b49334eb840910e3cba2ded007d6593519/hosts",
	        "LogPath": "/var/lib/docker/containers/72ab2ac326d0be794478a6051c2d75b49334eb840910e3cba2ded007d6593519/72ab2ac326d0be794478a6051c2d75b49334eb840910e3cba2ded007d6593519-json.log",
	        "Name": "/addons-764200",
	        "RestartCount": 0,
	        "Driver": "overlay2",
	        "Platform": "linux",
	        "MountLabel": "",
	        "ProcessLabel": "",
	        "AppArmorProfile": "unconfined",
	        "ExecIDs": null,
	        "HostConfig": {
	            "Binds": [
	                "/lib/modules:/lib/modules:ro",
	                "addons-764200:/var"
	            ],
	            "ContainerIDFile": "",
	            "LogConfig": {
	                "Type": "json-file",
	                "Config": {
	                    "max-size": "100m"
	                }
	            },
	            "NetworkMode": "addons-764200",
	            "PortBindings": {
	                "22/tcp": [
	                    {
	                        "HostIp": "127.0.0.1",
	                        "HostPort": ""
	                    }
	                ],
	                "2376/tcp": [
	                    {
	                        "HostIp": "127.0.0.1",
	                        "HostPort": ""
	                    }
	                ],
	                "32443/tcp": [
	                    {
	                        "HostIp": "127.0.0.1",
	                        "HostPort": ""
	                    }
	                ],
	                "5000/tcp": [
	                    {
	                        "HostIp": "127.0.0.1",
	                        "HostPort": ""
	                    }
	                ],
	                "8443/tcp": [
	                    {
	                        "HostIp": "127.0.0.1",
	                        "HostPort": ""
	                    }
	                ]
	            },
	            "RestartPolicy": {
	                "Name": "no",
	                "MaximumRetryCount": 0
	            },
	            "AutoRemove": false,
	            "VolumeDriver": "",
	            "VolumesFrom": null,
	            "ConsoleSize": [
	                0,
	                0
	            ],
	            "CapAdd": null,
	            "CapDrop": null,
	            "CgroupnsMode": "host",
	            "Dns": [],
	            "DnsOptions": [],
	            "DnsSearch": [],
	            "ExtraHosts": null,
	            "GroupAdd": null,
	            "IpcMode": "private",
	            "Cgroup": "",
	            "Links": null,
	            "OomScoreAdj": 0,
	            "PidMode": "",
	            "Privileged": true,
	            "PublishAllPorts": false,
	            "ReadonlyRootfs": false,
	            "SecurityOpt": [
	                "seccomp=unconfined",
	                "apparmor=unconfined",
	                "label=disable"
	            ],
	            "Tmpfs": {
	                "/run": "",
	                "/tmp": ""
	            },
	            "UTSMode": "",
	            "UsernsMode": "",
	            "ShmSize": 67108864,
	            "Runtime": "runc",
	            "Isolation": "",
	            "CpuShares": 0,
	            "Memory": 4194304000,
	            "NanoCpus": 2000000000,
	            "CgroupParent": "",
	            "BlkioWeight": 0,
	            "BlkioWeightDevice": [],
	            "BlkioDeviceReadBps": [],
	            "BlkioDeviceWriteBps": [],
	            "BlkioDeviceReadIOps": [],
	            "BlkioDeviceWriteIOps": [],
	            "CpuPeriod": 0,
	            "CpuQuota": 0,
	            "CpuRealtimePeriod": 0,
	            "CpuRealtimeRuntime": 0,
	            "CpusetCpus": "",
	            "CpusetMems": "",
	            "Devices": [],
	            "DeviceCgroupRules": null,
	            "DeviceRequests": null,
	            "MemoryReservation": 0,
	            "MemorySwap": 8388608000,
	            "MemorySwappiness": null,
	            "OomKillDisable": false,
	            "PidsLimit": null,
	            "Ulimits": null,
	            "CpuCount": 0,
	            "CpuPercent": 0,
	            "IOMaximumIOps": 0,
	            "IOMaximumBandwidth": 0,
	            "MaskedPaths": null,
	            "ReadonlyPaths": null
	        },
	        "GraphDriver": {
	            "Data": {
	                "LowerDir": "/var/lib/docker/overlay2/22d7e878f2fe3252bf171debaa896b50bc3519e42cd0b429c26cb106f669cda9-init/diff:/var/lib/docker/overlay2/738d78659811af5605d784380774f3996551e9a95d42d3d998a185d72e7e9dcf/diff",
	                "MergedDir": "/var/lib/docker/overlay2/22d7e878f2fe3252bf171debaa896b50bc3519e42cd0b429c26cb106f669cda9/merged",
	                "UpperDir": "/var/lib/docker/overlay2/22d7e878f2fe3252bf171debaa896b50bc3519e42cd0b429c26cb106f669cda9/diff",
	                "WorkDir": "/var/lib/docker/overlay2/22d7e878f2fe3252bf171debaa896b50bc3519e42cd0b429c26cb106f669cda9/work"
	            },
	            "Name": "overlay2"
	        },
	        "Mounts": [
	            {
	                "Type": "bind",
	                "Source": "/lib/modules",
	                "Destination": "/lib/modules",
	                "Mode": "ro",
	                "RW": false,
	                "Propagation": "rprivate"
	            },
	            {
	                "Type": "volume",
	                "Name": "addons-764200",
	                "Source": "/var/lib/docker/volumes/addons-764200/_data",
	                "Destination": "/var",
	                "Driver": "local",
	                "Mode": "z",
	                "RW": true,
	                "Propagation": ""
	            }
	        ],
	        "Config": {
	            "Hostname": "addons-764200",
	            "Domainname": "",
	            "User": "",
	            "AttachStdin": false,
	            "AttachStdout": false,
	            "AttachStderr": false,
	            "ExposedPorts": {
	                "22/tcp": {},
	                "2376/tcp": {},
	                "32443/tcp": {},
	                "5000/tcp": {},
	                "8443/tcp": {}
	            },
	            "Tty": true,
	            "OpenStdin": false,
	            "StdinOnce": false,
	            "Env": [
	                "container=docker",
	                "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
	            ],
	            "Cmd": null,
	            "Image": "gcr.io/k8s-minikube/kicbase:v0.0.40@sha256:8cadf23777709e43eca447c47a45f5a4635615129267ce025193040ec92a1631",
	            "Volumes": null,
	            "WorkingDir": "/",
	            "Entrypoint": [
	                "/usr/local/bin/entrypoint",
	                "/sbin/init"
	            ],
	            "OnBuild": null,
	            "Labels": {
	                "created_by.minikube.sigs.k8s.io": "true",
	                "mode.minikube.sigs.k8s.io": "addons-764200",
	                "name.minikube.sigs.k8s.io": "addons-764200",
	                "role.minikube.sigs.k8s.io": ""
	            },
	            "StopSignal": "SIGRTMIN+3"
	        },
	        "NetworkSettings": {
	            "Bridge": "",
	            "SandboxID": "0a08cc55d95f1f102acee2b3938d0047d8e180ad0cafd5ed4238f1a1690b57f6",
	            "HairpinMode": false,
	            "LinkLocalIPv6Address": "",
	            "LinkLocalIPv6PrefixLen": 0,
	            "Ports": {
	                "22/tcp": [
	                    {
	                        "HostIp": "127.0.0.1",
	                        "HostPort": "32772"
	                    }
	                ],
	                "2376/tcp": [
	                    {
	                        "HostIp": "127.0.0.1",
	                        "HostPort": "32771"
	                    }
	                ],
	                "32443/tcp": [
	                    {
	                        "HostIp": "127.0.0.1",
	                        "HostPort": "32768"
	                    }
	                ],
	                "5000/tcp": [
	                    {
	                        "HostIp": "127.0.0.1",
	                        "HostPort": "32770"
	                    }
	                ],
	                "8443/tcp": [
	                    {
	                        "HostIp": "127.0.0.1",
	                        "HostPort": "32769"
	                    }
	                ]
	            },
	            "SandboxKey": "/var/run/docker/netns/0a08cc55d95f",
	            "SecondaryIPAddresses": null,
	            "SecondaryIPv6Addresses": null,
	            "EndpointID": "",
	            "Gateway": "",
	            "GlobalIPv6Address": "",
	            "GlobalIPv6PrefixLen": 0,
	            "IPAddress": "",
	            "IPPrefixLen": 0,
	            "IPv6Gateway": "",
	            "MacAddress": "",
	            "Networks": {
	                "addons-764200": {
	                    "IPAMConfig": {
	                        "IPv4Address": "192.168.49.2"
	                    },
	                    "Links": null,
	                    "Aliases": [
	                        "72ab2ac326d0",
	                        "addons-764200"
	                    ],
	                    "NetworkID": "cc3873672147dc647d756aaaf84f437d564afff01f1d3b36b89fda7301c80b40",
	                    "EndpointID": "966f2085397749ddeeb989557a5c5ac7ab9c994729c79b8a6f7ebd5ab6d42100",
	                    "Gateway": "192.168.49.1",
	                    "IPAddress": "192.168.49.2",
	                    "IPPrefixLen": 24,
	                    "IPv6Gateway": "",
	                    "GlobalIPv6Address": "",
	                    "GlobalIPv6PrefixLen": 0,
	                    "MacAddress": "02:42:c0:a8:31:02",
	                    "DriverOpts": null
	                }
	            }
	        }
	    }
	]

-- /stdout --
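As an aside, the SSH port mapping buried in this inspect output can be read directly with the same Go template the harness itself uses later in these logs; a sketch against this run's profile name:

	# Print the host port mapped to the node's 22/tcp (32772 in this run).
	docker container inspect addons-764200 \
	  --format '{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'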
helpers_test.go:239: (dbg) Run:  out/minikube-linux-amd64 status --format={{.Host}} -p addons-764200 -n addons-764200
helpers_test.go:244: <<< TestAddons/parallel/Ingress FAILED: start of post-mortem logs <<<
helpers_test.go:245: ======>  post-mortem[TestAddons/parallel/Ingress]: minikube logs <======
helpers_test.go:247: (dbg) Run:  out/minikube-linux-amd64 -p addons-764200 logs -n 25
helpers_test.go:247: (dbg) Done: out/minikube-linux-amd64 -p addons-764200 logs -n 25: (1.10386552s)
helpers_test.go:252: TestAddons/parallel/Ingress logs: 
-- stdout --
	* 
	* ==> Audit <==
	* |---------|--------------------------------|------------------------|---------|---------|---------------------|---------------------|
	| Command |              Args              |        Profile         |  User   | Version |     Start Time      |      End Time       |
	|---------|--------------------------------|------------------------|---------|---------|---------------------|---------------------|
	| start   | -o=json --download-only        | download-only-301760   | jenkins | v1.31.1 | 31 Jul 23 10:33 UTC |                     |
	|         | -p download-only-301760        |                        |         |         |                     |                     |
	|         | --force --alsologtostderr      |                        |         |         |                     |                     |
	|         | --kubernetes-version=v1.16.0   |                        |         |         |                     |                     |
	|         | --container-runtime=crio       |                        |         |         |                     |                     |
	|         | --driver=docker                |                        |         |         |                     |                     |
	|         | --container-runtime=crio       |                        |         |         |                     |                     |
	| start   | -o=json --download-only        | download-only-301760   | jenkins | v1.31.1 | 31 Jul 23 10:33 UTC |                     |
	|         | -p download-only-301760        |                        |         |         |                     |                     |
	|         | --force --alsologtostderr      |                        |         |         |                     |                     |
	|         | --kubernetes-version=v1.27.3   |                        |         |         |                     |                     |
	|         | --container-runtime=crio       |                        |         |         |                     |                     |
	|         | --driver=docker                |                        |         |         |                     |                     |
	|         | --container-runtime=crio       |                        |         |         |                     |                     |
	| delete  | --all                          | minikube               | jenkins | v1.31.1 | 31 Jul 23 10:33 UTC | 31 Jul 23 10:33 UTC |
	| delete  | -p download-only-301760        | download-only-301760   | jenkins | v1.31.1 | 31 Jul 23 10:33 UTC | 31 Jul 23 10:33 UTC |
	| delete  | -p download-only-301760        | download-only-301760   | jenkins | v1.31.1 | 31 Jul 23 10:33 UTC | 31 Jul 23 10:33 UTC |
	| start   | --download-only -p             | download-docker-990933 | jenkins | v1.31.1 | 31 Jul 23 10:33 UTC |                     |
	|         | download-docker-990933         |                        |         |         |                     |                     |
	|         | --alsologtostderr              |                        |         |         |                     |                     |
	|         | --driver=docker                |                        |         |         |                     |                     |
	|         | --container-runtime=crio       |                        |         |         |                     |                     |
	| delete  | -p download-docker-990933      | download-docker-990933 | jenkins | v1.31.1 | 31 Jul 23 10:33 UTC | 31 Jul 23 10:33 UTC |
	| start   | --download-only -p             | binary-mirror-398610   | jenkins | v1.31.1 | 31 Jul 23 10:33 UTC |                     |
	|         | binary-mirror-398610           |                        |         |         |                     |                     |
	|         | --alsologtostderr              |                        |         |         |                     |                     |
	|         | --binary-mirror                |                        |         |         |                     |                     |
	|         | http://127.0.0.1:38677         |                        |         |         |                     |                     |
	|         | --driver=docker                |                        |         |         |                     |                     |
	|         | --container-runtime=crio       |                        |         |         |                     |                     |
	| delete  | -p binary-mirror-398610        | binary-mirror-398610   | jenkins | v1.31.1 | 31 Jul 23 10:33 UTC | 31 Jul 23 10:33 UTC |
	| start   | -p addons-764200               | addons-764200          | jenkins | v1.31.1 | 31 Jul 23 10:33 UTC | 31 Jul 23 10:35 UTC |
	|         | --wait=true --memory=4000      |                        |         |         |                     |                     |
	|         | --alsologtostderr              |                        |         |         |                     |                     |
	|         | --addons=registry              |                        |         |         |                     |                     |
	|         | --addons=metrics-server        |                        |         |         |                     |                     |
	|         | --addons=volumesnapshots       |                        |         |         |                     |                     |
	|         | --addons=csi-hostpath-driver   |                        |         |         |                     |                     |
	|         | --addons=gcp-auth              |                        |         |         |                     |                     |
	|         | --addons=cloud-spanner         |                        |         |         |                     |                     |
	|         | --addons=inspektor-gadget      |                        |         |         |                     |                     |
	|         | --driver=docker                |                        |         |         |                     |                     |
	|         | --container-runtime=crio       |                        |         |         |                     |                     |
	|         | --addons=ingress               |                        |         |         |                     |                     |
	|         | --addons=ingress-dns           |                        |         |         |                     |                     |
	|         | --addons=helm-tiller           |                        |         |         |                     |                     |
	| addons  | disable cloud-spanner -p       | addons-764200          | jenkins | v1.31.1 | 31 Jul 23 10:36 UTC | 31 Jul 23 10:36 UTC |
	|         | addons-764200                  |                        |         |         |                     |                     |
	| addons  | addons-764200 addons           | addons-764200          | jenkins | v1.31.1 | 31 Jul 23 10:36 UTC | 31 Jul 23 10:36 UTC |
	|         | disable metrics-server         |                        |         |         |                     |                     |
	|         | --alsologtostderr -v=1         |                        |         |         |                     |                     |
	| addons  | disable inspektor-gadget -p    | addons-764200          | jenkins | v1.31.1 | 31 Jul 23 10:36 UTC | 31 Jul 23 10:36 UTC |
	|         | addons-764200                  |                        |         |         |                     |                     |
	| ip      | addons-764200 ip               | addons-764200          | jenkins | v1.31.1 | 31 Jul 23 10:36 UTC | 31 Jul 23 10:36 UTC |
	| addons  | addons-764200 addons disable   | addons-764200          | jenkins | v1.31.1 | 31 Jul 23 10:36 UTC | 31 Jul 23 10:36 UTC |
	|         | registry --alsologtostderr     |                        |         |         |                     |                     |
	|         | -v=1                           |                        |         |         |                     |                     |
	| addons  | enable headlamp                | addons-764200          | jenkins | v1.31.1 | 31 Jul 23 10:36 UTC | 31 Jul 23 10:36 UTC |
	|         | -p addons-764200               |                        |         |         |                     |                     |
	|         | --alsologtostderr -v=1         |                        |         |         |                     |                     |
	| ssh     | addons-764200 ssh curl -s      | addons-764200          | jenkins | v1.31.1 | 31 Jul 23 10:36 UTC |                     |
	|         | http://127.0.0.1/ -H 'Host:    |                        |         |         |                     |                     |
	|         | nginx.example.com'             |                        |         |         |                     |                     |
	| addons  | addons-764200 addons disable   | addons-764200          | jenkins | v1.31.1 | 31 Jul 23 10:36 UTC | 31 Jul 23 10:36 UTC |
	|         | helm-tiller --alsologtostderr  |                        |         |         |                     |                     |
	|         | -v=1                           |                        |         |         |                     |                     |
	| addons  | addons-764200 addons           | addons-764200          | jenkins | v1.31.1 | 31 Jul 23 10:37 UTC | 31 Jul 23 10:37 UTC |
	|         | disable csi-hostpath-driver    |                        |         |         |                     |                     |
	|         | --alsologtostderr -v=1         |                        |         |         |                     |                     |
	| addons  | addons-764200 addons           | addons-764200          | jenkins | v1.31.1 | 31 Jul 23 10:37 UTC | 31 Jul 23 10:37 UTC |
	|         | disable volumesnapshots        |                        |         |         |                     |                     |
	|         | --alsologtostderr -v=1         |                        |         |         |                     |                     |
	| ip      | addons-764200 ip               | addons-764200          | jenkins | v1.31.1 | 31 Jul 23 10:38 UTC | 31 Jul 23 10:38 UTC |
	| addons  | addons-764200 addons disable   | addons-764200          | jenkins | v1.31.1 | 31 Jul 23 10:38 UTC | 31 Jul 23 10:38 UTC |
	|         | ingress-dns --alsologtostderr  |                        |         |         |                     |                     |
	|         | -v=1                           |                        |         |         |                     |                     |
	| addons  | addons-764200 addons disable   | addons-764200          | jenkins | v1.31.1 | 31 Jul 23 10:38 UTC | 31 Jul 23 10:38 UTC |
	|         | ingress --alsologtostderr -v=1 |                        |         |         |                     |                     |
	|---------|--------------------------------|------------------------|---------|---------|---------------------|---------------------|
	
	* 
	* ==> Last Start <==
	* Log file created at: 2023/07/31 10:33:53
	Running on machine: ubuntu-20-agent-15
	Binary: Built with gc go1.20.6 for linux/amd64
	Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg
	I0731 10:33:53.872524   13443 out.go:296] Setting OutFile to fd 1 ...
	I0731 10:33:53.872664   13443 out.go:343] TERM=,COLORTERM=, which probably does not support color
	I0731 10:33:53.872676   13443 out.go:309] Setting ErrFile to fd 2...
	I0731 10:33:53.872683   13443 out.go:343] TERM=,COLORTERM=, which probably does not support color
	I0731 10:33:53.872868   13443 root.go:338] Updating PATH: /home/jenkins/minikube-integration/16969-5799/.minikube/bin
	I0731 10:33:53.873416   13443 out.go:303] Setting JSON to false
	I0731 10:33:53.874171   13443 start.go:128] hostinfo: {"hostname":"ubuntu-20-agent-15","uptime":986,"bootTime":1690798648,"procs":170,"os":"linux","platform":"ubuntu","platformFamily":"debian","platformVersion":"20.04","kernelVersion":"5.15.0-1038-gcp","kernelArch":"x86_64","virtualizationSystem":"kvm","virtualizationRole":"guest","hostId":"591c9f12-2938-3743-e2bf-c56a050d43d1"}
	I0731 10:33:53.874247   13443 start.go:138] virtualization: kvm guest
	I0731 10:33:53.876649   13443 out.go:177] * [addons-764200] minikube v1.31.1 on Ubuntu 20.04 (kvm/amd64)
	I0731 10:33:53.878071   13443 notify.go:220] Checking for updates...
	I0731 10:33:53.878079   13443 out.go:177]   - MINIKUBE_LOCATION=16969
	I0731 10:33:53.879604   13443 out.go:177]   - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
	I0731 10:33:53.880937   13443 out.go:177]   - KUBECONFIG=/home/jenkins/minikube-integration/16969-5799/kubeconfig
	I0731 10:33:53.882158   13443 out.go:177]   - MINIKUBE_HOME=/home/jenkins/minikube-integration/16969-5799/.minikube
	I0731 10:33:53.883554   13443 out.go:177]   - MINIKUBE_BIN=out/minikube-linux-amd64
	I0731 10:33:53.884825   13443 out.go:177]   - MINIKUBE_FORCE_SYSTEMD=
	I0731 10:33:53.886279   13443 driver.go:373] Setting default libvirt URI to qemu:///system
	I0731 10:33:53.905436   13443 docker.go:121] docker version: linux-24.0.5:Docker Engine - Community
	I0731 10:33:53.905539   13443 cli_runner.go:164] Run: docker system info --format "{{json .}}"
	I0731 10:33:53.954297   13443 info.go:266] docker info: {ID:TS6T:UINC:MIYS:RZPA:KS6T:4JQK:7JHN:D6RA:LDP2:MHAE:G32M:C5NQ Containers:0 ContainersRunning:0 ContainersPaused:0 ContainersStopped:0 Images:1 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local logentries splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:true BridgeNfIP6Tables:true Debug:false NFd:29 OomKillDisable:true NGoroutines:39 SystemTime:2023-07-31 10:33:53.946232685 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1038-gcp OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Architecture:x86_64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:8 MemTotal:33648062464 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ubuntu-20-agent-15 Labels:[] ExperimentalBuild:false ServerVersion:24.0.5 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:3dce8eb055cbb6872793272b4f20ed16117344f8 Expected:3dce8eb055cbb6872793272b4f20ed16117344f8} RuncCommit:{ID:v1.1.7-0-g860f061 Expected:v1.1.7-0-g860f061} InitCommit:{ID:de40ad0 Expected:de40ad0} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.11.2] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.20.2] map[Name:scan Path:/usr/libexec/docker/cli-plugins/docker-scan SchemaVersion:0.1.0 ShortDescription:Docker Scan Vendor:Docker Inc. Version:v0.23.0]] Warnings:<nil>}}
	I0731 10:33:53.954389   13443 docker.go:294] overlay module found
	I0731 10:33:53.956203   13443 out.go:177] * Using the docker driver based on user configuration
	I0731 10:33:53.957562   13443 start.go:298] selected driver: docker
	I0731 10:33:53.957572   13443 start.go:898] validating driver "docker" against <nil>
	I0731 10:33:53.957581   13443 start.go:909] status for docker: {Installed:true Healthy:true Running:false NeedsImprovement:false Error:<nil> Reason: Fix: Doc: Version:}
	I0731 10:33:53.958323   13443 cli_runner.go:164] Run: docker system info --format "{{json .}}"
	I0731 10:33:54.005975   13443 info.go:266] docker info: {ID:TS6T:UINC:MIYS:RZPA:KS6T:4JQK:7JHN:D6RA:LDP2:MHAE:G32M:C5NQ Containers:0 ContainersRunning:0 ContainersPaused:0 ContainersStopped:0 Images:1 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local logentries splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:true BridgeNfIP6Tables:true Debug:false NFd:29 OomKillDisable:true NGoroutines:39 SystemTime:2023-07-31 10:33:53.998094558 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1038-gcp OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Architecture:x86_64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:8 MemTotal:33648062464 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ubuntu-20-agent-15 Labels:[] ExperimentalBuild:false ServerVersion:24.0.5 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:3dce8eb055cbb6872793272b4f20ed16117344f8 Expected:3dce8eb055cbb6872793272b4f20ed16117344f8} RuncCommit:{ID:v1.1.7-0-g860f061 Expected:v1.1.7-0-g860f061} InitCommit:{ID:de40ad0 Expected:de40ad0} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.11.2] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.20.2] map[Name:scan Path:/usr/libexec/docker/cli-plugins/docker-scan SchemaVersion:0.1.0 ShortDescription:Docker Scan Vendor:Docker Inc. Version:v0.23.0]] Warnings:<nil>}}
	I0731 10:33:54.006122   13443 start_flags.go:305] no existing cluster config was found, will generate one from the flags 
	I0731 10:33:54.006327   13443 start_flags.go:919] Waiting for all components: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
	I0731 10:33:54.007910   13443 out.go:177] * Using Docker driver with root privileges
	I0731 10:33:54.009477   13443 cni.go:84] Creating CNI manager for ""
	I0731 10:33:54.009489   13443 cni.go:143] "docker" driver + "crio" runtime found, recommending kindnet
	I0731 10:33:54.009497   13443 start_flags.go:314] Found "CNI" CNI - setting NetworkPlugin=cni
	I0731 10:33:54.009504   13443 start_flags.go:319] config:
	{Name:addons-764200 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase:v0.0.40@sha256:8cadf23777709e43eca447c47a45f5a4635615129267ce025193040ec92a1631 Memory:4000 CPUs:2 DiskSize:20000 VMDriver: Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:0 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.27.3 ClusterName:addons-764200 Namespace:default APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:crio CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI: NodeIP: NodePort:8443 NodeName:} Nodes:[] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0}
	I0731 10:33:54.010992   13443 out.go:177] * Starting control plane node addons-764200 in cluster addons-764200
	I0731 10:33:54.012294   13443 cache.go:122] Beginning downloading kic base image for docker with crio
	I0731 10:33:54.013618   13443 out.go:177] * Pulling base image ...
	I0731 10:33:54.014989   13443 preload.go:132] Checking if preload exists for k8s version v1.27.3 and runtime crio
	I0731 10:33:54.015017   13443 image.go:79] Checking for gcr.io/k8s-minikube/kicbase:v0.0.40@sha256:8cadf23777709e43eca447c47a45f5a4635615129267ce025193040ec92a1631 in local docker daemon
	I0731 10:33:54.015024   13443 preload.go:148] Found local preload: /home/jenkins/minikube-integration/16969-5799/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.27.3-cri-o-overlay-amd64.tar.lz4
	I0731 10:33:54.015126   13443 cache.go:57] Caching tarball of preloaded images
	I0731 10:33:54.015199   13443 preload.go:174] Found /home/jenkins/minikube-integration/16969-5799/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.27.3-cri-o-overlay-amd64.tar.lz4 in cache, skipping download
	I0731 10:33:54.015211   13443 cache.go:60] Finished verifying existence of preloaded tar for  v1.27.3 on crio
	I0731 10:33:54.015547   13443 profile.go:148] Saving config to /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/addons-764200/config.json ...
	I0731 10:33:54.015570   13443 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/addons-764200/config.json: {Name:mk366a9f3a551ef9f1de94cb1d79e7cbdbfcdead Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
	I0731 10:33:54.028735   13443 cache.go:150] Downloading gcr.io/k8s-minikube/kicbase:v0.0.40@sha256:8cadf23777709e43eca447c47a45f5a4635615129267ce025193040ec92a1631 to local cache
	I0731 10:33:54.028810   13443 image.go:63] Checking for gcr.io/k8s-minikube/kicbase:v0.0.40@sha256:8cadf23777709e43eca447c47a45f5a4635615129267ce025193040ec92a1631 in local cache directory
	I0731 10:33:54.028825   13443 image.go:66] Found gcr.io/k8s-minikube/kicbase:v0.0.40@sha256:8cadf23777709e43eca447c47a45f5a4635615129267ce025193040ec92a1631 in local cache directory, skipping pull
	I0731 10:33:54.028829   13443 image.go:105] gcr.io/k8s-minikube/kicbase:v0.0.40@sha256:8cadf23777709e43eca447c47a45f5a4635615129267ce025193040ec92a1631 exists in cache, skipping pull
	I0731 10:33:54.028838   13443 cache.go:153] successfully saved gcr.io/k8s-minikube/kicbase:v0.0.40@sha256:8cadf23777709e43eca447c47a45f5a4635615129267ce025193040ec92a1631 as a tarball
	I0731 10:33:54.028842   13443 cache.go:163] Loading gcr.io/k8s-minikube/kicbase:v0.0.40@sha256:8cadf23777709e43eca447c47a45f5a4635615129267ce025193040ec92a1631 from local cache
	I0731 10:34:04.678547   13443 cache.go:165] successfully loaded and using gcr.io/k8s-minikube/kicbase:v0.0.40@sha256:8cadf23777709e43eca447c47a45f5a4635615129267ce025193040ec92a1631 from cached tarball
	I0731 10:34:04.678585   13443 cache.go:195] Successfully downloaded all kic artifacts
	I0731 10:34:04.678633   13443 start.go:365] acquiring machines lock for addons-764200: {Name:mkcf6e97a398f828c1086969cd57f6695603e875 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
	I0731 10:34:04.678732   13443 start.go:369] acquired machines lock for "addons-764200" in 76.042µs
	I0731 10:34:04.678756   13443 start.go:93] Provisioning new machine with config: &{Name:addons-764200 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase:v0.0.40@sha256:8cadf23777709e43eca447c47a45f5a4635615129267ce025193040ec92a1631 Memory:4000 CPUs:2 DiskSize:20000 VMDriver: Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:0 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.27.3 ClusterName:addons-764200 Namespace:default APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:crio CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI: NodeIP: NodePort:8443 NodeName:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.27.3 ContainerRuntime:crio ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0} &{Name: IP: Port:8443 KubernetesVersion:v1.27.3 ContainerRuntime:crio ControlPlane:true Worker:true}
	I0731 10:34:04.678901   13443 start.go:125] createHost starting for "" (driver="docker")
	I0731 10:34:04.680680   13443 out.go:204] * Creating docker container (CPUs=2, Memory=4000MB) ...
	I0731 10:34:04.680924   13443 start.go:159] libmachine.API.Create for "addons-764200" (driver="docker")
	I0731 10:34:04.680950   13443 client.go:168] LocalClient.Create starting
	I0731 10:34:04.681033   13443 main.go:141] libmachine: Creating CA: /home/jenkins/minikube-integration/16969-5799/.minikube/certs/ca.pem
	I0731 10:34:04.830795   13443 main.go:141] libmachine: Creating client certificate: /home/jenkins/minikube-integration/16969-5799/.minikube/certs/cert.pem
	I0731 10:34:04.930927   13443 cli_runner.go:164] Run: docker network inspect addons-764200 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
	W0731 10:34:04.945305   13443 cli_runner.go:211] docker network inspect addons-764200 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}" returned with exit code 1
	I0731 10:34:04.945362   13443 network_create.go:281] running [docker network inspect addons-764200] to gather additional debugging logs...
	I0731 10:34:04.945381   13443 cli_runner.go:164] Run: docker network inspect addons-764200
	W0731 10:34:04.958857   13443 cli_runner.go:211] docker network inspect addons-764200 returned with exit code 1
	I0731 10:34:04.958879   13443 network_create.go:284] error running [docker network inspect addons-764200]: docker network inspect addons-764200: exit status 1
	stdout:
	[]
	
	stderr:
	Error response from daemon: network addons-764200 not found
	I0731 10:34:04.958891   13443 network_create.go:286] output of [docker network inspect addons-764200]: -- stdout --
	[]
	
	-- /stdout --
	** stderr ** 
	Error response from daemon: network addons-764200 not found
	
	** /stderr **
	I0731 10:34:04.958929   13443 cli_runner.go:164] Run: docker network inspect bridge --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
	I0731 10:34:04.973286   13443 network.go:209] using free private subnet 192.168.49.0/24: &{IP:192.168.49.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.49.0/24 Gateway:192.168.49.1 ClientMin:192.168.49.2 ClientMax:192.168.49.254 Broadcast:192.168.49.255 IsPrivate:true Interface:{IfaceName: IfaceIPv4: IfaceMTU:0 IfaceMAC:} reservation:0xc000df6ce0}
	I0731 10:34:04.973324   13443 network_create.go:123] attempt to create docker network addons-764200 192.168.49.0/24 with gateway 192.168.49.1 and MTU of 1500 ...
	I0731 10:34:04.973359   13443 cli_runner.go:164] Run: docker network create --driver=bridge --subnet=192.168.49.0/24 --gateway=192.168.49.1 -o --ip-masq -o --icc -o com.docker.network.driver.mtu=1500 --label=created_by.minikube.sigs.k8s.io=true --label=name.minikube.sigs.k8s.io=addons-764200 addons-764200
	I0731 10:34:05.020819   13443 network_create.go:107] docker network addons-764200 192.168.49.0/24 created
	I0731 10:34:05.020847   13443 kic.go:117] calculated static IP "192.168.49.2" for the "addons-764200" container
	I0731 10:34:05.020903   13443 cli_runner.go:164] Run: docker ps -a --format {{.Names}}
	I0731 10:34:05.034786   13443 cli_runner.go:164] Run: docker volume create addons-764200 --label name.minikube.sigs.k8s.io=addons-764200 --label created_by.minikube.sigs.k8s.io=true
	I0731 10:34:05.049997   13443 oci.go:103] Successfully created a docker volume addons-764200
	I0731 10:34:05.050054   13443 cli_runner.go:164] Run: docker run --rm --name addons-764200-preload-sidecar --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=addons-764200 --entrypoint /usr/bin/test -v addons-764200:/var gcr.io/k8s-minikube/kicbase:v0.0.40@sha256:8cadf23777709e43eca447c47a45f5a4635615129267ce025193040ec92a1631 -d /var/lib
	I0731 10:34:12.187282   13443 cli_runner.go:217] Completed: docker run --rm --name addons-764200-preload-sidecar --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=addons-764200 --entrypoint /usr/bin/test -v addons-764200:/var gcr.io/k8s-minikube/kicbase:v0.0.40@sha256:8cadf23777709e43eca447c47a45f5a4635615129267ce025193040ec92a1631 -d /var/lib: (7.137178685s)
	I0731 10:34:12.187311   13443 oci.go:107] Successfully prepared a docker volume addons-764200
	I0731 10:34:12.187325   13443 preload.go:132] Checking if preload exists for k8s version v1.27.3 and runtime crio
	I0731 10:34:12.187347   13443 kic.go:190] Starting extracting preloaded images to volume ...
	I0731 10:34:12.187404   13443 cli_runner.go:164] Run: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/16969-5799/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.27.3-cri-o-overlay-amd64.tar.lz4:/preloaded.tar:ro -v addons-764200:/extractDir gcr.io/k8s-minikube/kicbase:v0.0.40@sha256:8cadf23777709e43eca447c47a45f5a4635615129267ce025193040ec92a1631 -I lz4 -xf /preloaded.tar -C /extractDir
	I0731 10:34:16.972609   13443 cli_runner.go:217] Completed: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/16969-5799/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.27.3-cri-o-overlay-amd64.tar.lz4:/preloaded.tar:ro -v addons-764200:/extractDir gcr.io/k8s-minikube/kicbase:v0.0.40@sha256:8cadf23777709e43eca447c47a45f5a4635615129267ce025193040ec92a1631 -I lz4 -xf /preloaded.tar -C /extractDir: (4.785147134s)
	I0731 10:34:16.972639   13443 kic.go:199] duration metric: took 4.785290 seconds to extract preloaded images to volume
	W0731 10:34:16.972758   13443 cgroups_linux.go:77] Your kernel does not support swap limit capabilities or the cgroup is not mounted.
	I0731 10:34:16.972854   13443 cli_runner.go:164] Run: docker info --format "'{{json .SecurityOptions}}'"
	I0731 10:34:17.022157   13443 cli_runner.go:164] Run: docker run -d -t --privileged --security-opt seccomp=unconfined --tmpfs /tmp --tmpfs /run -v /lib/modules:/lib/modules:ro --hostname addons-764200 --name addons-764200 --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=addons-764200 --label role.minikube.sigs.k8s.io= --label mode.minikube.sigs.k8s.io=addons-764200 --network addons-764200 --ip 192.168.49.2 --volume addons-764200:/var --security-opt apparmor=unconfined --memory=4000mb --cpus=2 -e container=docker --expose 8443 --publish=127.0.0.1::8443 --publish=127.0.0.1::22 --publish=127.0.0.1::2376 --publish=127.0.0.1::5000 --publish=127.0.0.1::32443 gcr.io/k8s-minikube/kicbase:v0.0.40@sha256:8cadf23777709e43eca447c47a45f5a4635615129267ce025193040ec92a1631
	I0731 10:34:17.355554   13443 cli_runner.go:164] Run: docker container inspect addons-764200 --format={{.State.Running}}
	I0731 10:34:17.371855   13443 cli_runner.go:164] Run: docker container inspect addons-764200 --format={{.State.Status}}
	I0731 10:34:17.388762   13443 cli_runner.go:164] Run: docker exec addons-764200 stat /var/lib/dpkg/alternatives/iptables
	I0731 10:34:17.433716   13443 oci.go:144] the created container "addons-764200" has a running status.
	I0731 10:34:17.433745   13443 kic.go:221] Creating ssh key for kic: /home/jenkins/minikube-integration/16969-5799/.minikube/machines/addons-764200/id_rsa...
	I0731 10:34:17.507709   13443 kic_runner.go:191] docker (temp): /home/jenkins/minikube-integration/16969-5799/.minikube/machines/addons-764200/id_rsa.pub --> /home/docker/.ssh/authorized_keys (381 bytes)
	I0731 10:34:17.526021   13443 cli_runner.go:164] Run: docker container inspect addons-764200 --format={{.State.Status}}
	I0731 10:34:17.541973   13443 kic_runner.go:93] Run: chown docker:docker /home/docker/.ssh/authorized_keys
	I0731 10:34:17.542003   13443 kic_runner.go:114] Args: [docker exec --privileged addons-764200 chown docker:docker /home/docker/.ssh/authorized_keys]
	I0731 10:34:17.601929   13443 cli_runner.go:164] Run: docker container inspect addons-764200 --format={{.State.Status}}
	I0731 10:34:17.619342   13443 machine.go:88] provisioning docker machine ...
	I0731 10:34:17.619389   13443 ubuntu.go:169] provisioning hostname "addons-764200"
	I0731 10:34:17.619442   13443 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-764200
	I0731 10:34:17.640004   13443 main.go:141] libmachine: Using SSH client type: native
	I0731 10:34:17.640402   13443 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x80eb00] 0x811ba0 <nil>  [] 0s} 127.0.0.1 32772 <nil> <nil>}
	I0731 10:34:17.640417   13443 main.go:141] libmachine: About to run SSH command:
	sudo hostname addons-764200 && echo "addons-764200" | sudo tee /etc/hostname
	I0731 10:34:17.641629   13443 main.go:141] libmachine: Error dialing TCP: ssh: handshake failed: read tcp 127.0.0.1:44536->127.0.0.1:32772: read: connection reset by peer
	I0731 10:34:20.775464   13443 main.go:141] libmachine: SSH cmd err, output: <nil>: addons-764200
	
	I0731 10:34:20.775539   13443 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-764200
	I0731 10:34:20.792333   13443 main.go:141] libmachine: Using SSH client type: native
	I0731 10:34:20.792732   13443 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x80eb00] 0x811ba0 <nil>  [] 0s} 127.0.0.1 32772 <nil> <nil>}
	I0731 10:34:20.792749   13443 main.go:141] libmachine: About to run SSH command:
	
			if ! grep -xq '.*\saddons-764200' /etc/hosts; then
				if grep -xq '127.0.1.1\s.*' /etc/hosts; then
					sudo sed -i 's/^127.0.1.1\s.*/127.0.1.1 addons-764200/g' /etc/hosts;
				else 
					echo '127.0.1.1 addons-764200' | sudo tee -a /etc/hosts; 
				fi
			fi
	I0731 10:34:20.913920   13443 main.go:141] libmachine: SSH cmd err, output: <nil>: 
	I0731 10:34:20.913946   13443 ubuntu.go:175] set auth options {CertDir:/home/jenkins/minikube-integration/16969-5799/.minikube CaCertPath:/home/jenkins/minikube-integration/16969-5799/.minikube/certs/ca.pem CaPrivateKeyPath:/home/jenkins/minikube-integration/16969-5799/.minikube/certs/ca-key.pem CaCertRemotePath:/etc/docker/ca.pem ServerCertPath:/home/jenkins/minikube-integration/16969-5799/.minikube/machines/server.pem ServerKeyPath:/home/jenkins/minikube-integration/16969-5799/.minikube/machines/server-key.pem ClientKeyPath:/home/jenkins/minikube-integration/16969-5799/.minikube/certs/key.pem ServerCertRemotePath:/etc/docker/server.pem ServerKeyRemotePath:/etc/docker/server-key.pem ClientCertPath:/home/jenkins/minikube-integration/16969-5799/.minikube/certs/cert.pem ServerCertSANs:[] StorePath:/home/jenkins/minikube-integration/16969-5799/.minikube}
	I0731 10:34:20.913988   13443 ubuntu.go:177] setting up certificates
	I0731 10:34:20.914001   13443 provision.go:83] configureAuth start
	I0731 10:34:20.914046   13443 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" addons-764200
	I0731 10:34:20.929496   13443 provision.go:138] copyHostCerts
	I0731 10:34:20.929570   13443 exec_runner.go:151] cp: /home/jenkins/minikube-integration/16969-5799/.minikube/certs/ca.pem --> /home/jenkins/minikube-integration/16969-5799/.minikube/ca.pem (1082 bytes)
	I0731 10:34:20.929693   13443 exec_runner.go:151] cp: /home/jenkins/minikube-integration/16969-5799/.minikube/certs/cert.pem --> /home/jenkins/minikube-integration/16969-5799/.minikube/cert.pem (1123 bytes)
	I0731 10:34:20.929778   13443 exec_runner.go:151] cp: /home/jenkins/minikube-integration/16969-5799/.minikube/certs/key.pem --> /home/jenkins/minikube-integration/16969-5799/.minikube/key.pem (1675 bytes)
	I0731 10:34:20.929855   13443 provision.go:112] generating server cert: /home/jenkins/minikube-integration/16969-5799/.minikube/machines/server.pem ca-key=/home/jenkins/minikube-integration/16969-5799/.minikube/certs/ca.pem private-key=/home/jenkins/minikube-integration/16969-5799/.minikube/certs/ca-key.pem org=jenkins.addons-764200 san=[192.168.49.2 127.0.0.1 localhost 127.0.0.1 minikube addons-764200]
	I0731 10:34:21.058941   13443 provision.go:172] copyRemoteCerts
	I0731 10:34:21.058993   13443 ssh_runner.go:195] Run: sudo mkdir -p /etc/docker /etc/docker /etc/docker
	I0731 10:34:21.059024   13443 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-764200
	I0731 10:34:21.074489   13443 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32772 SSHKeyPath:/home/jenkins/minikube-integration/16969-5799/.minikube/machines/addons-764200/id_rsa Username:docker}
	I0731 10:34:21.161625   13443 ssh_runner.go:362] scp /home/jenkins/minikube-integration/16969-5799/.minikube/certs/ca.pem --> /etc/docker/ca.pem (1082 bytes)
	I0731 10:34:21.181667   13443 ssh_runner.go:362] scp /home/jenkins/minikube-integration/16969-5799/.minikube/machines/server.pem --> /etc/docker/server.pem (1216 bytes)
	I0731 10:34:21.200543   13443 ssh_runner.go:362] scp /home/jenkins/minikube-integration/16969-5799/.minikube/machines/server-key.pem --> /etc/docker/server-key.pem (1675 bytes)
	I0731 10:34:21.219707   13443 provision.go:86] duration metric: configureAuth took 305.692119ms
	I0731 10:34:21.219728   13443 ubuntu.go:193] setting minikube options for container-runtime
	I0731 10:34:21.219902   13443 config.go:182] Loaded profile config "addons-764200": Driver=docker, ContainerRuntime=crio, KubernetesVersion=v1.27.3
	I0731 10:34:21.219998   13443 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-764200
	I0731 10:34:21.235551   13443 main.go:141] libmachine: Using SSH client type: native
	I0731 10:34:21.236100   13443 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x80eb00] 0x811ba0 <nil>  [] 0s} 127.0.0.1 32772 <nil> <nil>}
	I0731 10:34:21.236119   13443 main.go:141] libmachine: About to run SSH command:
	sudo mkdir -p /etc/sysconfig && printf %!s(MISSING) "
	CRIO_MINIKUBE_OPTIONS='--insecure-registry 10.96.0.0/12 '
	" | sudo tee /etc/sysconfig/crio.minikube && sudo systemctl restart crio
	I0731 10:34:21.439015   13443 main.go:141] libmachine: SSH cmd err, output: <nil>: 
	CRIO_MINIKUBE_OPTIONS='--insecure-registry 10.96.0.0/12 '
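	The SSH command above writes a CRIO_MINIKUBE_OPTIONS drop-in under /etc/sysconfig and restarts CRI-O so the --insecure-registry flag for the 10.96.0.0/12 service CIDR takes effect; the echoed line confirms the file content. A hypothetical sanity check on the node (not part of the test run):

	# confirm the drop-in exists and that the crio unit reads it
	cat /etc/sysconfig/crio.minikube
	systemctl show crio -p EnvironmentFiles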
	
	I0731 10:34:21.439041   13443 machine.go:91] provisioned docker machine in 3.819676298s
	I0731 10:34:21.439052   13443 client.go:171] LocalClient.Create took 16.758093999s
	I0731 10:34:21.439073   13443 start.go:167] duration metric: libmachine.API.Create for "addons-764200" took 16.758147643s
	I0731 10:34:21.439082   13443 start.go:300] post-start starting for "addons-764200" (driver="docker")
	I0731 10:34:21.439092   13443 start.go:329] creating required directories: [/etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs]
	I0731 10:34:21.439162   13443 ssh_runner.go:195] Run: sudo mkdir -p /etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs
	I0731 10:34:21.439213   13443 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-764200
	I0731 10:34:21.456031   13443 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32772 SSHKeyPath:/home/jenkins/minikube-integration/16969-5799/.minikube/machines/addons-764200/id_rsa Username:docker}
	I0731 10:34:21.546172   13443 ssh_runner.go:195] Run: cat /etc/os-release
	I0731 10:34:21.548971   13443 main.go:141] libmachine: Couldn't set key VERSION_CODENAME, no corresponding struct field found
	I0731 10:34:21.549003   13443 main.go:141] libmachine: Couldn't set key PRIVACY_POLICY_URL, no corresponding struct field found
	I0731 10:34:21.549012   13443 main.go:141] libmachine: Couldn't set key UBUNTU_CODENAME, no corresponding struct field found
	I0731 10:34:21.549018   13443 info.go:137] Remote host: Ubuntu 22.04.2 LTS
	I0731 10:34:21.549030   13443 filesync.go:126] Scanning /home/jenkins/minikube-integration/16969-5799/.minikube/addons for local assets ...
	I0731 10:34:21.549077   13443 filesync.go:126] Scanning /home/jenkins/minikube-integration/16969-5799/.minikube/files for local assets ...
	I0731 10:34:21.549099   13443 start.go:303] post-start completed in 110.011233ms
	I0731 10:34:21.549373   13443 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" addons-764200
	I0731 10:34:21.565089   13443 profile.go:148] Saving config to /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/addons-764200/config.json ...
	I0731 10:34:21.565309   13443 ssh_runner.go:195] Run: sh -c "df -h /var | awk 'NR==2{print $5}'"
	I0731 10:34:21.565345   13443 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-764200
	I0731 10:34:21.581707   13443 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32772 SSHKeyPath:/home/jenkins/minikube-integration/16969-5799/.minikube/machines/addons-764200/id_rsa Username:docker}
	I0731 10:34:21.666430   13443 ssh_runner.go:195] Run: sh -c "df -BG /var | awk 'NR==2{print $4}'"
	I0731 10:34:21.670082   13443 start.go:128] duration metric: createHost completed in 16.991170237s
	I0731 10:34:21.670101   13443 start.go:83] releasing machines lock for "addons-764200", held for 16.991357306s
	I0731 10:34:21.670160   13443 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" addons-764200
	I0731 10:34:21.685606   13443 ssh_runner.go:195] Run: cat /version.json
	I0731 10:34:21.685657   13443 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-764200
	I0731 10:34:21.685715   13443 ssh_runner.go:195] Run: curl -sS -m 2 https://registry.k8s.io/
	I0731 10:34:21.685769   13443 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-764200
	I0731 10:34:21.702240   13443 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32772 SSHKeyPath:/home/jenkins/minikube-integration/16969-5799/.minikube/machines/addons-764200/id_rsa Username:docker}
	I0731 10:34:21.702633   13443 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32772 SSHKeyPath:/home/jenkins/minikube-integration/16969-5799/.minikube/machines/addons-764200/id_rsa Username:docker}
	I0731 10:34:21.785346   13443 ssh_runner.go:195] Run: systemctl --version
	I0731 10:34:21.880954   13443 ssh_runner.go:195] Run: sudo sh -c "podman version >/dev/null"
	I0731 10:34:22.015005   13443 ssh_runner.go:195] Run: sh -c "stat /etc/cni/net.d/*loopback.conf*"
	I0731 10:34:22.018932   13443 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f -name *loopback.conf* -not -name *.mk_disabled -exec sh -c "sudo mv {} {}.mk_disabled" ;
	I0731 10:34:22.035501   13443 cni.go:221] loopback cni configuration disabled: "/etc/cni/net.d/*loopback.conf*" found
	I0731 10:34:22.035573   13443 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f ( ( -name *bridge* -or -name *podman* ) -and -not -name *.mk_disabled ) -printf "%!p(MISSING), " -exec sh -c "sudo mv {} {}.mk_disabled" ;
	I0731 10:34:22.059879   13443 cni.go:262] disabled [/etc/cni/net.d/87-podman-bridge.conflist, /etc/cni/net.d/100-crio-bridge.conf] bridge cni config(s)
	I0731 10:34:22.059906   13443 start.go:466] detecting cgroup driver to use...
	I0731 10:34:22.059939   13443 detect.go:196] detected "cgroupfs" cgroup driver on host os
	I0731 10:34:22.059995   13443 ssh_runner.go:195] Run: sudo systemctl stop -f containerd
	I0731 10:34:22.072242   13443 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service containerd
	I0731 10:34:22.081157   13443 docker.go:196] disabling cri-docker service (if available) ...
	I0731 10:34:22.081215   13443 ssh_runner.go:195] Run: sudo systemctl stop -f cri-docker.socket
	I0731 10:34:22.092271   13443 ssh_runner.go:195] Run: sudo systemctl stop -f cri-docker.service
	I0731 10:34:22.103608   13443 ssh_runner.go:195] Run: sudo systemctl disable cri-docker.socket
	I0731 10:34:22.178090   13443 ssh_runner.go:195] Run: sudo systemctl mask cri-docker.service
	I0731 10:34:22.252892   13443 docker.go:212] disabling docker service ...
	I0731 10:34:22.252952   13443 ssh_runner.go:195] Run: sudo systemctl stop -f docker.socket
	I0731 10:34:22.268531   13443 ssh_runner.go:195] Run: sudo systemctl stop -f docker.service
	I0731 10:34:22.277873   13443 ssh_runner.go:195] Run: sudo systemctl disable docker.socket
	I0731 10:34:22.349815   13443 ssh_runner.go:195] Run: sudo systemctl mask docker.service
	I0731 10:34:22.425629   13443 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service docker
	I0731 10:34:22.435094   13443 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %!s(MISSING) "runtime-endpoint: unix:///var/run/crio/crio.sock
	" | sudo tee /etc/crictl.yaml"
	I0731 10:34:22.448414   13443 crio.go:59] configure cri-o to use "registry.k8s.io/pause:3.9" pause image...
	I0731 10:34:22.448467   13443 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|^.*pause_image = .*$|pause_image = "registry.k8s.io/pause:3.9"|' /etc/crio/crio.conf.d/02-crio.conf"
	I0731 10:34:22.456583   13443 crio.go:70] configuring cri-o to use "cgroupfs" as cgroup driver...
	I0731 10:34:22.456641   13443 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|^.*cgroup_manager = .*$|cgroup_manager = "cgroupfs"|' /etc/crio/crio.conf.d/02-crio.conf"
	I0731 10:34:22.464670   13443 ssh_runner.go:195] Run: sh -c "sudo sed -i '/conmon_cgroup = .*/d' /etc/crio/crio.conf.d/02-crio.conf"
	I0731 10:34:22.472392   13443 ssh_runner.go:195] Run: sh -c "sudo sed -i '/cgroup_manager = .*/a conmon_cgroup = "pod"' /etc/crio/crio.conf.d/02-crio.conf"
	I0731 10:34:22.480267   13443 ssh_runner.go:195] Run: sh -c "sudo rm -rf /etc/cni/net.mk"
	I0731 10:34:22.487536   13443 ssh_runner.go:195] Run: sudo sysctl net.bridge.bridge-nf-call-iptables
	I0731 10:34:22.494241   13443 ssh_runner.go:195] Run: sudo sh -c "echo 1 > /proc/sys/net/ipv4/ip_forward"
	I0731 10:34:22.500904   13443 ssh_runner.go:195] Run: sudo systemctl daemon-reload
	I0731 10:34:22.576872   13443 ssh_runner.go:195] Run: sudo systemctl restart crio
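	The sed edits above rewrite /etc/crio/crio.conf.d/02-crio.conf in place (pause image, cgroup manager, conmon cgroup) before the daemon-reload and restart. A quick way to confirm all three edits landed, as a sketch:

	# the three settings rewritten by the sed commands above
	grep -E 'pause_image|cgroup_manager|conmon_cgroup' /etc/crio/crio.conf.d/02-crio.conf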
	I0731 10:34:22.685848   13443 start.go:513] Will wait 60s for socket path /var/run/crio/crio.sock
	I0731 10:34:22.685922   13443 ssh_runner.go:195] Run: stat /var/run/crio/crio.sock
	I0731 10:34:22.689090   13443 start.go:534] Will wait 60s for crictl version
	I0731 10:34:22.689150   13443 ssh_runner.go:195] Run: which crictl
	I0731 10:34:22.692046   13443 ssh_runner.go:195] Run: sudo /usr/bin/crictl version
	I0731 10:34:22.723367   13443 start.go:550] Version:  0.1.0
	RuntimeName:  cri-o
	RuntimeVersion:  1.24.6
	RuntimeApiVersion:  v1
	I0731 10:34:22.723454   13443 ssh_runner.go:195] Run: crio --version
	I0731 10:34:22.756040   13443 ssh_runner.go:195] Run: crio --version
	I0731 10:34:22.789774   13443 out.go:177] * Preparing Kubernetes v1.27.3 on CRI-O 1.24.6 ...
	I0731 10:34:22.791232   13443 cli_runner.go:164] Run: docker network inspect addons-764200 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
	I0731 10:34:22.806075   13443 ssh_runner.go:195] Run: grep 192.168.49.1	host.minikube.internal$ /etc/hosts
	I0731 10:34:22.809272   13443 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\thost.minikube.internal$' "/etc/hosts"; echo "192.168.49.1	host.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
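	The /etc/hosts rewrite above is the usual idempotent pattern: drop any stale host.minikube.internal line, append the current mapping, and install the result with sudo cp (a plain `>` redirect would fail, since the redirection is opened by the unprivileged shell, not by sudo). The same pattern repeats below for control-plane.minikube.internal. As a generic sketch with placeholder NAME/ADDR:

	# idempotently (re)pin NAME to ADDR in /etc/hosts; NAME and ADDR are placeholders
	{ grep -v $'\tNAME$' /etc/hosts; echo $'ADDR\tNAME'; } > /tmp/h.$$
	sudo cp /tmp/h.$$ /etc/hosts && rm /tmp/h.$$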
	I0731 10:34:22.818549   13443 preload.go:132] Checking if preload exists for k8s version v1.27.3 and runtime crio
	I0731 10:34:22.818594   13443 ssh_runner.go:195] Run: sudo crictl images --output json
	I0731 10:34:22.863314   13443 crio.go:496] all images are preloaded for cri-o runtime.
	I0731 10:34:22.863333   13443 crio.go:415] Images already preloaded, skipping extraction
	I0731 10:34:22.863388   13443 ssh_runner.go:195] Run: sudo crictl images --output json
	I0731 10:34:22.892058   13443 crio.go:496] all images are preloaded for cri-o runtime.
	I0731 10:34:22.892075   13443 cache_images.go:84] Images are preloaded, skipping loading
	I0731 10:34:22.892130   13443 ssh_runner.go:195] Run: crio config
	I0731 10:34:22.929580   13443 cni.go:84] Creating CNI manager for ""
	I0731 10:34:22.929600   13443 cni.go:143] "docker" driver + "crio" runtime found, recommending kindnet
	I0731 10:34:22.929613   13443 kubeadm.go:87] Using pod CIDR: 10.244.0.0/16
	I0731 10:34:22.929631   13443 kubeadm.go:176] kubeadm options: {CertDir:/var/lib/minikube/certs ServiceCIDR:10.96.0.0/12 PodSubnet:10.244.0.0/16 AdvertiseAddress:192.168.49.2 APIServerPort:8443 KubernetesVersion:v1.27.3 EtcdDataDir:/var/lib/minikube/etcd EtcdExtraArgs:map[] ClusterName:addons-764200 NodeName:addons-764200 DNSDomain:cluster.local CRISocket:/var/run/crio/crio.sock ImageRepository: ComponentOptions:[{Component:apiServer ExtraArgs:map[enable-admission-plugins:NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota] Pairs:map[certSANs:["127.0.0.1", "localhost", "192.168.49.2"]]} {Component:controllerManager ExtraArgs:map[allocate-node-cidrs:true leader-elect:false] Pairs:map[]} {Component:scheduler ExtraArgs:map[leader-elect:false] Pairs:map[]}] FeatureArgs:map[] NodeIP:192.168.49.2 CgroupDriver:cgroupfs ClientCAFile:/var/lib/minikube/certs/ca.crt StaticPodPath:/etc/kubernetes/manifests ControlPlaneAddress:control-plane.minikube.internal KubeProxyOptions:map[] ResolvConfSearchRegression:false KubeletConfigOpts:map[hairpinMode:hairpin-veth runtimeRequestTimeout:15m] PrependCriSocketUnix:true}
	I0731 10:34:22.929766   13443 kubeadm.go:181] kubeadm config:
	apiVersion: kubeadm.k8s.io/v1beta3
	kind: InitConfiguration
	localAPIEndpoint:
	  advertiseAddress: 192.168.49.2
	  bindPort: 8443
	bootstrapTokens:
	  - groups:
	      - system:bootstrappers:kubeadm:default-node-token
	    ttl: 24h0m0s
	    usages:
	      - signing
	      - authentication
	nodeRegistration:
	  criSocket: unix:///var/run/crio/crio.sock
	  name: "addons-764200"
	  kubeletExtraArgs:
	    node-ip: 192.168.49.2
	  taints: []
	---
	apiVersion: kubeadm.k8s.io/v1beta3
	kind: ClusterConfiguration
	apiServer:
	  certSANs: ["127.0.0.1", "localhost", "192.168.49.2"]
	  extraArgs:
	    enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
	controllerManager:
	  extraArgs:
	    allocate-node-cidrs: "true"
	    leader-elect: "false"
	scheduler:
	  extraArgs:
	    leader-elect: "false"
	certificatesDir: /var/lib/minikube/certs
	clusterName: mk
	controlPlaneEndpoint: control-plane.minikube.internal:8443
	etcd:
	  local:
	    dataDir: /var/lib/minikube/etcd
	    extraArgs:
	      proxy-refresh-interval: "70000"
	kubernetesVersion: v1.27.3
	networking:
	  dnsDomain: cluster.local
	  podSubnet: "10.244.0.0/16"
	  serviceSubnet: 10.96.0.0/12
	---
	apiVersion: kubelet.config.k8s.io/v1beta1
	kind: KubeletConfiguration
	authentication:
	  x509:
	    clientCAFile: /var/lib/minikube/certs/ca.crt
	cgroupDriver: cgroupfs
	hairpinMode: hairpin-veth
	runtimeRequestTimeout: 15m
	clusterDomain: "cluster.local"
	# disable disk resource management by default
	imageGCHighThresholdPercent: 100
	evictionHard:
	  nodefs.available: "0%!"(MISSING)
	  nodefs.inodesFree: "0%!"(MISSING)
	  imagefs.available: "0%!"(MISSING)
	failSwapOn: false
	staticPodPath: /etc/kubernetes/manifests
	---
	apiVersion: kubeproxy.config.k8s.io/v1alpha1
	kind: KubeProxyConfiguration
	clusterCIDR: "10.244.0.0/16"
	metricsBindAddress: 0.0.0.0:10249
	conntrack:
	  maxPerCore: 0
	# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_established"
	  tcpEstablishedTimeout: 0s
	# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_close"
	  tcpCloseWaitTimeout: 0s
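	The rendered file above bundles four documents (InitConfiguration, ClusterConfiguration, KubeletConfiguration, KubeProxyConfiguration); the `%!"(MISSING)` fragments are almost certainly artifacts of minikube logging the template through a printf-style call, not of the YAML written to the node. If one wanted to check the file before init, kubeadm ships a validator; a sketch, assuming the subcommand is available in the pinned v1.27 binary:

	# validate the generated config with the pinned kubeadm binary
	sudo env PATH="/var/lib/minikube/binaries/v1.27.3:$PATH" \
	  kubeadm config validate --config /var/tmp/minikube/kubeadm.yaml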
	
	I0731 10:34:22.929828   13443 kubeadm.go:976] kubelet [Unit]
	Wants=crio.service
	
	[Service]
	ExecStart=
	ExecStart=/var/lib/minikube/binaries/v1.27.3/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --cgroups-per-qos=false --config=/var/lib/kubelet/config.yaml --container-runtime-endpoint=unix:///var/run/crio/crio.sock --enforce-node-allocatable= --hostname-override=addons-764200 --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.49.2
	
	[Install]
	 config:
	{KubernetesVersion:v1.27.3 ClusterName:addons-764200 Namespace:default APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:crio CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI: NodeIP: NodePort:8443 NodeName:}
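	The drop-in above clears the packaged ExecStart with an empty `ExecStart=` line (required by systemd before redefining a oneshot command) and relaunches the kubelet against the CRI-O socket with the node IP and hostname override. A hypothetical way to see the merged unit on the node:

	# show kubelet.service together with the 10-kubeadm.conf drop-in
	systemctl cat kubelet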
	I0731 10:34:22.929875   13443 ssh_runner.go:195] Run: sudo ls /var/lib/minikube/binaries/v1.27.3
	I0731 10:34:22.937279   13443 binaries.go:44] Found k8s binaries, skipping transfer
	I0731 10:34:22.937332   13443 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/kubelet.service.d /lib/systemd/system /var/tmp/minikube
	I0731 10:34:22.944566   13443 ssh_runner.go:362] scp memory --> /etc/systemd/system/kubelet.service.d/10-kubeadm.conf (423 bytes)
	I0731 10:34:22.958998   13443 ssh_runner.go:362] scp memory --> /lib/systemd/system/kubelet.service (352 bytes)
	I0731 10:34:22.973428   13443 ssh_runner.go:362] scp memory --> /var/tmp/minikube/kubeadm.yaml.new (2094 bytes)
	I0731 10:34:22.987337   13443 ssh_runner.go:195] Run: grep 192.168.49.2	control-plane.minikube.internal$ /etc/hosts
	I0731 10:34:22.990028   13443 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\tcontrol-plane.minikube.internal$' "/etc/hosts"; echo "192.168.49.2	control-plane.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
	I0731 10:34:22.998633   13443 certs.go:56] Setting up /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/addons-764200 for IP: 192.168.49.2
	I0731 10:34:22.998661   13443 certs.go:190] acquiring lock for shared ca certs: {Name:mke1f008d411b97835fe7ef4c9ac6bdba0705009 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
	I0731 10:34:22.998769   13443 certs.go:204] generating minikubeCA CA: /home/jenkins/minikube-integration/16969-5799/.minikube/ca.key
	I0731 10:34:23.230407   13443 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/16969-5799/.minikube/ca.crt ...
	I0731 10:34:23.230433   13443 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/16969-5799/.minikube/ca.crt: {Name:mka636e2da7f3ee5b6faa152827a3d67cd2d7230 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
	I0731 10:34:23.230609   13443 crypto.go:164] Writing key to /home/jenkins/minikube-integration/16969-5799/.minikube/ca.key ...
	I0731 10:34:23.230623   13443 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/16969-5799/.minikube/ca.key: {Name:mk1973dfecca2375aa20e31ab7b247068b85c2ca Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
	I0731 10:34:23.230717   13443 certs.go:204] generating proxyClientCA CA: /home/jenkins/minikube-integration/16969-5799/.minikube/proxy-client-ca.key
	I0731 10:34:23.697025   13443 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/16969-5799/.minikube/proxy-client-ca.crt ...
	I0731 10:34:23.697057   13443 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/16969-5799/.minikube/proxy-client-ca.crt: {Name:mk4710b37b19bd3db156e12feb0cfcfd935133dc Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
	I0731 10:34:23.697248   13443 crypto.go:164] Writing key to /home/jenkins/minikube-integration/16969-5799/.minikube/proxy-client-ca.key ...
	I0731 10:34:23.697264   13443 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/16969-5799/.minikube/proxy-client-ca.key: {Name:mkb37abe05569766c330a19ffce6d601bfb7ae68 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
	I0731 10:34:23.697394   13443 certs.go:319] generating minikube-user signed cert: /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/addons-764200/client.key
	I0731 10:34:23.697410   13443 crypto.go:68] Generating cert /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/addons-764200/client.crt with IP's: []
	I0731 10:34:23.798042   13443 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/addons-764200/client.crt ...
	I0731 10:34:23.798073   13443 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/addons-764200/client.crt: {Name:mkc478e763146dcfbd653e32eb2cbdcce58dd998 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
	I0731 10:34:23.798261   13443 crypto.go:164] Writing key to /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/addons-764200/client.key ...
	I0731 10:34:23.798277   13443 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/addons-764200/client.key: {Name:mk4bf40da0219a221f205dccbcabe01e2214dbdb Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
	I0731 10:34:23.798370   13443 certs.go:319] generating minikube signed cert: /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/addons-764200/apiserver.key.dd3b5fb2
	I0731 10:34:23.798391   13443 crypto.go:68] Generating cert /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/addons-764200/apiserver.crt.dd3b5fb2 with IP's: [192.168.49.2 10.96.0.1 127.0.0.1 10.0.0.1]
	I0731 10:34:23.917771   13443 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/addons-764200/apiserver.crt.dd3b5fb2 ...
	I0731 10:34:23.917800   13443 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/addons-764200/apiserver.crt.dd3b5fb2: {Name:mkdc83899fba20f7bc398b8047d099deb9f615a9 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
	I0731 10:34:23.917976   13443 crypto.go:164] Writing key to /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/addons-764200/apiserver.key.dd3b5fb2 ...
	I0731 10:34:23.917991   13443 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/addons-764200/apiserver.key.dd3b5fb2: {Name:mk4dfc9dc883a6fe3502581b3354612a6a7c5126 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
	I0731 10:34:23.918083   13443 certs.go:337] copying /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/addons-764200/apiserver.crt.dd3b5fb2 -> /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/addons-764200/apiserver.crt
	I0731 10:34:23.918169   13443 certs.go:341] copying /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/addons-764200/apiserver.key.dd3b5fb2 -> /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/addons-764200/apiserver.key
	I0731 10:34:23.918258   13443 certs.go:319] generating aggregator signed cert: /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/addons-764200/proxy-client.key
	I0731 10:34:23.918282   13443 crypto.go:68] Generating cert /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/addons-764200/proxy-client.crt with IP's: []
	I0731 10:34:24.153061   13443 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/addons-764200/proxy-client.crt ...
	I0731 10:34:24.153091   13443 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/addons-764200/proxy-client.crt: {Name:mk4523921c548253e3d5620add8577c272a16206 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
	I0731 10:34:24.153262   13443 crypto.go:164] Writing key to /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/addons-764200/proxy-client.key ...
	I0731 10:34:24.153275   13443 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/addons-764200/proxy-client.key: {Name:mkbc653077489c1ed014f2c063e1896d6c81aeb0 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
	I0731 10:34:24.153469   13443 certs.go:437] found cert: /home/jenkins/minikube-integration/16969-5799/.minikube/certs/home/jenkins/minikube-integration/16969-5799/.minikube/certs/ca-key.pem (1675 bytes)
	I0731 10:34:24.153524   13443 certs.go:437] found cert: /home/jenkins/minikube-integration/16969-5799/.minikube/certs/home/jenkins/minikube-integration/16969-5799/.minikube/certs/ca.pem (1082 bytes)
	I0731 10:34:24.153575   13443 certs.go:437] found cert: /home/jenkins/minikube-integration/16969-5799/.minikube/certs/home/jenkins/minikube-integration/16969-5799/.minikube/certs/cert.pem (1123 bytes)
	I0731 10:34:24.153613   13443 certs.go:437] found cert: /home/jenkins/minikube-integration/16969-5799/.minikube/certs/home/jenkins/minikube-integration/16969-5799/.minikube/certs/key.pem (1675 bytes)
	I0731 10:34:24.154176   13443 ssh_runner.go:362] scp /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/addons-764200/apiserver.crt --> /var/lib/minikube/certs/apiserver.crt (1399 bytes)
	I0731 10:34:24.175382   13443 ssh_runner.go:362] scp /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/addons-764200/apiserver.key --> /var/lib/minikube/certs/apiserver.key (1679 bytes)
	I0731 10:34:24.195254   13443 ssh_runner.go:362] scp /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/addons-764200/proxy-client.crt --> /var/lib/minikube/certs/proxy-client.crt (1147 bytes)
	I0731 10:34:24.214554   13443 ssh_runner.go:362] scp /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/addons-764200/proxy-client.key --> /var/lib/minikube/certs/proxy-client.key (1675 bytes)
	I0731 10:34:24.233746   13443 ssh_runner.go:362] scp /home/jenkins/minikube-integration/16969-5799/.minikube/ca.crt --> /var/lib/minikube/certs/ca.crt (1111 bytes)
	I0731 10:34:24.253061   13443 ssh_runner.go:362] scp /home/jenkins/minikube-integration/16969-5799/.minikube/ca.key --> /var/lib/minikube/certs/ca.key (1675 bytes)
	I0731 10:34:24.272030   13443 ssh_runner.go:362] scp /home/jenkins/minikube-integration/16969-5799/.minikube/proxy-client-ca.crt --> /var/lib/minikube/certs/proxy-client-ca.crt (1119 bytes)
	I0731 10:34:24.291419   13443 ssh_runner.go:362] scp /home/jenkins/minikube-integration/16969-5799/.minikube/proxy-client-ca.key --> /var/lib/minikube/certs/proxy-client-ca.key (1679 bytes)
	I0731 10:34:24.310493   13443 ssh_runner.go:362] scp /home/jenkins/minikube-integration/16969-5799/.minikube/ca.crt --> /usr/share/ca-certificates/minikubeCA.pem (1111 bytes)
	I0731 10:34:24.329617   13443 ssh_runner.go:362] scp memory --> /var/lib/minikube/kubeconfig (738 bytes)
	I0731 10:34:24.343793   13443 ssh_runner.go:195] Run: openssl version
	I0731 10:34:24.348351   13443 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/minikubeCA.pem && ln -fs /usr/share/ca-certificates/minikubeCA.pem /etc/ssl/certs/minikubeCA.pem"
	I0731 10:34:24.356079   13443 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/minikubeCA.pem
	I0731 10:34:24.359061   13443 certs.go:480] hashing: -rw-r--r-- 1 root root 1111 Jul 31 10:34 /usr/share/ca-certificates/minikubeCA.pem
	I0731 10:34:24.359107   13443 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem
	I0731 10:34:24.364832   13443 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/b5213941.0 || ln -fs /etc/ssl/certs/minikubeCA.pem /etc/ssl/certs/b5213941.0"
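	The symlink name b5213941.0 is not arbitrary: it is the OpenSSL subject hash of minikubeCA.pem, the same value the `openssl x509 -hash` run two lines up computes, and the name OpenSSL uses to look the CA up in /etc/ssl/certs. Cross-check:

	# prints b5213941 -- the hash used for the /etc/ssl/certs symlink above
	openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem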
	I0731 10:34:24.372313   13443 ssh_runner.go:195] Run: ls /var/lib/minikube/certs/etcd
	I0731 10:34:24.374924   13443 certs.go:353] certs directory doesn't exist, likely first start: ls /var/lib/minikube/certs/etcd: Process exited with status 2
	stdout:
	
	stderr:
	ls: cannot access '/var/lib/minikube/certs/etcd': No such file or directory
	I0731 10:34:24.374970   13443 kubeadm.go:404] StartCluster: {Name:addons-764200 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase:v0.0.40@sha256:8cadf23777709e43eca447c47a45f5a4635615129267ce025193040ec92a1631 Memory:4000 CPUs:2 DiskSize:20000 VMDriver: Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:0 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.27.3 ClusterName:addons-764200 Namespace:default APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:crio CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI: NodeIP: NodePort:8443 NodeName:} Nodes:[{Name: IP:192.168.49.2 Port:8443 KubernetesVersion:v1.27.3 ContainerRuntime:crio ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0}
	I0731 10:34:24.375045   13443 cri.go:54] listing CRI containers in root : {State:paused Name: Namespaces:[kube-system]}
	I0731 10:34:24.375094   13443 ssh_runner.go:195] Run: sudo -s eval "crictl ps -a --quiet --label io.kubernetes.pod.namespace=kube-system"
	I0731 10:34:24.406137   13443 cri.go:89] found id: ""
	I0731 10:34:24.406207   13443 ssh_runner.go:195] Run: sudo ls /var/lib/kubelet/kubeadm-flags.env /var/lib/kubelet/config.yaml /var/lib/minikube/etcd
	I0731 10:34:24.413579   13443 ssh_runner.go:195] Run: sudo cp /var/tmp/minikube/kubeadm.yaml.new /var/tmp/minikube/kubeadm.yaml
	I0731 10:34:24.420833   13443 kubeadm.go:226] ignoring SystemVerification for kubeadm because of docker driver
	I0731 10:34:24.420894   13443 ssh_runner.go:195] Run: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf
	I0731 10:34:24.427805   13443 kubeadm.go:152] config check failed, skipping stale config cleanup: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf: Process exited with status 2
	stdout:
	
	stderr:
	ls: cannot access '/etc/kubernetes/admin.conf': No such file or directory
	ls: cannot access '/etc/kubernetes/kubelet.conf': No such file or directory
	ls: cannot access '/etc/kubernetes/controller-manager.conf': No such file or directory
	ls: cannot access '/etc/kubernetes/scheduler.conf': No such file or directory
	I0731 10:34:24.427842   13443 ssh_runner.go:286] Start: /bin/bash -c "sudo env PATH="/var/lib/minikube/binaries/v1.27.3:$PATH" kubeadm init --config /var/tmp/minikube/kubeadm.yaml  --ignore-preflight-errors=DirAvailable--etc-kubernetes-manifests,DirAvailable--var-lib-minikube,DirAvailable--var-lib-minikube-etcd,FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml,FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml,FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml,FileAvailable--etc-kubernetes-manifests-etcd.yaml,Port-10250,Swap,NumCPU,Mem,SystemVerification,FileContent--proc-sys-net-bridge-bridge-nf-call-iptables"
	I0731 10:34:24.468675   13443 kubeadm.go:322] [init] Using Kubernetes version: v1.27.3
	I0731 10:34:24.468758   13443 kubeadm.go:322] [preflight] Running pre-flight checks
	I0731 10:34:24.501320   13443 kubeadm.go:322] [preflight] The system verification failed. Printing the output from the verification:
	I0731 10:34:24.501437   13443 kubeadm.go:322] KERNEL_VERSION: 5.15.0-1038-gcp
	I0731 10:34:24.501501   13443 kubeadm.go:322] OS: Linux
	I0731 10:34:24.501567   13443 kubeadm.go:322] CGROUPS_CPU: enabled
	I0731 10:34:24.501638   13443 kubeadm.go:322] CGROUPS_CPUACCT: enabled
	I0731 10:34:24.501716   13443 kubeadm.go:322] CGROUPS_CPUSET: enabled
	I0731 10:34:24.501772   13443 kubeadm.go:322] CGROUPS_DEVICES: enabled
	I0731 10:34:24.501842   13443 kubeadm.go:322] CGROUPS_FREEZER: enabled
	I0731 10:34:24.501922   13443 kubeadm.go:322] CGROUPS_MEMORY: enabled
	I0731 10:34:24.501992   13443 kubeadm.go:322] CGROUPS_PIDS: enabled
	I0731 10:34:24.502065   13443 kubeadm.go:322] CGROUPS_HUGETLB: enabled
	I0731 10:34:24.502132   13443 kubeadm.go:322] CGROUPS_BLKIO: enabled
	I0731 10:34:24.559259   13443 kubeadm.go:322] [preflight] Pulling images required for setting up a Kubernetes cluster
	I0731 10:34:24.559464   13443 kubeadm.go:322] [preflight] This might take a minute or two, depending on the speed of your internet connection
	I0731 10:34:24.559552   13443 kubeadm.go:322] [preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
	I0731 10:34:24.737277   13443 kubeadm.go:322] [certs] Using certificateDir folder "/var/lib/minikube/certs"
	I0731 10:34:24.739925   13443 out.go:204]   - Generating certificates and keys ...
	I0731 10:34:24.740054   13443 kubeadm.go:322] [certs] Using existing ca certificate authority
	I0731 10:34:24.740148   13443 kubeadm.go:322] [certs] Using existing apiserver certificate and key on disk
	I0731 10:34:24.914337   13443 kubeadm.go:322] [certs] Generating "apiserver-kubelet-client" certificate and key
	I0731 10:34:25.009698   13443 kubeadm.go:322] [certs] Generating "front-proxy-ca" certificate and key
	I0731 10:34:25.114962   13443 kubeadm.go:322] [certs] Generating "front-proxy-client" certificate and key
	I0731 10:34:25.199748   13443 kubeadm.go:322] [certs] Generating "etcd/ca" certificate and key
	I0731 10:34:25.500417   13443 kubeadm.go:322] [certs] Generating "etcd/server" certificate and key
	I0731 10:34:25.500534   13443 kubeadm.go:322] [certs] etcd/server serving cert is signed for DNS names [addons-764200 localhost] and IPs [192.168.49.2 127.0.0.1 ::1]
	I0731 10:34:25.730573   13443 kubeadm.go:322] [certs] Generating "etcd/peer" certificate and key
	I0731 10:34:25.730808   13443 kubeadm.go:322] [certs] etcd/peer serving cert is signed for DNS names [addons-764200 localhost] and IPs [192.168.49.2 127.0.0.1 ::1]
	I0731 10:34:25.854668   13443 kubeadm.go:322] [certs] Generating "etcd/healthcheck-client" certificate and key
	I0731 10:34:25.894114   13443 kubeadm.go:322] [certs] Generating "apiserver-etcd-client" certificate and key
	I0731 10:34:25.931513   13443 kubeadm.go:322] [certs] Generating "sa" key and public key
	I0731 10:34:25.931581   13443 kubeadm.go:322] [kubeconfig] Using kubeconfig folder "/etc/kubernetes"
	I0731 10:34:26.032151   13443 kubeadm.go:322] [kubeconfig] Writing "admin.conf" kubeconfig file
	I0731 10:34:26.142899   13443 kubeadm.go:322] [kubeconfig] Writing "kubelet.conf" kubeconfig file
	I0731 10:34:26.225173   13443 kubeadm.go:322] [kubeconfig] Writing "controller-manager.conf" kubeconfig file
	I0731 10:34:26.359546   13443 kubeadm.go:322] [kubeconfig] Writing "scheduler.conf" kubeconfig file
	I0731 10:34:26.367801   13443 kubeadm.go:322] [kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
	I0731 10:34:26.368594   13443 kubeadm.go:322] [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
	I0731 10:34:26.368665   13443 kubeadm.go:322] [kubelet-start] Starting the kubelet
	I0731 10:34:26.447108   13443 kubeadm.go:322] [control-plane] Using manifest folder "/etc/kubernetes/manifests"
	I0731 10:34:26.449404   13443 out.go:204]   - Booting up control plane ...
	I0731 10:34:26.449509   13443 kubeadm.go:322] [control-plane] Creating static Pod manifest for "kube-apiserver"
	I0731 10:34:26.450623   13443 kubeadm.go:322] [control-plane] Creating static Pod manifest for "kube-controller-manager"
	I0731 10:34:26.451462   13443 kubeadm.go:322] [control-plane] Creating static Pod manifest for "kube-scheduler"
	I0731 10:34:26.452236   13443 kubeadm.go:322] [etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
	I0731 10:34:26.454102   13443 kubeadm.go:322] [wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
	I0731 10:34:31.455634   13443 kubeadm.go:322] [apiclient] All control plane components are healthy after 5.001565 seconds
	I0731 10:34:31.455802   13443 kubeadm.go:322] [upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
	I0731 10:34:31.466989   13443 kubeadm.go:322] [kubelet] Creating a ConfigMap "kubelet-config" in namespace kube-system with the configuration for the kubelets in the cluster
	I0731 10:34:32.007123   13443 kubeadm.go:322] [upload-certs] Skipping phase. Please see --upload-certs
	I0731 10:34:32.007465   13443 kubeadm.go:322] [mark-control-plane] Marking the node addons-764200 as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]
	I0731 10:34:32.516717   13443 kubeadm.go:322] [bootstrap-token] Using token: 7xmf4t.o4sft14gm2pn9h03
	I0731 10:34:32.518223   13443 out.go:204]   - Configuring RBAC rules ...
	I0731 10:34:32.518358   13443 kubeadm.go:322] [bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
	I0731 10:34:32.521885   13443 kubeadm.go:322] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes
	I0731 10:34:32.528830   13443 kubeadm.go:322] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
	I0731 10:34:32.531278   13443 kubeadm.go:322] [bootstrap-token] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
	I0731 10:34:32.533778   13443 kubeadm.go:322] [bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
	I0731 10:34:32.536221   13443 kubeadm.go:322] [bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
	I0731 10:34:32.545409   13443 kubeadm.go:322] [kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
	I0731 10:34:32.772312   13443 kubeadm.go:322] [addons] Applied essential addon: CoreDNS
	I0731 10:34:32.926553   13443 kubeadm.go:322] [addons] Applied essential addon: kube-proxy
	I0731 10:34:32.927460   13443 kubeadm.go:322] 
	I0731 10:34:32.927596   13443 kubeadm.go:322] Your Kubernetes control-plane has initialized successfully!
	I0731 10:34:32.927617   13443 kubeadm.go:322] 
	I0731 10:34:32.927733   13443 kubeadm.go:322] To start using your cluster, you need to run the following as a regular user:
	I0731 10:34:32.927753   13443 kubeadm.go:322] 
	I0731 10:34:32.927800   13443 kubeadm.go:322]   mkdir -p $HOME/.kube
	I0731 10:34:32.927884   13443 kubeadm.go:322]   sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
	I0731 10:34:32.927959   13443 kubeadm.go:322]   sudo chown $(id -u):$(id -g) $HOME/.kube/config
	I0731 10:34:32.927968   13443 kubeadm.go:322] 
	I0731 10:34:32.928049   13443 kubeadm.go:322] Alternatively, if you are the root user, you can run:
	I0731 10:34:32.928067   13443 kubeadm.go:322] 
	I0731 10:34:32.928138   13443 kubeadm.go:322]   export KUBECONFIG=/etc/kubernetes/admin.conf
	I0731 10:34:32.928151   13443 kubeadm.go:322] 
	I0731 10:34:32.928223   13443 kubeadm.go:322] You should now deploy a pod network to the cluster.
	I0731 10:34:32.928334   13443 kubeadm.go:322] Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
	I0731 10:34:32.928435   13443 kubeadm.go:322]   https://kubernetes.io/docs/concepts/cluster-administration/addons/
	I0731 10:34:32.928444   13443 kubeadm.go:322] 
	I0731 10:34:32.928563   13443 kubeadm.go:322] You can now join any number of control-plane nodes by copying certificate authorities
	I0731 10:34:32.928688   13443 kubeadm.go:322] and service account keys on each node and then running the following as root:
	I0731 10:34:32.928697   13443 kubeadm.go:322] 
	I0731 10:34:32.928807   13443 kubeadm.go:322]   kubeadm join control-plane.minikube.internal:8443 --token 7xmf4t.o4sft14gm2pn9h03 \
	I0731 10:34:32.928956   13443 kubeadm.go:322] 	--discovery-token-ca-cert-hash sha256:332f1e2c8f6e50afb8fc2995698452a3be7de85c383b512a61c05acf2d3047a4 \
	I0731 10:34:32.928988   13443 kubeadm.go:322] 	--control-plane 
	I0731 10:34:32.929006   13443 kubeadm.go:322] 
	I0731 10:34:32.929126   13443 kubeadm.go:322] Then you can join any number of worker nodes by running the following on each as root:
	I0731 10:34:32.929137   13443 kubeadm.go:322] 
	I0731 10:34:32.929239   13443 kubeadm.go:322] kubeadm join control-plane.minikube.internal:8443 --token 7xmf4t.o4sft14gm2pn9h03 \
	I0731 10:34:32.929354   13443 kubeadm.go:322] 	--discovery-token-ca-cert-hash sha256:332f1e2c8f6e50afb8fc2995698452a3be7de85c383b512a61c05acf2d3047a4 
	I0731 10:34:32.930685   13443 kubeadm.go:322] 	[WARNING SystemVerification]: failed to parse kernel config: unable to load kernel module: "configs", output: "modprobe: FATAL: Module configs not found in directory /lib/modules/5.15.0-1038-gcp\n", err: exit status 1
	I0731 10:34:32.930824   13443 kubeadm.go:322] 	[WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'
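	The --discovery-token-ca-cert-hash in the join commands above is the SHA-256 of the cluster CA's public key. Per the kubeadm documentation, it can be recomputed from the CA certificate on the node:

	# recompute the discovery-token-ca-cert-hash from the cluster CA
	openssl x509 -pubkey -in /var/lib/minikube/certs/ca.crt \
	  | openssl rsa -pubin -outform der 2>/dev/null \
	  | openssl dgst -sha256 -hex | sed 's/^.* //'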
	I0731 10:34:32.930859   13443 cni.go:84] Creating CNI manager for ""
	I0731 10:34:32.930867   13443 cni.go:143] "docker" driver + "crio" runtime found, recommending kindnet
	I0731 10:34:32.932609   13443 out.go:177] * Configuring CNI (Container Networking Interface) ...
	I0731 10:34:32.934015   13443 ssh_runner.go:195] Run: stat /opt/cni/bin/portmap
	I0731 10:34:32.937628   13443 cni.go:182] applying CNI manifest using /var/lib/minikube/binaries/v1.27.3/kubectl ...
	I0731 10:34:32.937642   13443 ssh_runner.go:362] scp memory --> /var/tmp/minikube/cni.yaml (2438 bytes)
	I0731 10:34:32.953110   13443 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.27.3/kubectl apply --kubeconfig=/var/lib/minikube/kubeconfig -f /var/tmp/minikube/cni.yaml
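	With the CNI manifest applied, the expected follow-up is that kindnet's DaemonSet pods come up in kube-system; the label selector below is an assumption based on minikube's bundled kindnet manifest, not something shown in this log:

	# hypothetical check that the kindnet DaemonSet pods are running
	kubectl -n kube-system get pods -l app=kindnet -o wide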
	I0731 10:34:33.600368   13443 ssh_runner.go:195] Run: /bin/bash -c "cat /proc/$(pgrep kube-apiserver)/oom_adj"
	I0731 10:34:33.600441   13443 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.27.3/kubectl create clusterrolebinding minikube-rbac --clusterrole=cluster-admin --serviceaccount=kube-system:default --kubeconfig=/var/lib/minikube/kubeconfig
	I0731 10:34:33.600441   13443 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.27.3/kubectl label nodes minikube.k8s.io/version=v1.31.1 minikube.k8s.io/commit=a7848ba25aaaad8ebb50e721c0d343e471188fc7 minikube.k8s.io/name=addons-764200 minikube.k8s.io/updated_at=2023_07_31T10_34_33_0700 minikube.k8s.io/primary=true --all --overwrite --kubeconfig=/var/lib/minikube/kubeconfig
	I0731 10:34:33.607338   13443 ops.go:34] apiserver oom_adj: -16
	I0731 10:34:33.671113   13443 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.27.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I0731 10:34:33.742807   13443 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.27.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I0731 10:34:34.302610   13443 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.27.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I0731 10:34:34.802755   13443 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.27.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I0731 10:34:35.302574   13443 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.27.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I0731 10:34:35.802040   13443 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.27.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I0731 10:34:36.302153   13443 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.27.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I0731 10:34:36.802171   13443 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.27.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I0731 10:34:37.302651   13443 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.27.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I0731 10:34:37.802604   13443 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.27.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I0731 10:34:38.301855   13443 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.27.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I0731 10:34:38.801879   13443 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.27.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I0731 10:34:39.301915   13443 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.27.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I0731 10:34:39.802284   13443 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.27.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I0731 10:34:40.302473   13443 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.27.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I0731 10:34:40.801834   13443 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.27.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I0731 10:34:41.302387   13443 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.27.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I0731 10:34:41.802475   13443 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.27.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I0731 10:34:42.301765   13443 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.27.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I0731 10:34:42.801802   13443 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.27.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I0731 10:34:43.301985   13443 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.27.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I0731 10:34:43.802745   13443 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.27.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I0731 10:34:44.301989   13443 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.27.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I0731 10:34:44.802358   13443 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.27.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I0731 10:34:45.301846   13443 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.27.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I0731 10:34:45.801753   13443 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.27.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I0731 10:34:45.864726   13443 kubeadm.go:1081] duration metric: took 12.264344597s to wait for elevateKubeSystemPrivileges.
	I0731 10:34:45.864756   13443 kubeadm.go:406] StartCluster complete in 21.489789152s
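	The burst of identical `kubectl get sa default` runs between 10:34:33 and 10:34:45 is a poll loop: minikube retries roughly every 500ms until kube-controller-manager has created the default ServiceAccount, and that wait is what the 12.26s elevateKubeSystemPrivileges metric measures. Reduced to a sketch:

	# poll until the default ServiceAccount exists
	until sudo /var/lib/minikube/binaries/v1.27.3/kubectl get sa default \
	      --kubeconfig=/var/lib/minikube/kubeconfig >/dev/null 2>&1; do
	  sleep 0.5
	done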
	I0731 10:34:45.864773   13443 settings.go:142] acquiring lock: {Name:mk1af30688f984f447d2a45e33362813edbbcab6 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
	I0731 10:34:45.864864   13443 settings.go:150] Updating kubeconfig:  /home/jenkins/minikube-integration/16969-5799/kubeconfig
	I0731 10:34:45.865190   13443 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/16969-5799/kubeconfig: {Name:mkf8010bda730fc5f9ac63bea8b114101911b8e2 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
	I0731 10:34:45.865351   13443 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.27.3/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml"
	I0731 10:34:45.865434   13443 addons.go:499] enable addons start: toEnable=map[ambassador:false auto-pause:false cloud-spanner:true csi-hostpath-driver:true dashboard:false default-storageclass:true efk:false freshpod:false gcp-auth:true gvisor:false headlamp:false helm-tiller:true inaccel:false ingress:true ingress-dns:true inspektor-gadget:true istio:false istio-provisioner:false kong:false kubevirt:false logviewer:false metallb:false metrics-server:true nvidia-driver-installer:false nvidia-gpu-device-plugin:false olm:false pod-security-policy:false portainer:false registry:true registry-aliases:false registry-creds:false storage-provisioner:true storage-provisioner-gluster:false volumesnapshots:true]
	I0731 10:34:45.865533   13443 addons.go:69] Setting volumesnapshots=true in profile "addons-764200"
	I0731 10:34:45.865558   13443 addons.go:231] Setting addon volumesnapshots=true in "addons-764200"
	I0731 10:34:45.865556   13443 addons.go:69] Setting ingress=true in profile "addons-764200"
	I0731 10:34:45.865579   13443 addons.go:231] Setting addon ingress=true in "addons-764200"
	I0731 10:34:45.865587   13443 config.go:182] Loaded profile config "addons-764200": Driver=docker, ContainerRuntime=crio, KubernetesVersion=v1.27.3
	I0731 10:34:45.865608   13443 host.go:66] Checking if "addons-764200" exists ...
	I0731 10:34:45.865624   13443 addons.go:69] Setting metrics-server=true in profile "addons-764200"
	I0731 10:34:45.865637   13443 addons.go:231] Setting addon metrics-server=true in "addons-764200"
	I0731 10:34:45.865644   13443 host.go:66] Checking if "addons-764200" exists ...
	I0731 10:34:45.865675   13443 host.go:66] Checking if "addons-764200" exists ...
	I0731 10:34:45.865703   13443 addons.go:69] Setting ingress-dns=true in profile "addons-764200"
	I0731 10:34:45.865748   13443 addons.go:231] Setting addon ingress-dns=true in "addons-764200"
	I0731 10:34:45.865814   13443 host.go:66] Checking if "addons-764200" exists ...
	I0731 10:34:45.866072   13443 cli_runner.go:164] Run: docker container inspect addons-764200 --format={{.State.Status}}
	I0731 10:34:45.866127   13443 cli_runner.go:164] Run: docker container inspect addons-764200 --format={{.State.Status}}
	I0731 10:34:45.866145   13443 addons.go:69] Setting cloud-spanner=true in profile "addons-764200"
	I0731 10:34:45.866161   13443 addons.go:231] Setting addon cloud-spanner=true in "addons-764200"
	I0731 10:34:45.866213   13443 host.go:66] Checking if "addons-764200" exists ...
	I0731 10:34:45.866131   13443 cli_runner.go:164] Run: docker container inspect addons-764200 --format={{.State.Status}}
	I0731 10:34:45.866280   13443 cli_runner.go:164] Run: docker container inspect addons-764200 --format={{.State.Status}}
	I0731 10:34:45.866318   13443 addons.go:69] Setting inspektor-gadget=true in profile "addons-764200"
	I0731 10:34:45.866330   13443 addons.go:231] Setting addon inspektor-gadget=true in "addons-764200"
	I0731 10:34:45.866365   13443 host.go:66] Checking if "addons-764200" exists ...
	I0731 10:34:45.866462   13443 addons.go:69] Setting csi-hostpath-driver=true in profile "addons-764200"
	I0731 10:34:45.866490   13443 addons.go:69] Setting registry=true in profile "addons-764200"
	I0731 10:34:45.866503   13443 addons.go:231] Setting addon registry=true in "addons-764200"
	I0731 10:34:45.866531   13443 addons.go:231] Setting addon csi-hostpath-driver=true in "addons-764200"
	I0731 10:34:45.866536   13443 host.go:66] Checking if "addons-764200" exists ...
	I0731 10:34:45.866576   13443 host.go:66] Checking if "addons-764200" exists ...
	I0731 10:34:45.866606   13443 cli_runner.go:164] Run: docker container inspect addons-764200 --format={{.State.Status}}
	I0731 10:34:45.866692   13443 addons.go:69] Setting gcp-auth=true in profile "addons-764200"
	I0731 10:34:45.866709   13443 mustload.go:65] Loading cluster: addons-764200
	I0731 10:34:45.866755   13443 cli_runner.go:164] Run: docker container inspect addons-764200 --format={{.State.Status}}
	I0731 10:34:45.866847   13443 addons.go:69] Setting storage-provisioner=true in profile "addons-764200"
	I0731 10:34:45.866858   13443 addons.go:231] Setting addon storage-provisioner=true in "addons-764200"
	I0731 10:34:45.866889   13443 host.go:66] Checking if "addons-764200" exists ...
	I0731 10:34:45.866888   13443 config.go:182] Loaded profile config "addons-764200": Driver=docker, ContainerRuntime=crio, KubernetesVersion=v1.27.3
	I0731 10:34:45.866958   13443 cli_runner.go:164] Run: docker container inspect addons-764200 --format={{.State.Status}}
	I0731 10:34:45.867018   13443 cli_runner.go:164] Run: docker container inspect addons-764200 --format={{.State.Status}}
	I0731 10:34:45.867073   13443 addons.go:69] Setting helm-tiller=true in profile "addons-764200"
	I0731 10:34:45.867084   13443 addons.go:231] Setting addon helm-tiller=true in "addons-764200"
	I0731 10:34:45.867113   13443 host.go:66] Checking if "addons-764200" exists ...
	I0731 10:34:45.867121   13443 cli_runner.go:164] Run: docker container inspect addons-764200 --format={{.State.Status}}
	I0731 10:34:45.867262   13443 cli_runner.go:164] Run: docker container inspect addons-764200 --format={{.State.Status}}
	I0731 10:34:45.867247   13443 addons.go:69] Setting default-storageclass=true in profile "addons-764200"
	I0731 10:34:45.867290   13443 addons_storage_classes.go:33] enableOrDisableStorageClasses default-storageclass=true on "addons-764200"
	I0731 10:34:45.867514   13443 cli_runner.go:164] Run: docker container inspect addons-764200 --format={{.State.Status}}
	I0731 10:34:45.867607   13443 cli_runner.go:164] Run: docker container inspect addons-764200 --format={{.State.Status}}
	I0731 10:34:45.892861   13443 out.go:177]   - Using image gcr.io/k8s-minikube/minikube-ingress-dns:0.0.2
	I0731 10:34:45.894464   13443 addons.go:423] installing /etc/kubernetes/addons/ingress-dns-pod.yaml
	I0731 10:34:45.894489   13443 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/ingress-dns-pod.yaml (2442 bytes)
	I0731 10:34:45.894530   13443 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-764200
	I0731 10:34:45.901529   13443 out.go:177]   - Using image registry.k8s.io/ingress-nginx/kube-webhook-certgen:v20230407
	I0731 10:34:45.903908   13443 out.go:177]   - Using image registry.k8s.io/ingress-nginx/kube-webhook-certgen:v20230407
	I0731 10:34:45.903435   13443 out.go:177]   - Using image registry.k8s.io/sig-storage/snapshot-controller:v6.1.0
	I0731 10:34:45.908187   13443 addons.go:423] installing /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml
	I0731 10:34:45.908205   13443 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml (934 bytes)
	I0731 10:34:45.908261   13443 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-764200
	I0731 10:34:45.906034   13443 out.go:177]   - Using image registry.k8s.io/ingress-nginx/controller:v1.8.1
	I0731 10:34:45.910452   13443 out.go:177]   - Using image registry.k8s.io/metrics-server/metrics-server:v0.6.4
	I0731 10:34:45.911670   13443 addons.go:423] installing /etc/kubernetes/addons/metrics-apiservice.yaml
	I0731 10:34:45.911685   13443 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/metrics-apiservice.yaml (424 bytes)
	I0731 10:34:45.911736   13443 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-764200
	I0731 10:34:45.910423   13443 addons.go:423] installing /etc/kubernetes/addons/ingress-deploy.yaml
	I0731 10:34:45.911916   13443 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/ingress-deploy.yaml (16083 bytes)
	I0731 10:34:45.911954   13443 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-764200
	I0731 10:34:45.913988   13443 out.go:177]   - Using image gcr.io/cloud-spanner-emulator/emulator:1.5.7
	I0731 10:34:45.912831   13443 kapi.go:248] "coredns" deployment in "kube-system" namespace and "addons-764200" context rescaled to 1 replicas
	I0731 10:34:45.915423   13443 start.go:223] Will wait 6m0s for node &{Name: IP:192.168.49.2 Port:8443 KubernetesVersion:v1.27.3 ContainerRuntime:crio ControlPlane:true Worker:true}
	I0731 10:34:45.917125   13443 out.go:177] * Verifying Kubernetes components...
	I0731 10:34:45.915610   13443 addons.go:423] installing /etc/kubernetes/addons/deployment.yaml
	I0731 10:34:45.918344   13443 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/deployment.yaml (1003 bytes)
	I0731 10:34:45.918402   13443 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-764200
	I0731 10:34:45.918505   13443 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet
	I0731 10:34:45.921256   13443 out.go:177]   - Using image registry.k8s.io/sig-storage/csi-attacher:v4.0.0
	I0731 10:34:45.922609   13443 out.go:177]   - Using image ghcr.io/inspektor-gadget/inspektor-gadget:v0.18.1
	I0731 10:34:45.923827   13443 addons.go:423] installing /etc/kubernetes/addons/ig-namespace.yaml
	I0731 10:34:45.923842   13443 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/ig-namespace.yaml (55 bytes)
	I0731 10:34:45.923903   13443 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-764200
	I0731 10:34:45.922590   13443 out.go:177]   - Using image registry.k8s.io/sig-storage/csi-external-health-monitor-controller:v0.7.0
	I0731 10:34:45.925349   13443 out.go:177]   - Using image registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.6.0
	I0731 10:34:45.926690   13443 out.go:177]   - Using image registry.k8s.io/sig-storage/hostpathplugin:v1.9.0
	I0731 10:34:45.928029   13443 out.go:177]   - Using image registry.k8s.io/sig-storage/livenessprobe:v2.8.0
	I0731 10:34:45.929704   13443 out.go:177]   - Using image registry.k8s.io/sig-storage/csi-resizer:v1.6.0
	I0731 10:34:45.931208   13443 out.go:177]   - Using image registry.k8s.io/sig-storage/csi-snapshotter:v6.1.0
	I0731 10:34:45.936029   13443 out.go:177]   - Using image registry.k8s.io/sig-storage/csi-provisioner:v3.3.0
	I0731 10:34:45.937304   13443 addons.go:423] installing /etc/kubernetes/addons/rbac-external-attacher.yaml
	I0731 10:34:45.937321   13443 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/rbac-external-attacher.yaml (3073 bytes)
	I0731 10:34:45.936579   13443 out.go:177]   - Using image ghcr.io/helm/tiller:v2.17.0
	I0731 10:34:45.938547   13443 addons.go:423] installing /etc/kubernetes/addons/helm-tiller-dp.yaml
	I0731 10:34:45.938562   13443 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/helm-tiller-dp.yaml (2422 bytes)
	I0731 10:34:45.936589   13443 out.go:177]   - Using image gcr.io/k8s-minikube/storage-provisioner:v5
	I0731 10:34:45.939980   13443 addons.go:423] installing /etc/kubernetes/addons/storage-provisioner.yaml
	I0731 10:34:45.939995   13443 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/storage-provisioner.yaml (2676 bytes)
	I0731 10:34:45.938606   13443 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-764200
	I0731 10:34:45.940010   13443 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32772 SSHKeyPath:/home/jenkins/minikube-integration/16969-5799/.minikube/machines/addons-764200/id_rsa Username:docker}
	I0731 10:34:45.940077   13443 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-764200
	I0731 10:34:45.936899   13443 host.go:66] Checking if "addons-764200" exists ...
	I0731 10:34:45.937381   13443 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-764200
	I0731 10:34:45.944206   13443 out.go:177]   - Using image docker.io/registry:2.8.1
	I0731 10:34:45.946612   13443 out.go:177]   - Using image gcr.io/k8s-minikube/kube-registry-proxy:0.0.5
	I0731 10:34:45.948097   13443 addons.go:423] installing /etc/kubernetes/addons/registry-rc.yaml
	I0731 10:34:45.948113   13443 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/registry-rc.yaml (798 bytes)
	I0731 10:34:45.948159   13443 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-764200
	I0731 10:34:45.954684   13443 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32772 SSHKeyPath:/home/jenkins/minikube-integration/16969-5799/.minikube/machines/addons-764200/id_rsa Username:docker}
	I0731 10:34:45.962387   13443 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32772 SSHKeyPath:/home/jenkins/minikube-integration/16969-5799/.minikube/machines/addons-764200/id_rsa Username:docker}
	I0731 10:34:45.963372   13443 addons.go:231] Setting addon default-storageclass=true in "addons-764200"
	I0731 10:34:45.963412   13443 host.go:66] Checking if "addons-764200" exists ...
	I0731 10:34:45.963872   13443 cli_runner.go:164] Run: docker container inspect addons-764200 --format={{.State.Status}}
	I0731 10:34:45.968928   13443 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32772 SSHKeyPath:/home/jenkins/minikube-integration/16969-5799/.minikube/machines/addons-764200/id_rsa Username:docker}
	I0731 10:34:45.977006   13443 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32772 SSHKeyPath:/home/jenkins/minikube-integration/16969-5799/.minikube/machines/addons-764200/id_rsa Username:docker}
	I0731 10:34:45.981822   13443 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32772 SSHKeyPath:/home/jenkins/minikube-integration/16969-5799/.minikube/machines/addons-764200/id_rsa Username:docker}
	I0731 10:34:45.984083   13443 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32772 SSHKeyPath:/home/jenkins/minikube-integration/16969-5799/.minikube/machines/addons-764200/id_rsa Username:docker}
	I0731 10:34:45.985187   13443 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32772 SSHKeyPath:/home/jenkins/minikube-integration/16969-5799/.minikube/machines/addons-764200/id_rsa Username:docker}
	I0731 10:34:45.986108   13443 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32772 SSHKeyPath:/home/jenkins/minikube-integration/16969-5799/.minikube/machines/addons-764200/id_rsa Username:docker}
	I0731 10:34:45.988182   13443 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32772 SSHKeyPath:/home/jenkins/minikube-integration/16969-5799/.minikube/machines/addons-764200/id_rsa Username:docker}
	I0731 10:34:45.992448   13443 addons.go:423] installing /etc/kubernetes/addons/storageclass.yaml
	I0731 10:34:45.992462   13443 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/storageclass.yaml (271 bytes)
	I0731 10:34:45.992502   13443 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-764200
	I0731 10:34:46.017037   13443 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32772 SSHKeyPath:/home/jenkins/minikube-integration/16969-5799/.minikube/machines/addons-764200/id_rsa Username:docker}
	W0731 10:34:46.018378   13443 sshutil.go:64] dial failure (will retry): ssh: handshake failed: EOF
	I0731 10:34:46.018414   13443 retry.go:31] will retry after 162.111506ms: ssh: handshake failed: EOF
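
sshutil dials the forwarded port as user docker using the machine's private key; a handshake EOF while the container is still bringing up sshd is treated as transient, so retry.go backs off and redials. The manual session equivalent — every value (key path, port, user) taken from the sshutil lines above, only the ssh invocation itself is an illustration — would be:

	ssh -i /home/jenkins/minikube-integration/16969-5799/.minikube/machines/addons-764200/id_rsa -p 32772 docker@127.0.0.1
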
	I0731 10:34:46.109555   13443 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.27.3/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml | sed -e '/^        forward . \/etc\/resolv.conf.*/i \        hosts {\n           192.168.49.1 host.minikube.internal\n           fallthrough\n        }' -e '/^        errors *$/i \        log' | sudo /var/lib/minikube/binaries/v1.27.3/kubectl --kubeconfig=/var/lib/minikube/kubeconfig replace -f -"
	I0731 10:34:46.110500   13443 node_ready.go:35] waiting up to 6m0s for node "addons-764200" to be "Ready" ...
	I0731 10:34:46.305650   13443 addons.go:423] installing /etc/kubernetes/addons/ig-serviceaccount.yaml
	I0731 10:34:46.305874   13443 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/ig-serviceaccount.yaml (80 bytes)
	I0731 10:34:46.305847   13443 addons.go:423] installing /etc/kubernetes/addons/rbac-hostpath.yaml
	I0731 10:34:46.305992   13443 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/rbac-hostpath.yaml (4266 bytes)
	I0731 10:34:46.306276   13443 addons.go:423] installing /etc/kubernetes/addons/registry-svc.yaml
	I0731 10:34:46.306288   13443 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/registry-svc.yaml (398 bytes)
	I0731 10:34:46.315286   13443 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.27.3/kubectl apply -f /etc/kubernetes/addons/deployment.yaml
	I0731 10:34:46.315404   13443 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.27.3/kubectl apply -f /etc/kubernetes/addons/ingress-dns-pod.yaml
	I0731 10:34:46.315418   13443 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.27.3/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml
	I0731 10:34:46.319010   13443 addons.go:423] installing /etc/kubernetes/addons/helm-tiller-rbac.yaml
	I0731 10:34:46.319032   13443 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/helm-tiller-rbac.yaml (1188 bytes)
	I0731 10:34:46.323149   13443 addons.go:423] installing /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml
	I0731 10:34:46.323169   13443 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml (6471 bytes)
	I0731 10:34:46.404907   13443 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.27.3/kubectl apply -f /etc/kubernetes/addons/ingress-deploy.yaml
	I0731 10:34:46.416066   13443 addons.go:423] installing /etc/kubernetes/addons/metrics-server-deployment.yaml
	I0731 10:34:46.416143   13443 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/metrics-server-deployment.yaml (1907 bytes)
	I0731 10:34:46.426811   13443 addons.go:423] installing /etc/kubernetes/addons/rbac-external-health-monitor-controller.yaml
	I0731 10:34:46.426885   13443 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/rbac-external-health-monitor-controller.yaml (3038 bytes)
	I0731 10:34:46.504834   13443 addons.go:423] installing /etc/kubernetes/addons/registry-proxy.yaml
	I0731 10:34:46.504857   13443 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/registry-proxy.yaml (947 bytes)
	I0731 10:34:46.514272   13443 addons.go:423] installing /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml
	I0731 10:34:46.514300   13443 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml (23126 bytes)
	I0731 10:34:46.514826   13443 addons.go:423] installing /etc/kubernetes/addons/helm-tiller-svc.yaml
	I0731 10:34:46.514892   13443 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/helm-tiller-svc.yaml (951 bytes)
	I0731 10:34:46.527513   13443 addons.go:423] installing /etc/kubernetes/addons/ig-role.yaml
	I0731 10:34:46.527586   13443 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/ig-role.yaml (210 bytes)
	I0731 10:34:46.605879   13443 addons.go:423] installing /etc/kubernetes/addons/metrics-server-rbac.yaml
	I0731 10:34:46.605973   13443 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/metrics-server-rbac.yaml (2175 bytes)
	I0731 10:34:46.613906   13443 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.27.3/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml
	I0731 10:34:46.627979   13443 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.27.3/kubectl apply -f /etc/kubernetes/addons/helm-tiller-dp.yaml -f /etc/kubernetes/addons/helm-tiller-rbac.yaml -f /etc/kubernetes/addons/helm-tiller-svc.yaml
	I0731 10:34:46.715791   13443 addons.go:423] installing /etc/kubernetes/addons/rbac-external-provisioner.yaml
	I0731 10:34:46.715831   13443 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/rbac-external-provisioner.yaml (4442 bytes)
	I0731 10:34:46.720117   13443 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.27.3/kubectl apply -f /etc/kubernetes/addons/registry-rc.yaml -f /etc/kubernetes/addons/registry-svc.yaml -f /etc/kubernetes/addons/registry-proxy.yaml
	I0731 10:34:46.821780   13443 addons.go:423] installing /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml
	I0731 10:34:46.821805   13443 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml (19582 bytes)
	I0731 10:34:46.906790   13443 addons.go:423] installing /etc/kubernetes/addons/metrics-server-service.yaml
	I0731 10:34:46.906816   13443 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/metrics-server-service.yaml (446 bytes)
	I0731 10:34:46.909001   13443 addons.go:423] installing /etc/kubernetes/addons/ig-rolebinding.yaml
	I0731 10:34:46.909021   13443 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/ig-rolebinding.yaml (244 bytes)
	I0731 10:34:47.106623   13443 addons.go:423] installing /etc/kubernetes/addons/rbac-external-resizer.yaml
	I0731 10:34:47.106691   13443 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/rbac-external-resizer.yaml (2943 bytes)
	I0731 10:34:47.115276   13443 addons.go:423] installing /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml
	I0731 10:34:47.115345   13443 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml (3545 bytes)
	I0731 10:34:47.121381   13443 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.27.3/kubectl apply -f /etc/kubernetes/addons/metrics-apiservice.yaml -f /etc/kubernetes/addons/metrics-server-deployment.yaml -f /etc/kubernetes/addons/metrics-server-rbac.yaml -f /etc/kubernetes/addons/metrics-server-service.yaml
	I0731 10:34:47.206017   13443 addons.go:423] installing /etc/kubernetes/addons/ig-clusterrole.yaml
	I0731 10:34:47.206088   13443 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/ig-clusterrole.yaml (1485 bytes)
	I0731 10:34:47.324782   13443 addons.go:423] installing /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml
	I0731 10:34:47.324868   13443 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml (1475 bytes)
	I0731 10:34:47.505054   13443 addons.go:423] installing /etc/kubernetes/addons/ig-clusterrolebinding.yaml
	I0731 10:34:47.505080   13443 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/ig-clusterrolebinding.yaml (274 bytes)
	I0731 10:34:47.613211   13443 addons.go:423] installing /etc/kubernetes/addons/rbac-external-snapshotter.yaml
	I0731 10:34:47.613241   13443 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/rbac-external-snapshotter.yaml (3149 bytes)
	I0731 10:34:47.619917   13443 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.27.3/kubectl apply -f /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml -f /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml -f /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml
	I0731 10:34:47.717826   13443 ssh_runner.go:235] Completed: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.27.3/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml | sed -e '/^        forward . \/etc\/resolv.conf.*/i \        hosts {\n           192.168.49.1 host.minikube.internal\n           fallthrough\n        }' -e '/^        errors *$/i \        log' | sudo /var/lib/minikube/binaries/v1.27.3/kubectl --kubeconfig=/var/lib/minikube/kubeconfig replace -f -": (1.608224974s)
	I0731 10:34:47.717920   13443 start.go:901] {"host.minikube.internal": 192.168.49.1} host record injected into CoreDNS's ConfigMap
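
The 1.6s bash pipeline above rewrites the coredns ConfigMap in place: sed inserts a hosts block ahead of the forward plugin (and a log directive ahead of errors), then pipes the result back through kubectl replace. The Corefile fragment it injects, reproduced verbatim from the sed script, is:

	        hosts {
	           192.168.49.1 host.minikube.internal
	           fallthrough
	        }

which lets pods resolve host.minikube.internal to the Docker network gateway.
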
	I0731 10:34:47.909002   13443 addons.go:423] installing /etc/kubernetes/addons/csi-hostpath-attacher.yaml
	I0731 10:34:47.909081   13443 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/csi-hostpath-attacher.yaml (2143 bytes)
	I0731 10:34:48.024894   13443 addons.go:423] installing /etc/kubernetes/addons/ig-crd.yaml
	I0731 10:34:48.024968   13443 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/ig-crd.yaml (5216 bytes)
	I0731 10:34:48.124969   13443 node_ready.go:58] node "addons-764200" has status "Ready":"False"
	I0731 10:34:48.305995   13443 addons.go:423] installing /etc/kubernetes/addons/csi-hostpath-driverinfo.yaml
	I0731 10:34:48.306101   13443 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/csi-hostpath-driverinfo.yaml (1274 bytes)
	I0731 10:34:48.408243   13443 addons.go:423] installing /etc/kubernetes/addons/ig-daemonset.yaml
	I0731 10:34:48.408327   13443 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/ig-daemonset.yaml (7741 bytes)
	I0731 10:34:48.606904   13443 addons.go:423] installing /etc/kubernetes/addons/csi-hostpath-plugin.yaml
	I0731 10:34:48.606933   13443 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/csi-hostpath-plugin.yaml (8201 bytes)
	I0731 10:34:48.708155   13443 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.27.3/kubectl apply -f /etc/kubernetes/addons/ig-namespace.yaml -f /etc/kubernetes/addons/ig-serviceaccount.yaml -f /etc/kubernetes/addons/ig-role.yaml -f /etc/kubernetes/addons/ig-rolebinding.yaml -f /etc/kubernetes/addons/ig-clusterrole.yaml -f /etc/kubernetes/addons/ig-clusterrolebinding.yaml -f /etc/kubernetes/addons/ig-crd.yaml -f /etc/kubernetes/addons/ig-daemonset.yaml
	I0731 10:34:49.009533   13443 addons.go:423] installing /etc/kubernetes/addons/csi-hostpath-resizer.yaml
	I0731 10:34:49.009561   13443 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/csi-hostpath-resizer.yaml (2191 bytes)
	I0731 10:34:49.309415   13443 addons.go:423] installing /etc/kubernetes/addons/csi-hostpath-storageclass.yaml
	I0731 10:34:49.309451   13443 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/csi-hostpath-storageclass.yaml (846 bytes)
	I0731 10:34:49.520927   13443 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.27.3/kubectl apply -f /etc/kubernetes/addons/rbac-external-attacher.yaml -f /etc/kubernetes/addons/rbac-hostpath.yaml -f /etc/kubernetes/addons/rbac-external-health-monitor-controller.yaml -f /etc/kubernetes/addons/rbac-external-provisioner.yaml -f /etc/kubernetes/addons/rbac-external-resizer.yaml -f /etc/kubernetes/addons/rbac-external-snapshotter.yaml -f /etc/kubernetes/addons/csi-hostpath-attacher.yaml -f /etc/kubernetes/addons/csi-hostpath-driverinfo.yaml -f /etc/kubernetes/addons/csi-hostpath-plugin.yaml -f /etc/kubernetes/addons/csi-hostpath-resizer.yaml -f /etc/kubernetes/addons/csi-hostpath-storageclass.yaml
	I0731 10:34:50.219795   13443 node_ready.go:58] node "addons-764200" has status "Ready":"False"
	I0731 10:34:50.619279   13443 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.27.3/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml: (4.303835122s)
	I0731 10:34:50.619430   13443 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.27.3/kubectl apply -f /etc/kubernetes/addons/deployment.yaml: (4.30410095s)
	I0731 10:34:50.619474   13443 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.27.3/kubectl apply -f /etc/kubernetes/addons/ingress-dns-pod.yaml: (4.304056525s)
	I0731 10:34:51.735031   13443 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.27.3/kubectl apply -f /etc/kubernetes/addons/ingress-deploy.yaml: (5.330082939s)
	I0731 10:34:51.735070   13443 addons.go:467] Verifying addon ingress=true in "addons-764200"
	I0731 10:34:51.735117   13443 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.27.3/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml: (5.121118816s)
	I0731 10:34:51.736836   13443 out.go:177] * Verifying ingress addon...
	I0731 10:34:51.735187   13443 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.27.3/kubectl apply -f /etc/kubernetes/addons/helm-tiller-dp.yaml -f /etc/kubernetes/addons/helm-tiller-rbac.yaml -f /etc/kubernetes/addons/helm-tiller-svc.yaml: (5.107124317s)
	I0731 10:34:51.735259   13443 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.27.3/kubectl apply -f /etc/kubernetes/addons/registry-rc.yaml -f /etc/kubernetes/addons/registry-svc.yaml -f /etc/kubernetes/addons/registry-proxy.yaml: (5.015118938s)
	I0731 10:34:51.735312   13443 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.27.3/kubectl apply -f /etc/kubernetes/addons/metrics-apiservice.yaml -f /etc/kubernetes/addons/metrics-server-deployment.yaml -f /etc/kubernetes/addons/metrics-server-rbac.yaml -f /etc/kubernetes/addons/metrics-server-service.yaml: (4.613866342s)
	I0731 10:34:51.735412   13443 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.27.3/kubectl apply -f /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml -f /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml -f /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml: (4.115411537s)
	I0731 10:34:51.735491   13443 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.27.3/kubectl apply -f /etc/kubernetes/addons/ig-namespace.yaml -f /etc/kubernetes/addons/ig-serviceaccount.yaml -f /etc/kubernetes/addons/ig-role.yaml -f /etc/kubernetes/addons/ig-rolebinding.yaml -f /etc/kubernetes/addons/ig-clusterrole.yaml -f /etc/kubernetes/addons/ig-clusterrolebinding.yaml -f /etc/kubernetes/addons/ig-crd.yaml -f /etc/kubernetes/addons/ig-daemonset.yaml: (3.027245778s)
	I0731 10:34:51.738334   13443 addons.go:467] Verifying addon metrics-server=true in "addons-764200"
	I0731 10:34:51.738357   13443 addons.go:467] Verifying addon registry=true in "addons-764200"
	I0731 10:34:51.740439   13443 out.go:177] * Verifying registry addon...
	W0731 10:34:51.738363   13443 addons.go:449] apply failed, will retry: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.27.3/kubectl apply -f /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml -f /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml -f /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml: Process exited with status 1
	stdout:
	customresourcedefinition.apiextensions.k8s.io/volumesnapshotclasses.snapshot.storage.k8s.io created
	customresourcedefinition.apiextensions.k8s.io/volumesnapshotcontents.snapshot.storage.k8s.io created
	customresourcedefinition.apiextensions.k8s.io/volumesnapshots.snapshot.storage.k8s.io created
	serviceaccount/snapshot-controller created
	clusterrole.rbac.authorization.k8s.io/snapshot-controller-runner created
	clusterrolebinding.rbac.authorization.k8s.io/snapshot-controller-role created
	role.rbac.authorization.k8s.io/snapshot-controller-leaderelection created
	rolebinding.rbac.authorization.k8s.io/snapshot-controller-leaderelection created
	deployment.apps/snapshot-controller created
	
	stderr:
	error: resource mapping not found for name: "csi-hostpath-snapclass" namespace: "" from "/etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml": no matches for kind "VolumeSnapshotClass" in version "snapshot.storage.k8s.io/v1"
	ensure CRDs are installed first
	I0731 10:34:51.738978   13443 kapi.go:75] Waiting for pod with label "app.kubernetes.io/name=ingress-nginx" in ns "ingress-nginx" ...
	I0731 10:34:51.742104   13443 retry.go:31] will retry after 222.926523ms: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.27.3/kubectl apply -f /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml -f /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml -f /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml: Process exited with status 1
	stdout:
	customresourcedefinition.apiextensions.k8s.io/volumesnapshotclasses.snapshot.storage.k8s.io created
	customresourcedefinition.apiextensions.k8s.io/volumesnapshotcontents.snapshot.storage.k8s.io created
	customresourcedefinition.apiextensions.k8s.io/volumesnapshots.snapshot.storage.k8s.io created
	serviceaccount/snapshot-controller created
	clusterrole.rbac.authorization.k8s.io/snapshot-controller-runner created
	clusterrolebinding.rbac.authorization.k8s.io/snapshot-controller-role created
	role.rbac.authorization.k8s.io/snapshot-controller-leaderelection created
	rolebinding.rbac.authorization.k8s.io/snapshot-controller-leaderelection created
	deployment.apps/snapshot-controller created
	
	stderr:
	error: resource mapping not found for name: "csi-hostpath-snapclass" namespace: "" from "/etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml": no matches for kind "VolumeSnapshotClass" in version "snapshot.storage.k8s.io/v1"
	ensure CRDs are installed first
	I0731 10:34:51.742740   13443 kapi.go:75] Waiting for pod with label "kubernetes.io/minikube-addons=registry" in ns "kube-system" ...
	I0731 10:34:51.746808   13443 kapi.go:86] Found 3 Pods for label selector app.kubernetes.io/name=ingress-nginx
	I0731 10:34:51.746826   13443 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0731 10:34:51.806508   13443 kapi.go:86] Found 1 Pods for label selector kubernetes.io/minikube-addons=registry
	I0731 10:34:51.806535   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I0731 10:34:51.807729   13443 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0731 10:34:51.809746   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I0731 10:34:51.966235   13443 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.27.3/kubectl apply --force -f /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml -f /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml -f /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml
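
The apply failure above is the usual CRD-establishment race: the VolumeSnapshotClass object is submitted in the same batch as the CRD that defines it, and the API server's REST mapper has not yet discovered the new kind — hence "no matches for kind VolumeSnapshotClass ... ensure CRDs are installed first". minikube simply retries (here after ~223ms, adding --force). A race-free manual sequence, sketched with the manifest names used above, installs the CRD first and waits for it to be established:

	kubectl apply -f snapshot.storage.k8s.io_volumesnapshotclasses.yaml
	kubectl wait --for=condition=Established crd/volumesnapshotclasses.snapshot.storage.k8s.io --timeout=60s
	kubectl apply -f csi-hostpath-snapshotclass.yaml
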
	I0731 10:34:52.313515   13443 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0731 10:34:52.315432   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I0731 10:34:52.538068   13443 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.27.3/kubectl apply -f /etc/kubernetes/addons/rbac-external-attacher.yaml -f /etc/kubernetes/addons/rbac-hostpath.yaml -f /etc/kubernetes/addons/rbac-external-health-monitor-controller.yaml -f /etc/kubernetes/addons/rbac-external-provisioner.yaml -f /etc/kubernetes/addons/rbac-external-resizer.yaml -f /etc/kubernetes/addons/rbac-external-snapshotter.yaml -f /etc/kubernetes/addons/csi-hostpath-attacher.yaml -f /etc/kubernetes/addons/csi-hostpath-driverinfo.yaml -f /etc/kubernetes/addons/csi-hostpath-plugin.yaml -f /etc/kubernetes/addons/csi-hostpath-resizer.yaml -f /etc/kubernetes/addons/csi-hostpath-storageclass.yaml: (3.017027861s)
	I0731 10:34:52.538111   13443 addons.go:467] Verifying addon csi-hostpath-driver=true in "addons-764200"
	I0731 10:34:52.540163   13443 out.go:177] * Verifying csi-hostpath-driver addon...
	I0731 10:34:52.542491   13443 kapi.go:75] Waiting for pod with label "kubernetes.io/minikube-addons=csi-hostpath-driver" in ns "kube-system" ...
	I0731 10:34:52.545823   13443 kapi.go:86] Found 2 Pods for label selector kubernetes.io/minikube-addons=csi-hostpath-driver
	I0731 10:34:52.545848   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0731 10:34:52.548614   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0731 10:34:52.620378   13443 node_ready.go:58] node "addons-764200" has status "Ready":"False"
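
The kapi.go:96 lines are a poll loop: list the pods matching a label selector and wait until each leaves Pending; node_ready.go:58 does the same for the node's Ready condition, which stays False until kubelet and the CNI settle. Rough kubectl equivalents of the two checks (selector and node name taken from the log) would be:

	kubectl -n kube-system wait --for=condition=Ready pod -l kubernetes.io/minikube-addons=csi-hostpath-driver --timeout=6m
	kubectl get node addons-764200 -o jsonpath='{.status.conditions[?(@.type=="Ready")].status}'
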
	I0731 10:34:52.746289   13443 ssh_runner.go:362] scp memory --> /var/lib/minikube/google_application_credentials.json (162 bytes)
	I0731 10:34:52.746348   13443 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-764200
	I0731 10:34:52.761863   13443 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32772 SSHKeyPath:/home/jenkins/minikube-integration/16969-5799/.minikube/machines/addons-764200/id_rsa Username:docker}
	I0731 10:34:52.812444   13443 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0731 10:34:52.814357   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I0731 10:34:52.862891   13443 ssh_runner.go:362] scp memory --> /var/lib/minikube/google_cloud_project (12 bytes)
	I0731 10:34:52.878529   13443 addons.go:231] Setting addon gcp-auth=true in "addons-764200"
	I0731 10:34:52.878595   13443 host.go:66] Checking if "addons-764200" exists ...
	I0731 10:34:52.878966   13443 cli_runner.go:164] Run: docker container inspect addons-764200 --format={{.State.Status}}
	I0731 10:34:52.893874   13443 ssh_runner.go:195] Run: cat /var/lib/minikube/google_application_credentials.json
	I0731 10:34:52.893922   13443 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-764200
	I0731 10:34:52.910423   13443 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32772 SSHKeyPath:/home/jenkins/minikube-integration/16969-5799/.minikube/machines/addons-764200/id_rsa Username:docker}
	I0731 10:34:53.027375   13443 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.27.3/kubectl apply --force -f /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml -f /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml -f /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml: (1.061089578s)
	I0731 10:34:53.029048   13443 out.go:177]   - Using image registry.k8s.io/ingress-nginx/kube-webhook-certgen:v20230407
	I0731 10:34:53.030588   13443 out.go:177]   - Using image gcr.io/k8s-minikube/gcp-auth-webhook:v0.1.0
	I0731 10:34:53.032013   13443 addons.go:423] installing /etc/kubernetes/addons/gcp-auth-ns.yaml
	I0731 10:34:53.032029   13443 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/gcp-auth-ns.yaml (700 bytes)
	I0731 10:34:53.047377   13443 addons.go:423] installing /etc/kubernetes/addons/gcp-auth-service.yaml
	I0731 10:34:53.047397   13443 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/gcp-auth-service.yaml (788 bytes)
	I0731 10:34:53.052596   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0731 10:34:53.062513   13443 addons.go:423] installing /etc/kubernetes/addons/gcp-auth-webhook.yaml
	I0731 10:34:53.062532   13443 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/gcp-auth-webhook.yaml (5412 bytes)
	I0731 10:34:53.076786   13443 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.27.3/kubectl apply -f /etc/kubernetes/addons/gcp-auth-ns.yaml -f /etc/kubernetes/addons/gcp-auth-service.yaml -f /etc/kubernetes/addons/gcp-auth-webhook.yaml
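
The steps from 10:34:52 onward wire up the gcp-auth addon: the application-default credentials and project id are copied into /var/lib/minikube (google_application_credentials.json, google_cloud_project), then the namespace, service, and webhook manifests are applied. The user-facing equivalent of this whole sequence is simply:

	minikube -p addons-764200 addons enable gcp-auth
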
	I0731 10:34:53.312709   13443 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0731 10:34:53.313886   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I0731 10:34:53.609215   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0731 10:34:53.812245   13443 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0731 10:34:53.825250   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I0731 10:34:54.109429   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0731 10:34:54.312372   13443 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0731 10:34:54.314136   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I0731 10:34:54.619716   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0731 10:34:54.623338   13443 node_ready.go:58] node "addons-764200" has status "Ready":"False"
	I0731 10:34:54.907528   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I0731 10:34:54.908622   13443 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0731 10:34:54.913162   13443 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.27.3/kubectl apply -f /etc/kubernetes/addons/gcp-auth-ns.yaml -f /etc/kubernetes/addons/gcp-auth-service.yaml -f /etc/kubernetes/addons/gcp-auth-webhook.yaml: (1.836321225s)
	I0731 10:34:54.914025   13443 addons.go:467] Verifying addon gcp-auth=true in "addons-764200"
	I0731 10:34:54.915835   13443 out.go:177] * Verifying gcp-auth addon...
	I0731 10:34:54.918499   13443 kapi.go:75] Waiting for pod with label "kubernetes.io/minikube-addons=gcp-auth" in ns "gcp-auth" ...
	I0731 10:34:55.006535   13443 kapi.go:86] Found 1 Pods for label selector kubernetes.io/minikube-addons=gcp-auth
	I0731 10:34:55.006566   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0731 10:34:55.009420   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0731 10:34:55.112484   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0731 10:34:55.315996   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I0731 10:34:55.317306   13443 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0731 10:34:55.512794   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0731 10:34:55.608968   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0731 10:34:55.812242   13443 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0731 10:34:55.814084   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	[... 187 near-identical polling lines elided: kapi.go:96 keeps reporting the pods for "app.kubernetes.io/name=ingress-nginx", "kubernetes.io/minikube-addons=registry", "kubernetes.io/minikube-addons=csi-hostpath-driver", and "kubernetes.io/minikube-addons=gcp-auth" as Pending, and node_ready.go:58 keeps reporting node "addons-764200" as "Ready":"False", at sub-second intervals from 10:34:56.013737 through 10:35:18.012327 ...]
	I0731 10:35:18.052733   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0731 10:35:18.312056   13443 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0731 10:35:18.313630   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I0731 10:35:18.512420   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0731 10:35:18.552498   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0731 10:35:18.812197   13443 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0731 10:35:18.813968   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I0731 10:35:19.012874   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0731 10:35:19.052959   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0731 10:35:19.319575   13443 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0731 10:35:19.336418   13443 kapi.go:86] Found 2 Pods for label selector kubernetes.io/minikube-addons=registry
	I0731 10:35:19.336496   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I0731 10:35:19.516595   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0731 10:35:19.553783   13443 kapi.go:86] Found 3 Pods for label selector kubernetes.io/minikube-addons=csi-hostpath-driver
	I0731 10:35:19.553806   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0731 10:35:19.624984   13443 node_ready.go:49] node "addons-764200" has status "Ready":"True"
	I0731 10:35:19.625009   13443 node_ready.go:38] duration metric: took 33.514356822s waiting for node "addons-764200" to be "Ready" ...
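	(The node-readiness poll that just completed above can be reproduced against the same cluster with kubectl wait; a minimal sketch, assuming the context and node name from this run, with an illustrative timeout:
	    # Block until the node reports the Ready condition (timeout value is illustrative).
	    kubectl --context addons-764200 wait --for=condition=Ready node/addons-764200 --timeout=120s
	)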
	I0731 10:35:19.625021   13443 pod_ready.go:35] extra waiting up to 6m0s for all system-critical pods including labels [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] to be "Ready" ...
	I0731 10:35:19.637970   13443 pod_ready.go:78] waiting up to 6m0s for pod "coredns-5d78c9869d-9x8dj" in "kube-system" namespace to be "Ready" ...
	I0731 10:35:19.811774   13443 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0731 10:35:19.813679   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I0731 10:35:20.013419   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0731 10:35:20.054976   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0731 10:35:20.312714   13443 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0731 10:35:20.314450   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I0731 10:35:20.514288   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0731 10:35:20.553182   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0731 10:35:20.812803   13443 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0731 10:35:20.815260   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I0731 10:35:21.013863   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0731 10:35:21.114462   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0731 10:35:21.316192   13443 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0731 10:35:21.318082   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I0731 10:35:21.513030   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0731 10:35:21.553976   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0731 10:35:21.714826   13443 pod_ready.go:92] pod "coredns-5d78c9869d-9x8dj" in "kube-system" namespace has status "Ready":"True"
	I0731 10:35:21.714854   13443 pod_ready.go:81] duration metric: took 2.076850837s waiting for pod "coredns-5d78c9869d-9x8dj" in "kube-system" namespace to be "Ready" ...
	I0731 10:35:21.714880   13443 pod_ready.go:78] waiting up to 6m0s for pod "etcd-addons-764200" in "kube-system" namespace to be "Ready" ...
	I0731 10:35:21.719472   13443 pod_ready.go:92] pod "etcd-addons-764200" in "kube-system" namespace has status "Ready":"True"
	I0731 10:35:21.719490   13443 pod_ready.go:81] duration metric: took 4.593041ms waiting for pod "etcd-addons-764200" in "kube-system" namespace to be "Ready" ...
	I0731 10:35:21.719501   13443 pod_ready.go:78] waiting up to 6m0s for pod "kube-apiserver-addons-764200" in "kube-system" namespace to be "Ready" ...
	I0731 10:35:21.724704   13443 pod_ready.go:92] pod "kube-apiserver-addons-764200" in "kube-system" namespace has status "Ready":"True"
	I0731 10:35:21.724720   13443 pod_ready.go:81] duration metric: took 5.213413ms waiting for pod "kube-apiserver-addons-764200" in "kube-system" namespace to be "Ready" ...
	I0731 10:35:21.724728   13443 pod_ready.go:78] waiting up to 6m0s for pod "kube-controller-manager-addons-764200" in "kube-system" namespace to be "Ready" ...
	I0731 10:35:21.729553   13443 pod_ready.go:92] pod "kube-controller-manager-addons-764200" in "kube-system" namespace has status "Ready":"True"
	I0731 10:35:21.729576   13443 pod_ready.go:81] duration metric: took 4.840825ms waiting for pod "kube-controller-manager-addons-764200" in "kube-system" namespace to be "Ready" ...
	I0731 10:35:21.729591   13443 pod_ready.go:78] waiting up to 6m0s for pod "kube-proxy-m4bbq" in "kube-system" namespace to be "Ready" ...
	I0731 10:35:21.812824   13443 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0731 10:35:21.814538   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I0731 10:35:22.013279   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0731 10:35:22.020259   13443 pod_ready.go:92] pod "kube-proxy-m4bbq" in "kube-system" namespace has status "Ready":"True"
	I0731 10:35:22.020277   13443 pod_ready.go:81] duration metric: took 290.678816ms waiting for pod "kube-proxy-m4bbq" in "kube-system" namespace to be "Ready" ...
	I0731 10:35:22.020287   13443 pod_ready.go:78] waiting up to 6m0s for pod "kube-scheduler-addons-764200" in "kube-system" namespace to be "Ready" ...
	I0731 10:35:22.054165   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0731 10:35:22.312246   13443 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0731 10:35:22.314257   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I0731 10:35:22.421532   13443 pod_ready.go:92] pod "kube-scheduler-addons-764200" in "kube-system" namespace has status "Ready":"True"
	I0731 10:35:22.421558   13443 pod_ready.go:81] duration metric: took 401.263702ms waiting for pod "kube-scheduler-addons-764200" in "kube-system" namespace to be "Ready" ...
	I0731 10:35:22.421572   13443 pod_ready.go:78] waiting up to 6m0s for pod "metrics-server-7746886d4f-hxf8x" in "kube-system" namespace to be "Ready" ...
	I0731 10:35:22.513249   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0731 10:35:22.554374   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0731 10:35:22.812863   13443 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0731 10:35:22.814799   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I0731 10:35:23.012626   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0731 10:35:23.055100   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0731 10:35:23.312757   13443 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0731 10:35:23.314509   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I0731 10:35:23.513414   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0731 10:35:23.554027   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0731 10:35:23.813203   13443 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0731 10:35:23.815075   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I0731 10:35:24.013121   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0731 10:35:24.053318   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0731 10:35:24.312442   13443 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0731 10:35:24.313541   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I0731 10:35:24.513143   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0731 10:35:24.553206   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0731 10:35:24.726212   13443 pod_ready.go:102] pod "metrics-server-7746886d4f-hxf8x" in "kube-system" namespace has status "Ready":"False"
	I0731 10:35:24.811726   13443 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0731 10:35:24.813726   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I0731 10:35:25.012976   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0731 10:35:25.052953   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0731 10:35:25.313051   13443 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0731 10:35:25.315528   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I0731 10:35:25.513694   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0731 10:35:25.553737   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0731 10:35:25.812749   13443 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0731 10:35:25.815054   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I0731 10:35:26.013250   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0731 10:35:26.053842   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0731 10:35:26.312630   13443 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0731 10:35:26.314243   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I0731 10:35:26.514009   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0731 10:35:26.553323   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0731 10:35:26.727146   13443 pod_ready.go:102] pod "metrics-server-7746886d4f-hxf8x" in "kube-system" namespace has status "Ready":"False"
	I0731 10:35:26.811948   13443 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0731 10:35:26.814591   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I0731 10:35:27.013450   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0731 10:35:27.054098   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0731 10:35:27.312194   13443 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0731 10:35:27.314531   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I0731 10:35:27.513357   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0731 10:35:27.555156   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0731 10:35:27.811766   13443 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0731 10:35:27.813829   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I0731 10:35:28.012521   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0731 10:35:28.053633   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0731 10:35:28.312308   13443 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0731 10:35:28.314429   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I0731 10:35:28.512867   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0731 10:35:28.554184   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0731 10:35:28.811649   13443 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0731 10:35:28.814156   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I0731 10:35:29.012454   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0731 10:35:29.053823   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0731 10:35:29.228628   13443 pod_ready.go:102] pod "metrics-server-7746886d4f-hxf8x" in "kube-system" namespace has status "Ready":"False"
	I0731 10:35:29.312637   13443 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0731 10:35:29.314699   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I0731 10:35:29.513292   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0731 10:35:29.554013   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0731 10:35:29.811860   13443 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0731 10:35:29.814321   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I0731 10:35:30.013526   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0731 10:35:30.053623   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0731 10:35:30.312606   13443 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0731 10:35:30.314248   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I0731 10:35:30.512953   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0731 10:35:30.554935   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0731 10:35:30.812785   13443 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0731 10:35:30.814689   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I0731 10:35:31.013830   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0731 10:35:31.054423   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0731 10:35:31.311939   13443 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0731 10:35:31.314305   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I0731 10:35:31.513080   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0731 10:35:31.553502   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0731 10:35:31.727249   13443 pod_ready.go:102] pod "metrics-server-7746886d4f-hxf8x" in "kube-system" namespace has status "Ready":"False"
	I0731 10:35:31.812404   13443 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0731 10:35:31.815847   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I0731 10:35:32.013222   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0731 10:35:32.054463   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0731 10:35:32.312454   13443 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0731 10:35:32.314680   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I0731 10:35:32.513742   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0731 10:35:32.555077   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0731 10:35:32.812753   13443 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0731 10:35:32.816852   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I0731 10:35:33.013206   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0731 10:35:33.055074   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0731 10:35:33.311839   13443 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0731 10:35:33.314087   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I0731 10:35:33.513041   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0731 10:35:33.554700   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0731 10:35:33.727840   13443 pod_ready.go:102] pod "metrics-server-7746886d4f-hxf8x" in "kube-system" namespace has status "Ready":"False"
	I0731 10:35:33.812340   13443 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0731 10:35:33.814949   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I0731 10:35:34.013124   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0731 10:35:34.053516   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0731 10:35:34.312501   13443 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0731 10:35:34.316252   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I0731 10:35:34.512896   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0731 10:35:34.554549   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0731 10:35:34.812099   13443 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0731 10:35:34.814140   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I0731 10:35:35.012634   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0731 10:35:35.085950   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0731 10:35:35.408198   13443 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0731 10:35:35.408231   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I0731 10:35:35.533298   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0731 10:35:35.555096   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0731 10:35:35.811981   13443 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0731 10:35:35.814431   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I0731 10:35:36.012884   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0731 10:35:36.053860   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0731 10:35:36.226873   13443 pod_ready.go:102] pod "metrics-server-7746886d4f-hxf8x" in "kube-system" namespace has status "Ready":"False"
	I0731 10:35:36.312712   13443 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0731 10:35:36.313614   13443 kapi.go:107] duration metric: took 44.570873275s to wait for kubernetes.io/minikube-addons=registry ...
	I0731 10:35:36.513488   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0731 10:35:36.557847   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0731 10:35:36.812810   13443 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0731 10:35:37.013327   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0731 10:35:37.053487   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0731 10:35:37.311099   13443 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0731 10:35:37.515215   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0731 10:35:37.555405   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0731 10:35:37.811611   13443 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0731 10:35:38.013026   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0731 10:35:38.053230   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0731 10:35:38.227173   13443 pod_ready.go:102] pod "metrics-server-7746886d4f-hxf8x" in "kube-system" namespace has status "Ready":"False"
	I0731 10:35:38.311647   13443 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0731 10:35:38.512447   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0731 10:35:38.554271   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0731 10:35:38.812268   13443 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0731 10:35:39.013111   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0731 10:35:39.053602   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0731 10:35:39.315497   13443 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0731 10:35:39.512787   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0731 10:35:39.553937   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0731 10:35:39.727582   13443 pod_ready.go:92] pod "metrics-server-7746886d4f-hxf8x" in "kube-system" namespace has status "Ready":"True"
	I0731 10:35:39.727604   13443 pod_ready.go:81] duration metric: took 17.306024229s waiting for pod "metrics-server-7746886d4f-hxf8x" in "kube-system" namespace to be "Ready" ...
	I0731 10:35:39.727628   13443 pod_ready.go:38] duration metric: took 20.10259148s for extra waiting for all system-critical and pods with labels [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] to be "Ready" ...
	I0731 10:35:39.727646   13443 api_server.go:52] waiting for apiserver process to appear ...
	I0731 10:35:39.727698   13443 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
	I0731 10:35:39.740389   13443 api_server.go:72] duration metric: took 53.824932638s to wait for apiserver process to appear ...
	I0731 10:35:39.740412   13443 api_server.go:88] waiting for apiserver healthz status ...
	I0731 10:35:39.740432   13443 api_server.go:253] Checking apiserver healthz at https://192.168.49.2:8443/healthz ...
	I0731 10:35:39.747310   13443 api_server.go:279] https://192.168.49.2:8443/healthz returned 200:
	ok
	I0731 10:35:39.748574   13443 api_server.go:141] control plane version: v1.27.3
	I0731 10:35:39.748597   13443 api_server.go:131] duration metric: took 8.178375ms to wait for apiserver health ...
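	(The two apiserver checks above, process presence and then the healthz probe, can be re-run by hand when triaging; a sketch assuming the profile name and endpoint shown in this log, with -k because the apiserver serves a self-signed certificate:
	    # Confirm the kube-apiserver process exists inside the node (same pattern the log uses).
	    minikube -p addons-764200 ssh -- sudo pgrep -xnf 'kube-apiserver.*minikube.*'
	    # Probe the healthz endpoint; a healthy apiserver answers HTTP 200 with the body "ok".
	    curl -k https://192.168.49.2:8443/healthz
	)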
	I0731 10:35:39.748607   13443 system_pods.go:43] waiting for kube-system pods to appear ...
	I0731 10:35:39.756347   13443 system_pods.go:59] 18 kube-system pods found
	I0731 10:35:39.756377   13443 system_pods.go:61] "coredns-5d78c9869d-9x8dj" [bb3537d1-1ef0-4cf7-8d42-8605616c5577] Running
	I0731 10:35:39.756384   13443 system_pods.go:61] "csi-hostpath-attacher-0" [37cfb372-b7b7-4a1a-9e40-22bb08270ff8] Running
	I0731 10:35:39.756391   13443 system_pods.go:61] "csi-hostpath-resizer-0" [04f03748-89fc-4791-978a-151ab5d739c9] Running
	I0731 10:35:39.756403   13443 system_pods.go:61] "csi-hostpathplugin-lfdlt" [eeecf89d-6f5d-4133-9d71-0f0af40ba56b] Pending / Ready:ContainersNotReady (containers with unready status: [csi-external-health-monitor-controller node-driver-registrar hostpath liveness-probe csi-provisioner csi-snapshotter]) / ContainersReady:ContainersNotReady (containers with unready status: [csi-external-health-monitor-controller node-driver-registrar hostpath liveness-probe csi-provisioner csi-snapshotter])
	I0731 10:35:39.756412   13443 system_pods.go:61] "etcd-addons-764200" [5d7b9fd6-d823-4c01-986f-d30ddc063a95] Running
	I0731 10:35:39.756422   13443 system_pods.go:61] "kindnet-j2fcw" [5a7a677d-4d43-4fd6-a592-ec2176e928cc] Running
	I0731 10:35:39.756428   13443 system_pods.go:61] "kube-apiserver-addons-764200" [75a7f5d3-f1f2-4d7e-9461-7fb31e9b197b] Running
	I0731 10:35:39.756438   13443 system_pods.go:61] "kube-controller-manager-addons-764200" [273e819c-6130-4b60-841d-2609f205c4c5] Running
	I0731 10:35:39.756451   13443 system_pods.go:61] "kube-ingress-dns-minikube" [e033e139-c37e-4e6c-a59d-13b69a05c5db] Pending / Ready:ContainersNotReady (containers with unready status: [minikube-ingress-dns]) / ContainersReady:ContainersNotReady (containers with unready status: [minikube-ingress-dns])
	I0731 10:35:39.756464   13443 system_pods.go:61] "kube-proxy-m4bbq" [31199b78-cbec-4591-93f5-a1de795b1593] Running
	I0731 10:35:39.756471   13443 system_pods.go:61] "kube-scheduler-addons-764200" [0b430567-01b2-458d-8e94-210c0eefdf19] Running
	I0731 10:35:39.756478   13443 system_pods.go:61] "metrics-server-7746886d4f-hxf8x" [e382d580-9b99-4052-af8a-b12f7feb77c8] Running
	I0731 10:35:39.756489   13443 system_pods.go:61] "registry-6jvkx" [30f6df32-a1ae-4320-9d21-a4ba4e21c885] Running
	I0731 10:35:39.756495   13443 system_pods.go:61] "registry-proxy-x29w4" [babf24fb-dbb5-4870-af51-93a0c43cd64a] Running
	I0731 10:35:39.756508   13443 system_pods.go:61] "snapshot-controller-75bbb956b9-qw2z2" [b6fd8007-61e4-47e6-9231-491a31da3743] Pending / Ready:ContainersNotReady (containers with unready status: [volume-snapshot-controller]) / ContainersReady:ContainersNotReady (containers with unready status: [volume-snapshot-controller])
	I0731 10:35:39.756517   13443 system_pods.go:61] "snapshot-controller-75bbb956b9-r4878" [83623039-8680-4dd5-beb6-fe989676924c] Running
	I0731 10:35:39.756523   13443 system_pods.go:61] "storage-provisioner" [f7e9bac0-33fa-496c-b29f-c7c58b98f533] Running
	I0731 10:35:39.756527   13443 system_pods.go:61] "tiller-deploy-6847666dc-znc64" [638f1f2a-cd4f-4f07-8b4c-603fd5bafca3] Running
	I0731 10:35:39.756531   13443 system_pods.go:74] duration metric: took 7.919684ms to wait for pod list to return data ...
	I0731 10:35:39.756539   13443 default_sa.go:34] waiting for default service account to be created ...
	I0731 10:35:39.758707   13443 default_sa.go:45] found service account: "default"
	I0731 10:35:39.758723   13443 default_sa.go:55] duration metric: took 2.179238ms for default service account to be created ...
	I0731 10:35:39.758728   13443 system_pods.go:116] waiting for k8s-apps to be running ...
	I0731 10:35:39.766923   13443 system_pods.go:86] 18 kube-system pods found
	I0731 10:35:39.766945   13443 system_pods.go:89] "coredns-5d78c9869d-9x8dj" [bb3537d1-1ef0-4cf7-8d42-8605616c5577] Running
	I0731 10:35:39.766951   13443 system_pods.go:89] "csi-hostpath-attacher-0" [37cfb372-b7b7-4a1a-9e40-22bb08270ff8] Running
	I0731 10:35:39.766956   13443 system_pods.go:89] "csi-hostpath-resizer-0" [04f03748-89fc-4791-978a-151ab5d739c9] Running
	I0731 10:35:39.766963   13443 system_pods.go:89] "csi-hostpathplugin-lfdlt" [eeecf89d-6f5d-4133-9d71-0f0af40ba56b] Pending / Ready:ContainersNotReady (containers with unready status: [csi-external-health-monitor-controller node-driver-registrar hostpath liveness-probe csi-provisioner csi-snapshotter]) / ContainersReady:ContainersNotReady (containers with unready status: [csi-external-health-monitor-controller node-driver-registrar hostpath liveness-probe csi-provisioner csi-snapshotter])
	I0731 10:35:39.766968   13443 system_pods.go:89] "etcd-addons-764200" [5d7b9fd6-d823-4c01-986f-d30ddc063a95] Running
	I0731 10:35:39.766974   13443 system_pods.go:89] "kindnet-j2fcw" [5a7a677d-4d43-4fd6-a592-ec2176e928cc] Running
	I0731 10:35:39.766978   13443 system_pods.go:89] "kube-apiserver-addons-764200" [75a7f5d3-f1f2-4d7e-9461-7fb31e9b197b] Running
	I0731 10:35:39.766983   13443 system_pods.go:89] "kube-controller-manager-addons-764200" [273e819c-6130-4b60-841d-2609f205c4c5] Running
	I0731 10:35:39.766991   13443 system_pods.go:89] "kube-ingress-dns-minikube" [e033e139-c37e-4e6c-a59d-13b69a05c5db] Pending / Ready:ContainersNotReady (containers with unready status: [minikube-ingress-dns]) / ContainersReady:ContainersNotReady (containers with unready status: [minikube-ingress-dns])
	I0731 10:35:39.766999   13443 system_pods.go:89] "kube-proxy-m4bbq" [31199b78-cbec-4591-93f5-a1de795b1593] Running
	I0731 10:35:39.767003   13443 system_pods.go:89] "kube-scheduler-addons-764200" [0b430567-01b2-458d-8e94-210c0eefdf19] Running
	I0731 10:35:39.767012   13443 system_pods.go:89] "metrics-server-7746886d4f-hxf8x" [e382d580-9b99-4052-af8a-b12f7feb77c8] Running
	I0731 10:35:39.767017   13443 system_pods.go:89] "registry-6jvkx" [30f6df32-a1ae-4320-9d21-a4ba4e21c885] Running
	I0731 10:35:39.767021   13443 system_pods.go:89] "registry-proxy-x29w4" [babf24fb-dbb5-4870-af51-93a0c43cd64a] Running
	I0731 10:35:39.767028   13443 system_pods.go:89] "snapshot-controller-75bbb956b9-qw2z2" [b6fd8007-61e4-47e6-9231-491a31da3743] Pending / Ready:ContainersNotReady (containers with unready status: [volume-snapshot-controller]) / ContainersReady:ContainersNotReady (containers with unready status: [volume-snapshot-controller])
	I0731 10:35:39.767036   13443 system_pods.go:89] "snapshot-controller-75bbb956b9-r4878" [83623039-8680-4dd5-beb6-fe989676924c] Running
	I0731 10:35:39.767040   13443 system_pods.go:89] "storage-provisioner" [f7e9bac0-33fa-496c-b29f-c7c58b98f533] Running
	I0731 10:35:39.767045   13443 system_pods.go:89] "tiller-deploy-6847666dc-znc64" [638f1f2a-cd4f-4f07-8b4c-603fd5bafca3] Running
	I0731 10:35:39.767050   13443 system_pods.go:126] duration metric: took 8.317832ms to wait for k8s-apps to be running ...
	I0731 10:35:39.767056   13443 system_svc.go:44] waiting for kubelet service to be running ....
	I0731 10:35:39.767091   13443 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet
	I0731 10:35:39.778581   13443 system_svc.go:56] duration metric: took 11.518152ms WaitForService to wait for kubelet.
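	(The kubelet liveness check above is a plain systemd query run inside the node; a sketch, again assuming the profile name from this run:
	    # Exit status 0 means the kubelet unit is active; --quiet suppresses output.
	    minikube -p addons-764200 ssh -- sudo systemctl is-active --quiet kubelet && echo kubelet running
	)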
	I0731 10:35:39.778600   13443 kubeadm.go:581] duration metric: took 53.863145117s to wait for : map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] ...
	I0731 10:35:39.778620   13443 node_conditions.go:102] verifying NodePressure condition ...
	I0731 10:35:39.781306   13443 node_conditions.go:122] node storage ephemeral capacity is 304681132Ki
	I0731 10:35:39.781324   13443 node_conditions.go:123] node cpu capacity is 8
	I0731 10:35:39.781335   13443 node_conditions.go:105] duration metric: took 2.710219ms to run NodePressure ...
	I0731 10:35:39.781344   13443 start.go:228] waiting for startup goroutines ...
	I0731 10:35:39.811492   13443 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0731 10:35:40.012780   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0731 10:35:40.054281   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0731 10:35:40.312393   13443 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0731 10:35:40.513580   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0731 10:35:40.554145   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0731 10:35:40.812091   13443 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0731 10:35:41.014317   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0731 10:35:41.110554   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0731 10:35:41.312750   13443 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0731 10:35:41.515339   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0731 10:35:41.609934   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0731 10:35:41.812881   13443 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0731 10:35:42.013789   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0731 10:35:42.110074   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0731 10:35:42.311935   13443 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0731 10:35:42.513467   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0731 10:35:42.554568   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0731 10:35:42.812647   13443 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0731 10:35:43.013614   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0731 10:35:43.054691   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0731 10:35:43.311586   13443 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0731 10:35:43.513286   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0731 10:35:43.553968   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0731 10:35:43.812415   13443 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0731 10:35:44.013697   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0731 10:35:44.056911   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0731 10:35:44.313044   13443 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0731 10:35:44.513413   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0731 10:35:44.553728   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0731 10:35:44.811551   13443 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0731 10:35:45.012738   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0731 10:35:45.053720   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0731 10:35:45.311260   13443 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0731 10:35:45.512884   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0731 10:35:45.555228   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0731 10:35:45.812500   13443 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0731 10:35:46.013278   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0731 10:35:46.056308   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0731 10:35:46.312166   13443 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0731 10:35:46.512858   13443 kapi.go:107] duration metric: took 51.594349083s to wait for kubernetes.io/minikube-addons=gcp-auth ...
	I0731 10:35:46.515521   13443 out.go:177] * Your GCP credentials will now be mounted into every pod created in the addons-764200 cluster.
	I0731 10:35:46.517186   13443 out.go:177] * If you don't want your credentials mounted into a specific pod, add a label with the `gcp-auth-skip-secret` key to your pod configuration.
	I0731 10:35:46.518788   13443 out.go:177] * If you want existing pods to be mounted with credentials, either recreate them or rerun addons enable with --refresh.
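	(For the opt-out mentioned in the message above, the gcp-auth-skip-secret label goes on the pod itself; a minimal sketch, where the pod name and the "true" value are illustrative and the image is one already pulled in this run:
	    # Hypothetical pod that opts out of GCP credential injection via the label key named above.
	    kubectl --context addons-764200 apply -f - <<'EOF'
	    apiVersion: v1
	    kind: Pod
	    metadata:
	      name: no-gcp-auth-demo
	      labels:
	        gcp-auth-skip-secret: "true"
	    spec:
	      containers:
	      - name: app
	        image: gcr.io/google-samples/hello-app:1.0
	    EOF
	)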
	I0731 10:35:46.556783   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0731 10:35:46.812464   13443 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0731 10:35:47.054318   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0731 10:35:47.311847   13443 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0731 10:35:47.554736   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0731 10:35:47.813125   13443 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0731 10:35:48.054088   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0731 10:35:48.312333   13443 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0731 10:35:48.553701   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0731 10:35:48.814044   13443 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0731 10:35:49.108191   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0731 10:35:49.311765   13443 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0731 10:35:49.553605   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0731 10:35:49.812554   13443 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0731 10:35:50.053379   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0731 10:35:50.312217   13443 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0731 10:35:50.554147   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0731 10:35:50.814313   13443 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0731 10:35:51.053588   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0731 10:35:51.311529   13443 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0731 10:35:51.553562   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0731 10:35:51.812642   13443 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0731 10:35:52.052864   13443 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0731 10:35:52.311664   13443 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0731 10:35:52.553675   13443 kapi.go:107] duration metric: took 1m0.01118343s to wait for kubernetes.io/minikube-addons=csi-hostpath-driver ...
	I0731 10:35:52.816782   13443 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0731 10:35:53.313263   13443 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0731 10:35:53.813332   13443 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0731 10:35:54.318930   13443 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0731 10:35:54.812398   13443 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0731 10:35:55.311677   13443 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0731 10:35:55.812449   13443 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0731 10:35:56.312101   13443 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0731 10:35:56.811762   13443 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0731 10:35:57.312421   13443 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0731 10:35:57.811948   13443 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0731 10:35:58.312389   13443 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0731 10:35:58.812226   13443 kapi.go:107] duration metric: took 1m7.07324349s to wait for app.kubernetes.io/name=ingress-nginx ...
	I0731 10:35:58.814606   13443 out.go:177] * Enabled addons: storage-provisioner, cloud-spanner, ingress-dns, default-storageclass, helm-tiller, inspektor-gadget, metrics-server, volumesnapshots, registry, gcp-auth, csi-hostpath-driver, ingress
	I0731 10:35:58.816188   13443 addons.go:502] enable addons completed in 1m12.950766984s: enabled=[storage-provisioner cloud-spanner ingress-dns default-storageclass helm-tiller inspektor-gadget metrics-server volumesnapshots registry gcp-auth csi-hostpath-driver ingress]
	I0731 10:35:58.816233   13443 start.go:233] waiting for cluster config update ...
	I0731 10:35:58.816261   13443 start.go:242] writing updated cluster config ...
	I0731 10:35:58.816538   13443 ssh_runner.go:195] Run: rm -f paused
	I0731 10:35:58.865128   13443 start.go:596] kubectl: 1.27.4, cluster: 1.27.3 (minor skew: 0)
	I0731 10:35:58.867204   13443 out.go:177] * Done! kubectl is now configured to use "addons-764200" cluster and "default" namespace by default
	
	* 
	* ==> CRI-O <==
	* Jul 31 10:38:35 addons-764200 crio[950]: time="2023-07-31 10:38:35.263227540Z" level=info msg="Pulled image: gcr.io/google-samples/hello-app@sha256:845f77fab71033404f4cfceaa1ddb27b70c3551ceb22a5e7f4498cdda6c9daea" id=3567755e-d224-440c-88c8-a1078589486e name=/runtime.v1.ImageService/PullImage
	Jul 31 10:38:35 addons-764200 crio[950]: time="2023-07-31 10:38:35.263993728Z" level=info msg="Checking image status: gcr.io/google-samples/hello-app:1.0" id=6bf0fa4e-76b5-47e7-a558-d69336a86383 name=/runtime.v1.ImageService/ImageStatus
	Jul 31 10:38:35 addons-764200 crio[950]: time="2023-07-31 10:38:35.264630401Z" level=info msg="Image status: &ImageStatusResponse{Image:&Image{Id:13753a81eccfdd153bf7fc9a4c9198edbcce0110e7f46ed0d38cc654a6458ff5,RepoTags:[gcr.io/google-samples/hello-app:1.0],RepoDigests:[gcr.io/google-samples/hello-app@sha256:845f77fab71033404f4cfceaa1ddb27b70c3551ceb22a5e7f4498cdda6c9daea],Size_:28496999,Uid:nil,Username:nonroot,Spec:nil,},Info:map[string]string{},}" id=6bf0fa4e-76b5-47e7-a558-d69336a86383 name=/runtime.v1.ImageService/ImageStatus
	Jul 31 10:38:35 addons-764200 crio[950]: time="2023-07-31 10:38:35.265368719Z" level=info msg="Creating container: default/hello-world-app-65bdb79f98-f9nkc/hello-world-app" id=68a3a6e0-31c1-44da-9385-524baf142a83 name=/runtime.v1.RuntimeService/CreateContainer
	Jul 31 10:38:35 addons-764200 crio[950]: time="2023-07-31 10:38:35.265464872Z" level=warning msg="Allowed annotations are specified for workload []"
	Jul 31 10:38:35 addons-764200 crio[950]: time="2023-07-31 10:38:35.349842889Z" level=info msg="Created container d6880fa9065813b26c9510746315145e8f3eb21c3ccbdb775634fc54858e1afb: default/hello-world-app-65bdb79f98-f9nkc/hello-world-app" id=68a3a6e0-31c1-44da-9385-524baf142a83 name=/runtime.v1.RuntimeService/CreateContainer
	Jul 31 10:38:35 addons-764200 crio[950]: time="2023-07-31 10:38:35.350379543Z" level=info msg="Starting container: d6880fa9065813b26c9510746315145e8f3eb21c3ccbdb775634fc54858e1afb" id=ee5bed31-0420-4881-859e-12f00c1d9cf8 name=/runtime.v1.RuntimeService/StartContainer
	Jul 31 10:38:35 addons-764200 crio[950]: time="2023-07-31 10:38:35.358337269Z" level=info msg="Started container" PID=9472 containerID=d6880fa9065813b26c9510746315145e8f3eb21c3ccbdb775634fc54858e1afb description=default/hello-world-app-65bdb79f98-f9nkc/hello-world-app id=ee5bed31-0420-4881-859e-12f00c1d9cf8 name=/runtime.v1.RuntimeService/StartContainer sandboxID=c4333624274012f2e2474b8d21b3d2c970cc6c938c7de1ea6c24cb7deed09b06
	Jul 31 10:38:35 addons-764200 crio[950]: time="2023-07-31 10:38:35.832814862Z" level=info msg="Removing container: 752898d2fd5c49b9079e8b70a5d3e5161b40ae2aa779ff42522a6e079bc2be4b" id=7ffad941-fe86-4fb5-81d6-7f66ae21e20f name=/runtime.v1.RuntimeService/RemoveContainer
	Jul 31 10:38:35 addons-764200 crio[950]: time="2023-07-31 10:38:35.849126965Z" level=info msg="Removed container 752898d2fd5c49b9079e8b70a5d3e5161b40ae2aa779ff42522a6e079bc2be4b: kube-system/kube-ingress-dns-minikube/minikube-ingress-dns" id=7ffad941-fe86-4fb5-81d6-7f66ae21e20f name=/runtime.v1.RuntimeService/RemoveContainer
	Jul 31 10:38:36 addons-764200 crio[950]: time="2023-07-31 10:38:36.349763357Z" level=info msg="Stopping container: c0456423168d7670558eaefa6eb6b70bbc1d5f7fc6f7bd99ba0b04da94e33dec (timeout: 1s)" id=d01cc71b-70a6-448e-ad52-316fff80d14d name=/runtime.v1.RuntimeService/StopContainer
	Jul 31 10:38:37 addons-764200 crio[950]: time="2023-07-31 10:38:37.360097141Z" level=warning msg="Stopping container c0456423168d7670558eaefa6eb6b70bbc1d5f7fc6f7bd99ba0b04da94e33dec with stop signal timed out: timeout reached after 1 seconds waiting for container process to exit" id=d01cc71b-70a6-448e-ad52-316fff80d14d name=/runtime.v1.RuntimeService/StopContainer
	Jul 31 10:38:37 addons-764200 conmon[6115]: conmon c0456423168d7670558e <ninfo>: container 6127 exited with status 137
	Jul 31 10:38:37 addons-764200 crio[950]: time="2023-07-31 10:38:37.502692085Z" level=info msg="Stopped container c0456423168d7670558eaefa6eb6b70bbc1d5f7fc6f7bd99ba0b04da94e33dec: ingress-nginx/ingress-nginx-controller-7799c6795f-bscvk/controller" id=d01cc71b-70a6-448e-ad52-316fff80d14d name=/runtime.v1.RuntimeService/StopContainer
	Jul 31 10:38:37 addons-764200 crio[950]: time="2023-07-31 10:38:37.503131640Z" level=info msg="Stopping pod sandbox: d168641658a5e9cc0e9cc6cfeb5cb4668cf97465a8a641ad5db1280b2c0390d0" id=8e207054-0ae7-4087-a7d8-cbe9eefd284b name=/runtime.v1.RuntimeService/StopPodSandbox
	Jul 31 10:38:37 addons-764200 crio[950]: time="2023-07-31 10:38:37.505841486Z" level=info msg="Restoring iptables rules: *nat\n:KUBE-HOSTPORTS - [0:0]\n:KUBE-HP-WJT2J7KCL4R2RH3X - [0:0]\n:KUBE-HP-MQLV4K46GAI5FC6X - [0:0]\n-X KUBE-HP-MQLV4K46GAI5FC6X\n-X KUBE-HP-WJT2J7KCL4R2RH3X\nCOMMIT\n"
	Jul 31 10:38:37 addons-764200 crio[950]: time="2023-07-31 10:38:37.507160531Z" level=info msg="Closing host port tcp:80"
	Jul 31 10:38:37 addons-764200 crio[950]: time="2023-07-31 10:38:37.507203466Z" level=info msg="Closing host port tcp:443"
	Jul 31 10:38:37 addons-764200 crio[950]: time="2023-07-31 10:38:37.509878296Z" level=info msg="Host port tcp:80 does not have an open socket"
	Jul 31 10:38:37 addons-764200 crio[950]: time="2023-07-31 10:38:37.509912544Z" level=info msg="Host port tcp:443 does not have an open socket"
	Jul 31 10:38:37 addons-764200 crio[950]: time="2023-07-31 10:38:37.510049523Z" level=info msg="Got pod network &{Name:ingress-nginx-controller-7799c6795f-bscvk Namespace:ingress-nginx ID:d168641658a5e9cc0e9cc6cfeb5cb4668cf97465a8a641ad5db1280b2c0390d0 UID:c349b648-2877-496c-816d-60ceee2891a5 NetNS:/var/run/netns/9a1e4411-51c6-4dd3-9381-eac20c0f765b Networks:[{Name:kindnet Ifname:eth0}] RuntimeConfig:map[kindnet:{IP: MAC: PortMappings:[] Bandwidth:<nil> IpRanges:[]}] Aliases:map[]}"
	Jul 31 10:38:37 addons-764200 crio[950]: time="2023-07-31 10:38:37.510237892Z" level=info msg="Deleting pod ingress-nginx_ingress-nginx-controller-7799c6795f-bscvk from CNI network \"kindnet\" (type=ptp)"
	Jul 31 10:38:37 addons-764200 crio[950]: time="2023-07-31 10:38:37.543609195Z" level=info msg="Stopped pod sandbox: d168641658a5e9cc0e9cc6cfeb5cb4668cf97465a8a641ad5db1280b2c0390d0" id=8e207054-0ae7-4087-a7d8-cbe9eefd284b name=/runtime.v1.RuntimeService/StopPodSandbox
	Jul 31 10:38:37 addons-764200 crio[950]: time="2023-07-31 10:38:37.838855660Z" level=info msg="Removing container: c0456423168d7670558eaefa6eb6b70bbc1d5f7fc6f7bd99ba0b04da94e33dec" id=cd1ac432-2654-4ebb-bac8-84cdc0186797 name=/runtime.v1.RuntimeService/RemoveContainer
	Jul 31 10:38:37 addons-764200 crio[950]: time="2023-07-31 10:38:37.854404169Z" level=info msg="Removed container c0456423168d7670558eaefa6eb6b70bbc1d5f7fc6f7bd99ba0b04da94e33dec: ingress-nginx/ingress-nginx-controller-7799c6795f-bscvk/controller" id=cd1ac432-2654-4ebb-bac8-84cdc0186797 name=/runtime.v1.RuntimeService/RemoveContainer
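	
	Note: the "Restoring iptables rules" / "Closing host port" sequence above is CRI-O tearing down the KUBE-HOSTPORTS and KUBE-HP-* NAT chains it had created for the ingress controller's hostPort 80/443 mappings, as part of stopping the pod sandbox. A quick way to inspect those chains while a hostPort pod is still running (a sketch, run against the minikube node):
	
	    $ out/minikube-linux-amd64 -p addons-764200 ssh "sudo iptables-save -t nat | grep KUBE-HP"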
	
	* 
	* ==> container status <==
	* CONTAINER           IMAGE                                                                                                                        CREATED             STATE               NAME                      ATTEMPT             POD ID              POD
	d6880fa906581       gcr.io/google-samples/hello-app@sha256:845f77fab71033404f4cfceaa1ddb27b70c3551ceb22a5e7f4498cdda6c9daea                      9 seconds ago       Running             hello-world-app           0                   c433362427401       hello-world-app-65bdb79f98-f9nkc
	1b110b3a2f822       ghcr.io/headlamp-k8s/headlamp@sha256:67ba87b88218563eec9684525904936609713b02dcbcf4390cd055766217ed45                        2 minutes ago       Running             headlamp                  0                   9dc1d387b4746       headlamp-66f6498c69-6j98t
	52c32af3609ca       docker.io/library/nginx@sha256:2d194184b067db3598771b4cf326cfe6ad5051937ba1132b8b7d4b0184e0d0a6                              2 minutes ago       Running             nginx                     0                   985056b8f5dc6       nginx
	d24e67486968c       7e7451bb70423d31bdadcf0a71a3107b64858eccd7827d066234650b5e7b36b0                                                             2 minutes ago       Exited              patch                     2                   267fce0b9cf94       ingress-nginx-admission-patch-7kttm
	5a45bef9ce5d0       gcr.io/k8s-minikube/gcp-auth-webhook@sha256:3e92b3d1c15220ae0f2f3505fb3a88899a1e48ec85fb777a1a4945ae9db2ce06                 2 minutes ago       Running             gcp-auth                  0                   77f056a6cc2be       gcp-auth-58478865f7-vd7jf
	a87a8bb5fd374       registry.k8s.io/ingress-nginx/kube-webhook-certgen@sha256:04b38ca48bcadd0c3644dc7f2ae14358ae41b628f9d1bdbf80f35ff880d9462d   3 minutes ago       Exited              create                    0                   81282628ffd22       ingress-nginx-admission-create-6vdps
	d2bf13367ca80       6e38f40d628db3002f5617342c8872c935de530d867d0f709a2fbda1a302a562                                                             3 minutes ago       Running             storage-provisioner       0                   cf8b2fa9ddec7       storage-provisioner
	05223ac89f76b       ead0a4a53df89fd173874b46093b6e62d8c72967bbf606d672c9e8c9b601a4fc                                                             3 minutes ago       Running             coredns                   0                   985b02e594bda       coredns-5d78c9869d-9x8dj
	c2c63548d3655       5780543258cf06f98595c003c0c6d22768d1fc8e9852e2839018a4bb3bfe163c                                                             3 minutes ago       Running             kube-proxy                0                   5e4d913630cbe       kube-proxy-m4bbq
	007f76c3780c5       b0b1fa0f58c6e932b7f20bf208b2841317a1e8c88cc51b18358310bbd8ec95da                                                             3 minutes ago       Running             kindnet-cni               0                   e9e1c6d6f4757       kindnet-j2fcw
	553026fc1df52       41697ceeb70b3f49e54ed46f2cf27ac5b3a201a7d9668ca327588b23fafdf36a                                                             4 minutes ago       Running             kube-scheduler            0                   7a7ce2c606a6e       kube-scheduler-addons-764200
	0a571456508c8       7cffc01dba0e151e525544f87958d12c0fa62a9f173bbc930200ce815f2aaf3f                                                             4 minutes ago       Running             kube-controller-manager   0                   6d1d6ecdc383f       kube-controller-manager-addons-764200
	1ae9d53052487       08a0c939e61b7340db53ebf07b4d0e908a35ad8d94e2cb7d0f958210e567079a                                                             4 minutes ago       Running             kube-apiserver            0                   af8ff6d7928fd       kube-apiserver-addons-764200
	9ade5fcffe970       86b6af7dd652c1b38118be1c338e9354b33469e69a218f7e290a0ca5304ad681                                                             4 minutes ago       Running             etcd                      0                   9b173db898be3       etcd-addons-764200
	
	* 
	* ==> coredns [05223ac89f76b88531d67e857aa578112e56b03adaa26592bcc192aa8cf4e974] <==
	* [INFO] 10.244.0.8:32778 - 10511 "AAAA IN registry.kube-system.svc.cluster.local.cluster.local. udp 70 false 512" NXDOMAIN qr,aa,rd 163 0.000085999s
	[INFO] 10.244.0.8:36652 - 18661 "A IN registry.kube-system.svc.cluster.local.us-central1-a.c.k8s-minikube.internal. udp 94 false 512" NXDOMAIN qr,rd,ra 94 0.00452918s
	[INFO] 10.244.0.8:36652 - 50968 "AAAA IN registry.kube-system.svc.cluster.local.us-central1-a.c.k8s-minikube.internal. udp 94 false 512" NXDOMAIN qr,rd,ra 94 0.004988596s
	[INFO] 10.244.0.8:37973 - 20172 "A IN registry.kube-system.svc.cluster.local.c.k8s-minikube.internal. udp 80 false 512" NXDOMAIN qr,rd,ra 80 0.004660445s
	[INFO] 10.244.0.8:37973 - 42958 "AAAA IN registry.kube-system.svc.cluster.local.c.k8s-minikube.internal. udp 80 false 512" NXDOMAIN qr,rd,ra 80 0.005801027s
	[INFO] 10.244.0.8:57297 - 64704 "AAAA IN registry.kube-system.svc.cluster.local.google.internal. udp 72 false 512" NXDOMAIN qr,rd,ra 72 0.004335317s
	[INFO] 10.244.0.8:57297 - 41020 "A IN registry.kube-system.svc.cluster.local.google.internal. udp 72 false 512" NXDOMAIN qr,rd,ra 72 0.004518417s
	[INFO] 10.244.0.8:59756 - 62643 "A IN registry.kube-system.svc.cluster.local. udp 56 false 512" NOERROR qr,aa,rd 110 0.000060987s
	[INFO] 10.244.0.8:59756 - 62897 "AAAA IN registry.kube-system.svc.cluster.local. udp 56 false 512" NOERROR qr,aa,rd 149 0.00011529s
	[INFO] 10.244.0.17:50927 - 55582 "A IN storage.googleapis.com.gcp-auth.svc.cluster.local. udp 78 false 1232" NXDOMAIN qr,aa,rd 160 0.00020094s
	[INFO] 10.244.0.17:51519 - 40657 "AAAA IN storage.googleapis.com.gcp-auth.svc.cluster.local. udp 78 false 1232" NXDOMAIN qr,aa,rd 160 0.000271437s
	[INFO] 10.244.0.17:39588 - 27630 "A IN storage.googleapis.com.svc.cluster.local. udp 69 false 1232" NXDOMAIN qr,aa,rd 151 0.000098009s
	[INFO] 10.244.0.17:51622 - 38350 "AAAA IN storage.googleapis.com.svc.cluster.local. udp 69 false 1232" NXDOMAIN qr,aa,rd 151 0.000139626s
	[INFO] 10.244.0.17:42075 - 52429 "AAAA IN storage.googleapis.com.cluster.local. udp 65 false 1232" NXDOMAIN qr,aa,rd 147 0.000111155s
	[INFO] 10.244.0.17:39489 - 32920 "A IN storage.googleapis.com.cluster.local. udp 65 false 1232" NXDOMAIN qr,aa,rd 147 0.000089563s
	[INFO] 10.244.0.17:43193 - 56826 "A IN storage.googleapis.com.us-central1-a.c.k8s-minikube.internal. udp 89 false 1232" NXDOMAIN qr,rd,ra 78 0.006661491s
	[INFO] 10.244.0.17:51676 - 48435 "AAAA IN storage.googleapis.com.us-central1-a.c.k8s-minikube.internal. udp 89 false 1232" NXDOMAIN qr,rd,ra 78 0.006783106s
	[INFO] 10.244.0.17:50976 - 44764 "A IN storage.googleapis.com.c.k8s-minikube.internal. udp 75 false 1232" NXDOMAIN qr,rd,ra 64 0.006004612s
	[INFO] 10.244.0.17:45142 - 60228 "AAAA IN storage.googleapis.com.c.k8s-minikube.internal. udp 75 false 1232" NXDOMAIN qr,rd,ra 64 0.007649085s
	[INFO] 10.244.0.17:53884 - 34903 "A IN storage.googleapis.com.google.internal. udp 67 false 1232" NXDOMAIN qr,rd,ra 56 0.005283587s
	[INFO] 10.244.0.17:47796 - 13972 "AAAA IN storage.googleapis.com.google.internal. udp 67 false 1232" NXDOMAIN qr,rd,ra 56 0.005296899s
	[INFO] 10.244.0.17:36138 - 33750 "AAAA IN storage.googleapis.com. udp 51 false 1232" NOERROR qr,rd,ra 240 0.000612488s
	[INFO] 10.244.0.17:51399 - 36134 "A IN storage.googleapis.com. udp 51 false 1232" NOERROR qr,rd,ra 648 0.00075553s
	[INFO] 10.244.0.19:42610 - 2 "AAAA IN registry.kube-system.svc.cluster.local. udp 56 false 512" NOERROR qr,aa,rd 149 0.000168777s
	[INFO] 10.244.0.19:39245 - 3 "A IN registry.kube-system.svc.cluster.local. udp 56 false 512" NOERROR qr,aa,rd 110 0.000118378s
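	
	Note: the NXDOMAIN-then-NOERROR pattern above is ordinary ndots:5 search-path expansion: each name is tried against the pod's resolv.conf search domains (the cluster.local suffixes, the GCE zone/project domains, google.internal) before the intended name finally answers. The expansion can be reproduced from inside the cluster with a throwaway pod (a sketch):
	
	    $ kubectl --context addons-764200 run -it --rm dns-probe --image=busybox --restart=Never -- nslookup registry.kube-system.svc.cluster.local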
	
	* 
	* ==> describe nodes <==
	* Name:               addons-764200
	Roles:              control-plane
	Labels:             beta.kubernetes.io/arch=amd64
	                    beta.kubernetes.io/os=linux
	                    kubernetes.io/arch=amd64
	                    kubernetes.io/hostname=addons-764200
	                    kubernetes.io/os=linux
	                    minikube.k8s.io/commit=a7848ba25aaaad8ebb50e721c0d343e471188fc7
	                    minikube.k8s.io/name=addons-764200
	                    minikube.k8s.io/primary=true
	                    minikube.k8s.io/updated_at=2023_07_31T10_34_33_0700
	                    minikube.k8s.io/version=v1.31.1
	                    node-role.kubernetes.io/control-plane=
	                    node.kubernetes.io/exclude-from-external-load-balancers=
	                    topology.hostpath.csi/node=addons-764200
	Annotations:        kubeadm.alpha.kubernetes.io/cri-socket: unix:///var/run/crio/crio.sock
	                    node.alpha.kubernetes.io/ttl: 0
	                    volumes.kubernetes.io/controller-managed-attach-detach: true
	CreationTimestamp:  Mon, 31 Jul 2023 10:34:30 +0000
	Taints:             <none>
	Unschedulable:      false
	Lease:
	  HolderIdentity:  addons-764200
	  AcquireTime:     <unset>
	  RenewTime:       Mon, 31 Jul 2023 10:38:37 +0000
	Conditions:
	  Type             Status  LastHeartbeatTime                 LastTransitionTime                Reason                       Message
	  ----             ------  -----------------                 ------------------                ------                       -------
	  MemoryPressure   False   Mon, 31 Jul 2023 10:37:05 +0000   Mon, 31 Jul 2023 10:34:28 +0000   KubeletHasSufficientMemory   kubelet has sufficient memory available
	  DiskPressure     False   Mon, 31 Jul 2023 10:37:05 +0000   Mon, 31 Jul 2023 10:34:28 +0000   KubeletHasNoDiskPressure     kubelet has no disk pressure
	  PIDPressure      False   Mon, 31 Jul 2023 10:37:05 +0000   Mon, 31 Jul 2023 10:34:28 +0000   KubeletHasSufficientPID      kubelet has sufficient PID available
	  Ready            True    Mon, 31 Jul 2023 10:37:05 +0000   Mon, 31 Jul 2023 10:35:19 +0000   KubeletReady                 kubelet is posting ready status
	Addresses:
	  InternalIP:  192.168.49.2
	  Hostname:    addons-764200
	Capacity:
	  cpu:                8
	  ephemeral-storage:  304681132Ki
	  hugepages-1Gi:      0
	  hugepages-2Mi:      0
	  memory:             32859436Ki
	  pods:               110
	Allocatable:
	  cpu:                8
	  ephemeral-storage:  304681132Ki
	  hugepages-1Gi:      0
	  hugepages-2Mi:      0
	  memory:             32859436Ki
	  pods:               110
	System Info:
	  Machine ID:                 10ee51760cb0414ca7b4b1bd756f435a
	  System UUID:                e40b5a17-097d-47e0-a215-85b234e88c82
	  Boot ID:                    29fc075f-138b-4be6-bf1b-3db3f063b35c
	  Kernel Version:             5.15.0-1038-gcp
	  OS Image:                   Ubuntu 22.04.2 LTS
	  Operating System:           linux
	  Architecture:               amd64
	  Container Runtime Version:  cri-o://1.24.6
	  Kubelet Version:            v1.27.3
	  Kube-Proxy Version:         v1.27.3
	PodCIDR:                      10.244.0.0/24
	PodCIDRs:                     10.244.0.0/24
	Non-terminated Pods:          (12 in total)
	  Namespace                   Name                                     CPU Requests  CPU Limits  Memory Requests  Memory Limits  Age
	  ---------                   ----                                     ------------  ----------  ---------------  -------------  ---
	  default                     hello-world-app-65bdb79f98-f9nkc         0 (0%)        0 (0%)      0 (0%)           0 (0%)         11s
	  default                     nginx                                    0 (0%)        0 (0%)      0 (0%)           0 (0%)         2m32s
	  gcp-auth                    gcp-auth-58478865f7-vd7jf                0 (0%)        0 (0%)      0 (0%)           0 (0%)         3m50s
	  headlamp                    headlamp-66f6498c69-6j98t                0 (0%)        0 (0%)      0 (0%)           0 (0%)         2m31s
	  kube-system                 coredns-5d78c9869d-9x8dj                 100m (1%)     0 (0%)      70Mi (0%)        170Mi (0%)     3m59s
	  kube-system                 etcd-addons-764200                       100m (1%)     0 (0%)      100Mi (0%)       0 (0%)         4m11s
	  kube-system                 kindnet-j2fcw                            100m (1%)     100m (1%)   50Mi (0%)        50Mi (0%)      3m59s
	  kube-system                 kube-apiserver-addons-764200             250m (3%)     0 (0%)      0 (0%)           0 (0%)         4m11s
	  kube-system                 kube-controller-manager-addons-764200    200m (2%)     0 (0%)      0 (0%)           0 (0%)         4m11s
	  kube-system                 kube-proxy-m4bbq                         0 (0%)        0 (0%)      0 (0%)           0 (0%)         3m59s
	  kube-system                 kube-scheduler-addons-764200             100m (1%)     0 (0%)      0 (0%)           0 (0%)         4m13s
	  kube-system                 storage-provisioner                      0 (0%)        0 (0%)      0 (0%)           0 (0%)         3m54s
	Allocated resources:
	  (Total limits may be over 100 percent, i.e., overcommitted.)
	  Resource           Requests    Limits
	  --------           --------    ------
	  cpu                850m (10%)  100m (1%)
	  memory             220Mi (0%)  220Mi (0%)
	  ephemeral-storage  0 (0%)      0 (0%)
	  hugepages-1Gi      0 (0%)      0 (0%)
	  hugepages-2Mi      0 (0%)      0 (0%)
	Events:
	  Type    Reason                   Age                    From             Message
	  ----    ------                   ----                   ----             -------
	  Normal  Starting                 3m54s                  kube-proxy       
	  Normal  Starting                 4m18s                  kubelet          Starting kubelet.
	  Normal  NodeHasSufficientMemory  4m17s (x8 over 4m18s)  kubelet          Node addons-764200 status is now: NodeHasSufficientMemory
	  Normal  NodeHasNoDiskPressure    4m17s (x8 over 4m18s)  kubelet          Node addons-764200 status is now: NodeHasNoDiskPressure
	  Normal  NodeHasSufficientPID     4m17s (x8 over 4m18s)  kubelet          Node addons-764200 status is now: NodeHasSufficientPID
	  Normal  Starting                 4m12s                  kubelet          Starting kubelet.
	  Normal  NodeHasSufficientMemory  4m12s                  kubelet          Node addons-764200 status is now: NodeHasSufficientMemory
	  Normal  NodeHasNoDiskPressure    4m12s                  kubelet          Node addons-764200 status is now: NodeHasNoDiskPressure
	  Normal  NodeHasSufficientPID     4m12s                  kubelet          Node addons-764200 status is now: NodeHasSufficientPID
	  Normal  RegisteredNode           4m                     node-controller  Node addons-764200 event: Registered Node addons-764200 in Controller
	  Normal  NodeReady                3m25s                  kubelet          Node addons-764200 status is now: NodeReady
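	
	Note: the percentages in this node description are relative to allocatable capacity (8 CPUs, ~32Gi memory), so the cluster is lightly loaded at 850m CPU requested. The same summary can be pulled on demand (a sketch):
	
	    $ kubectl --context addons-764200 describe node addons-764200 | grep -A 8 "Allocated resources"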
	
	* 
	* ==> dmesg <==
	* [  +0.007317] device-mapper: core: CONFIG_IMA_DISABLE_HTABLE is disabled. Duplicate IMA measurements will not be recorded in the IMA log.
	[  +0.003158] platform eisa.0: EISA: Cannot allocate resource for mainboard
	[  +0.000679] platform eisa.0: Cannot allocate resource for EISA slot 1
	[  +0.000674] platform eisa.0: Cannot allocate resource for EISA slot 2
	[  +0.000641] platform eisa.0: Cannot allocate resource for EISA slot 3
	[  +0.000669] platform eisa.0: Cannot allocate resource for EISA slot 4
	[  +0.000644] platform eisa.0: Cannot allocate resource for EISA slot 5
	[  +0.000615] platform eisa.0: Cannot allocate resource for EISA slot 6
	[  +0.000618] platform eisa.0: Cannot allocate resource for EISA slot 7
	[  +0.000659] platform eisa.0: Cannot allocate resource for EISA slot 8
	[  +9.748684] kauditd_printk_skb: 36 callbacks suppressed
	[Jul31 10:36] IPv4: martian source 10.244.0.18 from 127.0.0.1, on dev eth0
	[  +0.000006] ll header: 00000000: 6a eb 19 db a2 c3 16 22 6c 26 69 0a 08 00
	[  +1.016288] IPv4: martian source 10.244.0.18 from 127.0.0.1, on dev eth0
	[  +0.000007] ll header: 00000000: 6a eb 19 db a2 c3 16 22 6c 26 69 0a 08 00
	[  +2.015842] IPv4: martian source 10.244.0.18 from 127.0.0.1, on dev eth0
	[  +0.000024] ll header: 00000000: 6a eb 19 db a2 c3 16 22 6c 26 69 0a 08 00
	[  +4.063574] IPv4: martian source 10.244.0.18 from 127.0.0.1, on dev eth0
	[  +0.000024] ll header: 00000000: 6a eb 19 db a2 c3 16 22 6c 26 69 0a 08 00
	[  +8.191173] IPv4: martian source 10.244.0.18 from 127.0.0.1, on dev eth0
	[  +0.000006] ll header: 00000000: 6a eb 19 db a2 c3 16 22 6c 26 69 0a 08 00
	[ +16.126449] IPv4: martian source 10.244.0.18 from 127.0.0.1, on dev eth0
	[  +0.000006] ll header: 00000000: 6a eb 19 db a2 c3 16 22 6c 26 69 0a 08 00
	[Jul31 10:37] IPv4: martian source 10.244.0.18 from 127.0.0.1, on dev eth0
	[  +0.000021] ll header: 00000000: 6a eb 19 db a2 c3 16 22 6c 26 69 0a 08 00
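	
	Note: the repeated "martian source ... from 127.0.0.1" entries are the kernel flagging packets that carry a loopback source address on eth0; with kube-proxy setting route_localnet=1 (see its log below) so NodePorts answer on localhost, this logging is expected noise rather than a routing fault. The relevant sysctls can be checked on the node (a sketch):
	
	    $ out/minikube-linux-amd64 -p addons-764200 ssh "sysctl net.ipv4.conf.all.route_localnet net.ipv4.conf.all.log_martians"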
	
	* 
	* ==> etcd [9ade5fcffe970c50783657f25cc08fc94bb55881ba6e91c8cbcd984a3fd3be68] <==
	* {"level":"info","ts":"2023-07-31T10:34:28.628Z","caller":"embed/serve.go:198","msg":"serving client traffic securely","address":"192.168.49.2:2379"}
	{"level":"info","ts":"2023-07-31T10:34:28.628Z","caller":"embed/serve.go:198","msg":"serving client traffic securely","address":"127.0.0.1:2379"}
	{"level":"info","ts":"2023-07-31T10:34:48.321Z","caller":"traceutil/trace.go:171","msg":"trace[104545228] transaction","detail":"{read_only:false; response_revision:419; number_of_response:1; }","duration":"109.22788ms","start":"2023-07-31T10:34:48.212Z","end":"2023-07-31T10:34:48.321Z","steps":["trace[104545228] 'process raft request'  (duration: 109.048884ms)"],"step_count":1}
	{"level":"info","ts":"2023-07-31T10:34:48.325Z","caller":"traceutil/trace.go:171","msg":"trace[1244522750] transaction","detail":"{read_only:false; response_revision:420; number_of_response:1; }","duration":"107.141021ms","start":"2023-07-31T10:34:48.217Z","end":"2023-07-31T10:34:48.325Z","steps":["trace[1244522750] 'process raft request'  (duration: 106.775858ms)"],"step_count":1}
	{"level":"info","ts":"2023-07-31T10:34:49.305Z","caller":"traceutil/trace.go:171","msg":"trace[1906949855] transaction","detail":"{read_only:false; response_revision:433; number_of_response:1; }","duration":"100.800276ms","start":"2023-07-31T10:34:49.204Z","end":"2023-07-31T10:34:49.305Z","steps":["trace[1906949855] 'process raft request'  (duration: 19.823215ms)","trace[1906949855] 'compare'  (duration: 80.57799ms)"],"step_count":2}
	{"level":"info","ts":"2023-07-31T10:34:49.306Z","caller":"traceutil/trace.go:171","msg":"trace[588332892] linearizableReadLoop","detail":"{readStateIndex:446; appliedIndex:445; }","duration":"101.332525ms","start":"2023-07-31T10:34:49.205Z","end":"2023-07-31T10:34:49.306Z","steps":["trace[588332892] 'read index received'  (duration: 18.564674ms)","trace[588332892] 'applied index is now lower than readState.Index'  (duration: 82.767011ms)"],"step_count":2}
	{"level":"warn","ts":"2023-07-31T10:34:49.307Z","caller":"etcdserver/util.go:170","msg":"apply request took too long","took":"102.43763ms","expected-duration":"100ms","prefix":"read-only range ","request":"key:\"/registry/minions/addons-764200\" ","response":"range_response_count:1 size:5654"}
	{"level":"info","ts":"2023-07-31T10:34:49.309Z","caller":"traceutil/trace.go:171","msg":"trace[65088235] range","detail":"{range_begin:/registry/minions/addons-764200; range_end:; response_count:1; response_revision:434; }","duration":"103.904648ms","start":"2023-07-31T10:34:49.205Z","end":"2023-07-31T10:34:49.309Z","steps":["trace[65088235] 'agreement among raft nodes before linearized reading'  (duration: 102.381173ms)"],"step_count":1}
	{"level":"info","ts":"2023-07-31T10:34:49.906Z","caller":"traceutil/trace.go:171","msg":"trace[837714575] transaction","detail":"{read_only:false; response_revision:456; number_of_response:1; }","duration":"194.544689ms","start":"2023-07-31T10:34:49.711Z","end":"2023-07-31T10:34:49.906Z","steps":["trace[837714575] 'process raft request'  (duration: 104.024357ms)","trace[837714575] 'compare'  (duration: 89.825172ms)"],"step_count":2}
	{"level":"info","ts":"2023-07-31T10:34:49.906Z","caller":"traceutil/trace.go:171","msg":"trace[963916507] linearizableReadLoop","detail":"{readStateIndex:469; appliedIndex:468; }","duration":"194.17879ms","start":"2023-07-31T10:34:49.712Z","end":"2023-07-31T10:34:49.906Z","steps":["trace[963916507] 'read index received'  (duration: 16.401559ms)","trace[963916507] 'applied index is now lower than readState.Index'  (duration: 177.776005ms)"],"step_count":2}
	{"level":"warn","ts":"2023-07-31T10:34:49.909Z","caller":"etcdserver/util.go:170","msg":"apply request took too long","took":"197.552046ms","expected-duration":"100ms","prefix":"read-only range ","request":"key:\"/registry/serviceaccounts/kube-system/tiller\" ","response":"range_response_count:0 size:5"}
	{"level":"info","ts":"2023-07-31T10:34:49.914Z","caller":"traceutil/trace.go:171","msg":"trace[1691908377] range","detail":"{range_begin:/registry/serviceaccounts/kube-system/tiller; range_end:; response_count:0; response_revision:458; }","duration":"201.93149ms","start":"2023-07-31T10:34:49.712Z","end":"2023-07-31T10:34:49.914Z","steps":["trace[1691908377] 'agreement among raft nodes before linearized reading'  (duration: 197.44816ms)"],"step_count":1}
	{"level":"warn","ts":"2023-07-31T10:34:49.914Z","caller":"etcdserver/util.go:170","msg":"apply request took too long","took":"106.093191ms","expected-duration":"100ms","prefix":"read-only range ","request":"key:\"/registry/pods/kube-system/kube-ingress-dns-minikube\" ","response":"range_response_count:0 size:5"}
	{"level":"warn","ts":"2023-07-31T10:34:49.910Z","caller":"etcdserver/util.go:170","msg":"apply request took too long","took":"197.786487ms","expected-duration":"100ms","prefix":"read-only range ","request":"key:\"/registry/replicasets/default/cloud-spanner-emulator-88647b4cb\" ","response":"range_response_count:1 size:2228"}
	{"level":"info","ts":"2023-07-31T10:34:49.912Z","caller":"traceutil/trace.go:171","msg":"trace[155875189] transaction","detail":"{read_only:false; response_revision:457; number_of_response:1; }","duration":"101.538459ms","start":"2023-07-31T10:34:49.811Z","end":"2023-07-31T10:34:49.912Z","steps":["trace[155875189] 'process raft request'  (duration: 94.589653ms)"],"step_count":1}
	{"level":"warn","ts":"2023-07-31T10:34:49.914Z","caller":"etcdserver/util.go:170","msg":"apply request took too long","took":"202.031576ms","expected-duration":"100ms","prefix":"read-only range ","request":"key:\"/registry/controllers/kube-system/registry\" ","response":"range_response_count:0 size:5"}
	{"level":"info","ts":"2023-07-31T10:34:49.922Z","caller":"traceutil/trace.go:171","msg":"trace[1629450343] range","detail":"{range_begin:/registry/controllers/kube-system/registry; range_end:; response_count:0; response_revision:458; }","duration":"210.52507ms","start":"2023-07-31T10:34:49.712Z","end":"2023-07-31T10:34:49.922Z","steps":["trace[1629450343] 'agreement among raft nodes before linearized reading'  (duration: 201.987672ms)"],"step_count":1}
	{"level":"info","ts":"2023-07-31T10:34:49.922Z","caller":"traceutil/trace.go:171","msg":"trace[1447743271] range","detail":"{range_begin:/registry/pods/kube-system/kube-ingress-dns-minikube; range_end:; response_count:0; response_revision:458; }","duration":"114.368807ms","start":"2023-07-31T10:34:49.808Z","end":"2023-07-31T10:34:49.922Z","steps":["trace[1447743271] 'agreement among raft nodes before linearized reading'  (duration: 106.060731ms)"],"step_count":1}
	{"level":"info","ts":"2023-07-31T10:34:49.922Z","caller":"traceutil/trace.go:171","msg":"trace[60798011] range","detail":"{range_begin:/registry/replicasets/default/cloud-spanner-emulator-88647b4cb; range_end:; response_count:1; response_revision:458; }","duration":"210.634953ms","start":"2023-07-31T10:34:49.712Z","end":"2023-07-31T10:34:49.922Z","steps":["trace[60798011] 'agreement among raft nodes before linearized reading'  (duration: 197.745059ms)"],"step_count":1}
	{"level":"info","ts":"2023-07-31T10:35:35.007Z","caller":"traceutil/trace.go:171","msg":"trace[2057259337] transaction","detail":"{read_only:false; response_revision:939; number_of_response:1; }","duration":"116.511445ms","start":"2023-07-31T10:35:34.891Z","end":"2023-07-31T10:35:35.007Z","steps":["trace[2057259337] 'process raft request'  (duration: 116.39629ms)"],"step_count":1}
	{"level":"warn","ts":"2023-07-31T10:35:35.267Z","caller":"etcdserver/util.go:170","msg":"apply request took too long","took":"121.551405ms","expected-duration":"100ms","prefix":"","request":"header:<ID:8128022797235106398 username:\"kube-apiserver-etcd-client\" auth_revision:1 > txn:<compare:<target:MOD key:\"/registry/events/gcp-auth/gcp-auth-certs-create.1776ed669bf8b481\" mod_revision:0 > success:<request_put:<key:\"/registry/events/gcp-auth/gcp-auth-certs-create.1776ed669bf8b481\" value_size:531 lease:8128022797235106306 >> failure:<>>","response":"size:16"}
	{"level":"info","ts":"2023-07-31T10:35:35.267Z","caller":"traceutil/trace.go:171","msg":"trace[569300543] transaction","detail":"{read_only:false; response_revision:941; number_of_response:1; }","duration":"183.047066ms","start":"2023-07-31T10:35:35.084Z","end":"2023-07-31T10:35:35.267Z","steps":["trace[569300543] 'process raft request'  (duration: 61.083084ms)","trace[569300543] 'compare'  (duration: 121.472773ms)"],"step_count":2}
	{"level":"info","ts":"2023-07-31T10:35:35.405Z","caller":"traceutil/trace.go:171","msg":"trace[34748280] transaction","detail":"{read_only:false; response_revision:942; number_of_response:1; }","duration":"128.085159ms","start":"2023-07-31T10:35:35.277Z","end":"2023-07-31T10:35:35.405Z","steps":["trace[34748280] 'process raft request'  (duration: 127.915303ms)"],"step_count":1}
	{"level":"info","ts":"2023-07-31T10:36:12.956Z","caller":"traceutil/trace.go:171","msg":"trace[354487024] transaction","detail":"{read_only:false; response_revision:1240; number_of_response:1; }","duration":"163.921567ms","start":"2023-07-31T10:36:12.792Z","end":"2023-07-31T10:36:12.956Z","steps":["trace[354487024] 'process raft request'  (duration: 101.49301ms)","trace[354487024] 'compare'  (duration: 62.317537ms)"],"step_count":2}
	{"level":"info","ts":"2023-07-31T10:36:29.618Z","caller":"traceutil/trace.go:171","msg":"trace[243144482] transaction","detail":"{read_only:false; response_revision:1359; number_of_response:1; }","duration":"115.157361ms","start":"2023-07-31T10:36:29.503Z","end":"2023-07-31T10:36:29.618Z","steps":["trace[243144482] 'process raft request'  (duration: 59.061263ms)","trace[243144482] 'compare'  (duration: 55.944625ms)"],"step_count":2}
	
	* 
	* ==> gcp-auth [5a45bef9ce5d08dd08921d29d7af29d12eede438075fbb346975a41f5089ce99] <==
	* 2023/07/31 10:35:46 GCP Auth Webhook started!
	2023/07/31 10:36:09 Ready to marshal response ...
	2023/07/31 10:36:09 Ready to write response ...
	2023/07/31 10:36:12 Ready to marshal response ...
	2023/07/31 10:36:12 Ready to write response ...
	2023/07/31 10:36:13 Ready to marshal response ...
	2023/07/31 10:36:13 Ready to write response ...
	2023/07/31 10:36:13 Ready to marshal response ...
	2023/07/31 10:36:13 Ready to write response ...
	2023/07/31 10:36:13 Ready to marshal response ...
	2023/07/31 10:36:13 Ready to write response ...
	2023/07/31 10:36:20 Ready to marshal response ...
	2023/07/31 10:36:20 Ready to write response ...
	2023/07/31 10:36:29 Ready to marshal response ...
	2023/07/31 10:36:29 Ready to write response ...
	2023/07/31 10:36:59 Ready to marshal response ...
	2023/07/31 10:36:59 Ready to write response ...
	2023/07/31 10:38:33 Ready to marshal response ...
	2023/07/31 10:38:33 Ready to write response ...
	
	* 
	* ==> kernel <==
	*  10:38:44 up 21 min,  0 users,  load average: 0.53, 0.72, 0.36
	Linux addons-764200 5.15.0-1038-gcp #46~20.04.1-Ubuntu SMP Fri Jul 14 09:48:19 UTC 2023 x86_64 x86_64 x86_64 GNU/Linux
	PRETTY_NAME="Ubuntu 22.04.2 LTS"
	
	* 
	* ==> kindnet [007f76c3780c5ef75bce59364e9671dc3e8b76f72ef24db4fdec6b22ce5cf6a9] <==
	* I0731 10:36:38.828447       1 main.go:227] handling current node
	I0731 10:36:48.837693       1 main.go:223] Handling node with IPs: map[192.168.49.2:{}]
	I0731 10:36:48.837714       1 main.go:227] handling current node
	I0731 10:36:58.852152       1 main.go:223] Handling node with IPs: map[192.168.49.2:{}]
	I0731 10:36:58.852178       1 main.go:227] handling current node
	I0731 10:37:08.908436       1 main.go:223] Handling node with IPs: map[192.168.49.2:{}]
	I0731 10:37:08.908476       1 main.go:227] handling current node
	I0731 10:37:18.912435       1 main.go:223] Handling node with IPs: map[192.168.49.2:{}]
	I0731 10:37:18.912457       1 main.go:227] handling current node
	I0731 10:37:28.919587       1 main.go:223] Handling node with IPs: map[192.168.49.2:{}]
	I0731 10:37:28.919611       1 main.go:227] handling current node
	I0731 10:37:38.923735       1 main.go:223] Handling node with IPs: map[192.168.49.2:{}]
	I0731 10:37:38.923758       1 main.go:227] handling current node
	I0731 10:37:48.928168       1 main.go:223] Handling node with IPs: map[192.168.49.2:{}]
	I0731 10:37:48.928189       1 main.go:227] handling current node
	I0731 10:37:58.932494       1 main.go:223] Handling node with IPs: map[192.168.49.2:{}]
	I0731 10:37:58.932515       1 main.go:227] handling current node
	I0731 10:38:08.935868       1 main.go:223] Handling node with IPs: map[192.168.49.2:{}]
	I0731 10:38:08.935889       1 main.go:227] handling current node
	I0731 10:38:18.939305       1 main.go:223] Handling node with IPs: map[192.168.49.2:{}]
	I0731 10:38:18.939327       1 main.go:227] handling current node
	I0731 10:38:28.951592       1 main.go:223] Handling node with IPs: map[192.168.49.2:{}]
	I0731 10:38:28.951614       1 main.go:227] handling current node
	I0731 10:38:38.955745       1 main.go:223] Handling node with IPs: map[192.168.49.2:{}]
	I0731 10:38:38.955774       1 main.go:227] handling current node
	
	* 
	* ==> kube-apiserver [1ae9d53052487389436068ecc6bb995ed5693d7bc03d910fd012f7b9a8f50f51] <==
	* I0731 10:37:14.899244       1 handler.go:232] Adding GroupVersion snapshot.storage.k8s.io v1 to ResourceManager
	I0731 10:37:14.899297       1 handler.go:232] Adding GroupVersion snapshot.storage.k8s.io v1beta1 to ResourceManager
	I0731 10:37:14.904934       1 handler.go:232] Adding GroupVersion snapshot.storage.k8s.io v1 to ResourceManager
	I0731 10:37:14.904996       1 handler.go:232] Adding GroupVersion snapshot.storage.k8s.io v1beta1 to ResourceManager
	I0731 10:37:14.911193       1 handler.go:232] Adding GroupVersion snapshot.storage.k8s.io v1 to ResourceManager
	I0731 10:37:14.911316       1 handler.go:232] Adding GroupVersion snapshot.storage.k8s.io v1beta1 to ResourceManager
	I0731 10:37:14.911977       1 handler.go:232] Adding GroupVersion snapshot.storage.k8s.io v1 to ResourceManager
	I0731 10:37:14.912015       1 handler.go:232] Adding GroupVersion snapshot.storage.k8s.io v1beta1 to ResourceManager
	I0731 10:37:14.920553       1 handler.go:232] Adding GroupVersion snapshot.storage.k8s.io v1 to ResourceManager
	I0731 10:37:14.920669       1 handler.go:232] Adding GroupVersion snapshot.storage.k8s.io v1beta1 to ResourceManager
	I0731 10:37:14.922641       1 handler.go:232] Adding GroupVersion snapshot.storage.k8s.io v1 to ResourceManager
	I0731 10:37:14.922752       1 handler.go:232] Adding GroupVersion snapshot.storage.k8s.io v1beta1 to ResourceManager
	I0731 10:37:14.931273       1 handler.go:232] Adding GroupVersion snapshot.storage.k8s.io v1 to ResourceManager
	I0731 10:37:14.931323       1 handler.go:232] Adding GroupVersion snapshot.storage.k8s.io v1beta1 to ResourceManager
	I0731 10:37:14.932989       1 handler.go:232] Adding GroupVersion snapshot.storage.k8s.io v1 to ResourceManager
	I0731 10:37:14.933030       1 handler.go:232] Adding GroupVersion snapshot.storage.k8s.io v1beta1 to ResourceManager
	W0731 10:37:15.912847       1 cacher.go:171] Terminating all watchers from cacher volumesnapshotclasses.snapshot.storage.k8s.io
	W0731 10:37:15.933691       1 cacher.go:171] Terminating all watchers from cacher volumesnapshotcontents.snapshot.storage.k8s.io
	W0731 10:37:15.941322       1 cacher.go:171] Terminating all watchers from cacher volumesnapshots.snapshot.storage.k8s.io
	E0731 10:37:40.327428       1 handler_proxy.go:144] error resolving kube-system/metrics-server: service "metrics-server" not found
	W0731 10:37:40.327448       1 handler_proxy.go:100] no RequestInfo found in the context
	E0731 10:37:40.327480       1 controller.go:113] loading OpenAPI spec for "v1beta1.metrics.k8s.io" failed with: failed to retrieve openAPI spec, http error: ResponseCode: 503, Body: service unavailable
	, Header: map[Content-Type:[text/plain; charset=utf-8] X-Content-Type-Options:[nosniff]]
	I0731 10:37:40.327486       1 controller.go:126] OpenAPI AggregationController: action for item v1beta1.metrics.k8s.io: Rate Limited Requeue.
	I0731 10:38:34.118052       1 alloc.go:330] "allocated clusterIPs" service="default/hello-world-app" clusterIPs=map[IPv4:10.105.234.62]
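	
	Note: the v1beta1.metrics.k8s.io errors mean an aggregated APIService is still registered while its backing kube-system/metrics-server Service does not exist, so the apiserver parks the item on a rate-limited requeue instead of failing outright. The stale registration is visible with (a sketch):
	
	    $ kubectl --context addons-764200 get apiservice v1beta1.metrics.k8s.io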
	
	* 
	* ==> kube-controller-manager [0a571456508c83c4ab40c3794bcc071b549b6659ee9e5eb6638f03c4d1d2e8e9] <==
	* E0731 10:37:30.626587       1 reflector.go:148] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
	W0731 10:37:31.775635       1 reflector.go:533] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
	E0731 10:37:31.775664       1 reflector.go:148] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
	W0731 10:37:33.984957       1 reflector.go:533] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
	E0731 10:37:33.984991       1 reflector.go:148] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
	W0731 10:37:34.749166       1 reflector.go:533] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
	E0731 10:37:34.749196       1 reflector.go:148] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
	W0731 10:37:48.259096       1 reflector.go:533] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
	E0731 10:37:48.259124       1 reflector.go:148] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
	W0731 10:37:53.139300       1 reflector.go:533] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
	E0731 10:37:53.139328       1 reflector.go:148] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
	W0731 10:37:57.696662       1 reflector.go:533] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
	E0731 10:37:57.696690       1 reflector.go:148] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
	W0731 10:38:16.291592       1 reflector.go:533] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
	E0731 10:38:16.291630       1 reflector.go:148] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
	W0731 10:38:16.566522       1 reflector.go:533] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
	E0731 10:38:16.566550       1 reflector.go:148] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
	I0731 10:38:33.960802       1 event.go:307] "Event occurred" object="default/hello-world-app" fieldPath="" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled up replica set hello-world-app-65bdb79f98 to 1"
	I0731 10:38:33.971093       1 event.go:307] "Event occurred" object="default/hello-world-app-65bdb79f98" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: hello-world-app-65bdb79f98-f9nkc"
	W0731 10:38:35.687214       1 reflector.go:533] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
	E0731 10:38:35.687243       1 reflector.go:148] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
	I0731 10:38:36.340109       1 job_controller.go:523] enqueueing job ingress-nginx/ingress-nginx-admission-create
	I0731 10:38:36.344015       1 job_controller.go:523] enqueueing job ingress-nginx/ingress-nginx-admission-patch
	W0731 10:38:42.154285       1 reflector.go:533] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
	E0731 10:38:42.154312       1 reflector.go:148] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
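	
	Note: the repeating PartialObjectMetadata failures come from metadata informers (used by the garbage collector and quota controllers) still trying to list the snapshot.storage.k8s.io resources whose watchers the apiserver terminated at 10:37:15, most likely because the CSI snapshot CRDs were removed when that addon was disabled; they are noisy but harmless once discovery refreshes. Which snapshot CRDs remain can be confirmed with (a sketch):
	
	    $ kubectl --context addons-764200 get crd | grep snapshot.storage.k8s.io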
	
	* 
	* ==> kube-proxy [c2c63548d36553f9f32ad9e8ee51b1b3f9d11cd69a6922febb8472223d4fddba] <==
	* I0731 10:34:49.008479       1 node.go:141] Successfully retrieved node IP: 192.168.49.2
	I0731 10:34:49.008582       1 server_others.go:110] "Detected node IP" address="192.168.49.2"
	I0731 10:34:49.008645       1 server_others.go:554] "Using iptables proxy"
	I0731 10:34:49.713806       1 server_others.go:192] "Using iptables Proxier"
	I0731 10:34:49.713918       1 server_others.go:199] "kube-proxy running in dual-stack mode" ipFamily=IPv4
	I0731 10:34:49.713954       1 server_others.go:200] "Creating dualStackProxier for iptables"
	I0731 10:34:49.713988       1 server_others.go:484] "Detect-local-mode set to ClusterCIDR, but no IPv6 cluster CIDR defined, defaulting to no-op detect-local for IPv6"
	I0731 10:34:49.714025       1 proxier.go:253] "Setting route_localnet=1 to allow node-ports on localhost; to change this either disable iptables.localhostNodePorts (--iptables-localhost-nodeports) or set nodePortAddresses (--nodeport-addresses) to filter loopback addresses"
	I0731 10:34:49.714708       1 server.go:658] "Version info" version="v1.27.3"
	I0731 10:34:49.715126       1 server.go:660] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
	I0731 10:34:49.716071       1 config.go:188] "Starting service config controller"
	I0731 10:34:49.717521       1 shared_informer.go:311] Waiting for caches to sync for service config
	I0731 10:34:49.716690       1 config.go:315] "Starting node config controller"
	I0731 10:34:49.717607       1 shared_informer.go:311] Waiting for caches to sync for node config
	I0731 10:34:49.717178       1 config.go:97] "Starting endpoint slice config controller"
	I0731 10:34:49.717655       1 shared_informer.go:311] Waiting for caches to sync for endpoint slice config
	I0731 10:34:49.922285       1 shared_informer.go:318] Caches are synced for endpoint slice config
	I0731 10:34:49.922428       1 shared_informer.go:318] Caches are synced for node config
	I0731 10:34:50.017818       1 shared_informer.go:318] Caches are synced for service config
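	
	Note: kube-proxy starts the iptables proxier in dual-stack mode but, with no IPv6 cluster CIDR configured, falls back to a no-op local-traffic detector for IPv6; the route_localnet=1 line is what makes NodePorts reachable via 127.0.0.1 (and what produces the martian-source dmesg entries above). The toggle it mentions lives in the kube-proxy configuration (a sketch; the key name assumes the v1.27 KubeProxyConfiguration schema):
	
	    $ kubectl --context addons-764200 -n kube-system get configmap kube-proxy -o yaml | grep -i localhostNodePorts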
	
	* 
	* ==> kube-scheduler [553026fc1df52f8253c61145f3718151f9e0efbfba89abf2e45ba736fa6dca79] <==
	* W0731 10:34:30.017292       1 reflector.go:533] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumeclaims" in API group "" at the cluster scope
	E0731 10:34:30.017322       1 reflector.go:148] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.PersistentVolumeClaim: failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumeclaims" in API group "" at the cluster scope
	W0731 10:34:30.017329       1 reflector.go:533] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.CSIStorageCapacity: csistoragecapacities.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csistoragecapacities" in API group "storage.k8s.io" at the cluster scope
	E0731 10:34:30.017343       1 reflector.go:148] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.CSIStorageCapacity: failed to list *v1.CSIStorageCapacity: csistoragecapacities.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csistoragecapacities" in API group "storage.k8s.io" at the cluster scope
	W0731 10:34:30.017385       1 reflector.go:533] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumes" in API group "" at the cluster scope
	E0731 10:34:30.017397       1 reflector.go:148] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.PersistentVolume: failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumes" in API group "" at the cluster scope
	W0731 10:34:30.017419       1 reflector.go:533] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.CSIDriver: csidrivers.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csidrivers" in API group "storage.k8s.io" at the cluster scope
	E0731 10:34:30.017468       1 reflector.go:148] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: csidrivers.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csidrivers" in API group "storage.k8s.io" at the cluster scope
	W0731 10:34:30.017744       1 reflector.go:533] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.Namespace: namespaces is forbidden: User "system:kube-scheduler" cannot list resource "namespaces" in API group "" at the cluster scope
	E0731 10:34:30.017816       1 reflector.go:148] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.Namespace: failed to list *v1.Namespace: namespaces is forbidden: User "system:kube-scheduler" cannot list resource "namespaces" in API group "" at the cluster scope
	W0731 10:34:30.017889       1 reflector.go:533] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.CSINode: csinodes.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csinodes" in API group "storage.k8s.io" at the cluster scope
	W0731 10:34:30.017418       1 reflector.go:533] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.StatefulSet: statefulsets.apps is forbidden: User "system:kube-scheduler" cannot list resource "statefulsets" in API group "apps" at the cluster scope
	E0731 10:34:30.018100       1 reflector.go:148] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.CSINode: failed to list *v1.CSINode: csinodes.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csinodes" in API group "storage.k8s.io" at the cluster scope
	E0731 10:34:30.018282       1 reflector.go:148] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.StatefulSet: failed to list *v1.StatefulSet: statefulsets.apps is forbidden: User "system:kube-scheduler" cannot list resource "statefulsets" in API group "apps" at the cluster scope
	W0731 10:34:30.018759       1 reflector.go:533] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.Pod: pods is forbidden: User "system:kube-scheduler" cannot list resource "pods" in API group "" at the cluster scope
	E0731 10:34:30.019928       1 reflector.go:148] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.Pod: failed to list *v1.Pod: pods is forbidden: User "system:kube-scheduler" cannot list resource "pods" in API group "" at the cluster scope
	W0731 10:34:30.849964       1 reflector.go:533] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.CSINode: csinodes.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csinodes" in API group "storage.k8s.io" at the cluster scope
	E0731 10:34:30.849999       1 reflector.go:148] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.CSINode: failed to list *v1.CSINode: csinodes.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csinodes" in API group "storage.k8s.io" at the cluster scope
	W0731 10:34:30.949852       1 reflector.go:533] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.Namespace: namespaces is forbidden: User "system:kube-scheduler" cannot list resource "namespaces" in API group "" at the cluster scope
	E0731 10:34:30.949889       1 reflector.go:148] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.Namespace: failed to list *v1.Namespace: namespaces is forbidden: User "system:kube-scheduler" cannot list resource "namespaces" in API group "" at the cluster scope
	W0731 10:34:31.012497       1 reflector.go:533] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.Node: nodes is forbidden: User "system:kube-scheduler" cannot list resource "nodes" in API group "" at the cluster scope
	E0731 10:34:31.012527       1 reflector.go:148] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.Node: failed to list *v1.Node: nodes is forbidden: User "system:kube-scheduler" cannot list resource "nodes" in API group "" at the cluster scope
	W0731 10:34:31.179288       1 reflector.go:533] pkg/server/dynamiccertificates/configmap_cafile_content.go:206: failed to list *v1.ConfigMap: configmaps "extension-apiserver-authentication" is forbidden: User "system:kube-scheduler" cannot list resource "configmaps" in API group "" in the namespace "kube-system"
	E0731 10:34:31.179314       1 reflector.go:148] pkg/server/dynamiccertificates/configmap_cafile_content.go:206: Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps "extension-apiserver-authentication" is forbidden: User "system:kube-scheduler" cannot list resource "configmaps" in API group "" in the namespace "kube-system"
	I0731 10:34:33.010032       1 shared_informer.go:318] Caches are synced for client-ca::kube-system::extension-apiserver-authentication::client-ca-file
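	
	Note: the burst of "forbidden" list/watch errors is the usual control-plane bootstrap race: the scheduler's informers start before the apiserver has reconciled the default system:kube-scheduler RBAC bindings, and the final "Caches are synced" line shows it recovering on its own. Once the cluster is up, the permissions can be spot-checked (a sketch):
	
	    $ kubectl --context addons-764200 auth can-i list pods --as=system:kube-scheduler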
	
	* 
	* ==> kubelet <==
	* Jul 31 10:38:35 addons-764200 kubelet[1561]: I0731 10:38:35.126480    1561 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zczvc\" (UniqueName: \"kubernetes.io/projected/e033e139-c37e-4e6c-a59d-13b69a05c5db-kube-api-access-zczvc\") pod \"e033e139-c37e-4e6c-a59d-13b69a05c5db\" (UID: \"e033e139-c37e-4e6c-a59d-13b69a05c5db\") "
	Jul 31 10:38:35 addons-764200 kubelet[1561]: I0731 10:38:35.128570    1561 operation_generator.go:878] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e033e139-c37e-4e6c-a59d-13b69a05c5db-kube-api-access-zczvc" (OuterVolumeSpecName: "kube-api-access-zczvc") pod "e033e139-c37e-4e6c-a59d-13b69a05c5db" (UID: "e033e139-c37e-4e6c-a59d-13b69a05c5db"). InnerVolumeSpecName "kube-api-access-zczvc". PluginName "kubernetes.io/projected", VolumeGidValue ""
	Jul 31 10:38:35 addons-764200 kubelet[1561]: I0731 10:38:35.227014    1561 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-zczvc\" (UniqueName: \"kubernetes.io/projected/e033e139-c37e-4e6c-a59d-13b69a05c5db-kube-api-access-zczvc\") on node \"addons-764200\" DevicePath \"\""
	Jul 31 10:38:35 addons-764200 kubelet[1561]: I0731 10:38:35.831872    1561 scope.go:115] "RemoveContainer" containerID="752898d2fd5c49b9079e8b70a5d3e5161b40ae2aa779ff42522a6e079bc2be4b"
	Jul 31 10:38:35 addons-764200 kubelet[1561]: I0731 10:38:35.840582    1561 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="default/hello-world-app-65bdb79f98-f9nkc" podStartSLOduration=1.986139 podCreationTimestamp="2023-07-31 10:38:33 +0000 UTC" firstStartedPulling="2023-07-31 10:38:34.409134643 +0000 UTC m=+241.669197793" lastFinishedPulling="2023-07-31 10:38:35.263531354 +0000 UTC m=+242.523594510" observedRunningTime="2023-07-31 10:38:35.840328574 +0000 UTC m=+243.100391742" watchObservedRunningTime="2023-07-31 10:38:35.840535717 +0000 UTC m=+243.100598883"
	Jul 31 10:38:35 addons-764200 kubelet[1561]: I0731 10:38:35.849367    1561 scope.go:115] "RemoveContainer" containerID="752898d2fd5c49b9079e8b70a5d3e5161b40ae2aa779ff42522a6e079bc2be4b"
	Jul 31 10:38:35 addons-764200 kubelet[1561]: E0731 10:38:35.849767    1561 remote_runtime.go:415] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"752898d2fd5c49b9079e8b70a5d3e5161b40ae2aa779ff42522a6e079bc2be4b\": container with ID starting with 752898d2fd5c49b9079e8b70a5d3e5161b40ae2aa779ff42522a6e079bc2be4b not found: ID does not exist" containerID="752898d2fd5c49b9079e8b70a5d3e5161b40ae2aa779ff42522a6e079bc2be4b"
	Jul 31 10:38:35 addons-764200 kubelet[1561]: I0731 10:38:35.849820    1561 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={Type:cri-o ID:752898d2fd5c49b9079e8b70a5d3e5161b40ae2aa779ff42522a6e079bc2be4b} err="failed to get container status \"752898d2fd5c49b9079e8b70a5d3e5161b40ae2aa779ff42522a6e079bc2be4b\": rpc error: code = NotFound desc = could not find container \"752898d2fd5c49b9079e8b70a5d3e5161b40ae2aa779ff42522a6e079bc2be4b\": container with ID starting with 752898d2fd5c49b9079e8b70a5d3e5161b40ae2aa779ff42522a6e079bc2be4b not found: ID does not exist"
	Jul 31 10:38:36 addons-764200 kubelet[1561]: E0731 10:38:36.351357    1561 event.go:280] Server rejected event '&v1.Event{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ObjectMeta:v1.ObjectMeta{Name:"ingress-nginx-controller-7799c6795f-bscvk.1776ed90d040e1b8", GenerateName:"", Namespace:"ingress-nginx", SelfLink:"", UID:"", ResourceVersion:"", Generation:0, CreationTimestamp:time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), DeletionTimestamp:<nil>, DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string(nil), Annotations:map[string]string(nil), OwnerReferences:[]v1.OwnerReference(nil), Finalizers:[]string(nil), ManagedFields:[]v1.ManagedFieldsEntry(nil)}, InvolvedObject:v1.ObjectReference{Kind:"Pod", Namespace:"ingress-nginx", Name:"ingress-nginx-controller-7799c6795f-bscvk", UID:"c349b648-2877-496c-816d-60ceee2891a5", APIVersion:"v1", ResourceVersion:"773", FieldPath:"spec.containers{controller}"}, Reason:"Killing", Message:"Stopping container controller", Source:v1.EventSource{Component:"kubelet", Host:"addons-764200"}, FirstTimestamp:time.Date(2023, time.July, 31, 10, 38, 36, 349186488, time.Local), LastTimestamp:time.Date(2023, time.July, 31, 10, 38, 36, 349186488, time.Local), Count:1, Type:"Normal", EventTime:time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), Series:(*v1.EventSeries)(nil), Action:"", Related:(*v1.ObjectReference)(nil), ReportingController:"", ReportingInstance:""}': 'events "ingress-nginx-controller-7799c6795f-bscvk.1776ed90d040e1b8" is forbidden: unable to create new content in namespace ingress-nginx because it is being terminated' (will not retry!)
	Jul 31 10:38:36 addons-764200 kubelet[1561]: I0731 10:38:36.824559    1561 kubelet_volumes.go:161] "Cleaned up orphaned pod volumes dir" podUID=0df6f8f8-c241-4511-8d93-fc8044ee4cef path="/var/lib/kubelet/pods/0df6f8f8-c241-4511-8d93-fc8044ee4cef/volumes"
	Jul 31 10:38:36 addons-764200 kubelet[1561]: I0731 10:38:36.824957    1561 kubelet_volumes.go:161] "Cleaned up orphaned pod volumes dir" podUID=10963cf1-2988-4691-961b-9c93998facc7 path="/var/lib/kubelet/pods/10963cf1-2988-4691-961b-9c93998facc7/volumes"
	Jul 31 10:38:36 addons-764200 kubelet[1561]: I0731 10:38:36.825365    1561 kubelet_volumes.go:161] "Cleaned up orphaned pod volumes dir" podUID=e033e139-c37e-4e6c-a59d-13b69a05c5db path="/var/lib/kubelet/pods/e033e139-c37e-4e6c-a59d-13b69a05c5db/volumes"
	Jul 31 10:38:37 addons-764200 kubelet[1561]: I0731 10:38:37.743442    1561 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/c349b648-2877-496c-816d-60ceee2891a5-webhook-cert\") pod \"c349b648-2877-496c-816d-60ceee2891a5\" (UID: \"c349b648-2877-496c-816d-60ceee2891a5\") "
	Jul 31 10:38:37 addons-764200 kubelet[1561]: I0731 10:38:37.743573    1561 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-55x8r\" (UniqueName: \"kubernetes.io/projected/c349b648-2877-496c-816d-60ceee2891a5-kube-api-access-55x8r\") pod \"c349b648-2877-496c-816d-60ceee2891a5\" (UID: \"c349b648-2877-496c-816d-60ceee2891a5\") "
	Jul 31 10:38:37 addons-764200 kubelet[1561]: I0731 10:38:37.745675    1561 operation_generator.go:878] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c349b648-2877-496c-816d-60ceee2891a5-webhook-cert" (OuterVolumeSpecName: "webhook-cert") pod "c349b648-2877-496c-816d-60ceee2891a5" (UID: "c349b648-2877-496c-816d-60ceee2891a5"). InnerVolumeSpecName "webhook-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
	Jul 31 10:38:37 addons-764200 kubelet[1561]: I0731 10:38:37.747195    1561 operation_generator.go:878] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c349b648-2877-496c-816d-60ceee2891a5-kube-api-access-55x8r" (OuterVolumeSpecName: "kube-api-access-55x8r") pod "c349b648-2877-496c-816d-60ceee2891a5" (UID: "c349b648-2877-496c-816d-60ceee2891a5"). InnerVolumeSpecName "kube-api-access-55x8r". PluginName "kubernetes.io/projected", VolumeGidValue ""
	Jul 31 10:38:37 addons-764200 kubelet[1561]: I0731 10:38:37.837945    1561 scope.go:115] "RemoveContainer" containerID="c0456423168d7670558eaefa6eb6b70bbc1d5f7fc6f7bd99ba0b04da94e33dec"
	Jul 31 10:38:37 addons-764200 kubelet[1561]: I0731 10:38:37.844137    1561 reconciler_common.go:300] "Volume detached for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/c349b648-2877-496c-816d-60ceee2891a5-webhook-cert\") on node \"addons-764200\" DevicePath \"\""
	Jul 31 10:38:37 addons-764200 kubelet[1561]: I0731 10:38:37.844172    1561 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-55x8r\" (UniqueName: \"kubernetes.io/projected/c349b648-2877-496c-816d-60ceee2891a5-kube-api-access-55x8r\") on node \"addons-764200\" DevicePath \"\""
	Jul 31 10:38:37 addons-764200 kubelet[1561]: I0731 10:38:37.854604    1561 scope.go:115] "RemoveContainer" containerID="c0456423168d7670558eaefa6eb6b70bbc1d5f7fc6f7bd99ba0b04da94e33dec"
	Jul 31 10:38:37 addons-764200 kubelet[1561]: E0731 10:38:37.854933    1561 remote_runtime.go:415] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c0456423168d7670558eaefa6eb6b70bbc1d5f7fc6f7bd99ba0b04da94e33dec\": container with ID starting with c0456423168d7670558eaefa6eb6b70bbc1d5f7fc6f7bd99ba0b04da94e33dec not found: ID does not exist" containerID="c0456423168d7670558eaefa6eb6b70bbc1d5f7fc6f7bd99ba0b04da94e33dec"
	Jul 31 10:38:37 addons-764200 kubelet[1561]: I0731 10:38:37.854967    1561 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={Type:cri-o ID:c0456423168d7670558eaefa6eb6b70bbc1d5f7fc6f7bd99ba0b04da94e33dec} err="failed to get container status \"c0456423168d7670558eaefa6eb6b70bbc1d5f7fc6f7bd99ba0b04da94e33dec\": rpc error: code = NotFound desc = could not find container \"c0456423168d7670558eaefa6eb6b70bbc1d5f7fc6f7bd99ba0b04da94e33dec\": container with ID starting with c0456423168d7670558eaefa6eb6b70bbc1d5f7fc6f7bd99ba0b04da94e33dec not found: ID does not exist"
	Jul 31 10:38:38 addons-764200 kubelet[1561]: I0731 10:38:38.824136    1561 kubelet_volumes.go:161] "Cleaned up orphaned pod volumes dir" podUID=c349b648-2877-496c-816d-60ceee2891a5 path="/var/lib/kubelet/pods/c349b648-2877-496c-816d-60ceee2891a5/volumes"
	Jul 31 10:38:38 addons-764200 kubelet[1561]: W0731 10:38:38.872485    1561 container.go:586] Failed to update stats for container "/crio-79ace5646001320d35850e7863b69d51cc2ef6284b44cf4e753725306d60c032": unable to determine device info for dir: /var/lib/containers/storage/overlay/095e56e7dd158abd32d1bd81673047777e159e39c25f1f26e2bba07a18d0ddc2/diff: stat failed on /var/lib/containers/storage/overlay/095e56e7dd158abd32d1bd81673047777e159e39c25f1f26e2bba07a18d0ddc2/diff with error: no such file or directory, continuing to push stats
	Jul 31 10:38:44 addons-764200 kubelet[1561]: W0731 10:38:44.293719    1561 container.go:586] Failed to update stats for container "/crio-69e2b8bb52dd630929fdc1a0c1041e711560acd4d7a4886952ca8e5c60c4c7a2": unable to determine device info for dir: /var/lib/containers/storage/overlay/2fb7496eb555100a877f6935ad3e0ab41ddda67f58e211f5620ede33864b1d7a/diff: stat failed on /var/lib/containers/storage/overlay/2fb7496eb555100a877f6935ad3e0ab41ddda67f58e211f5620ede33864b1d7a/diff with error: no such file or directory, continuing to push stats
	
	* 
	* ==> storage-provisioner [d2bf13367ca800d8faa8bb2c70548eba8bb810e21cb6af43790f5d70f0a7d881] <==
	* I0731 10:35:20.349609       1 storage_provisioner.go:116] Initializing the minikube storage provisioner...
	I0731 10:35:20.359651       1 storage_provisioner.go:141] Storage provisioner initialized, now starting service!
	I0731 10:35:20.359688       1 leaderelection.go:243] attempting to acquire leader lease kube-system/k8s.io-minikube-hostpath...
	I0731 10:35:20.405376       1 leaderelection.go:253] successfully acquired lease kube-system/k8s.io-minikube-hostpath
	I0731 10:35:20.405524       1 controller.go:835] Starting provisioner controller k8s.io/minikube-hostpath_addons-764200_da9a6909-8e87-424f-a2d6-feae5a4bd413!
	I0731 10:35:20.405525       1 event.go:282] Event(v1.ObjectReference{Kind:"Endpoints", Namespace:"kube-system", Name:"k8s.io-minikube-hostpath", UID:"d9d27ecf-259a-42a7-8cf4-d2e46fd3548f", APIVersion:"v1", ResourceVersion:"858", FieldPath:""}): type: 'Normal' reason: 'LeaderElection' addons-764200_da9a6909-8e87-424f-a2d6-feae5a4bd413 became leader
	I0731 10:35:20.505731       1 controller.go:884] Started provisioner controller k8s.io/minikube-hostpath_addons-764200_da9a6909-8e87-424f-a2d6-feae5a4bd413!
	

                                                
                                                
-- /stdout --
helpers_test.go:254: (dbg) Run:  out/minikube-linux-amd64 status --format={{.APIServer}} -p addons-764200 -n addons-764200
helpers_test.go:261: (dbg) Run:  kubectl --context addons-764200 get po -o=jsonpath={.items[*].metadata.name} -A --field-selector=status.phase!=Running
helpers_test.go:285: <<< TestAddons/parallel/Ingress FAILED: end of post-mortem logs <<<
helpers_test.go:286: ---------------------/post-mortem---------------------------------
--- FAIL: TestAddons/parallel/Ingress (166.41s)

                                                
                                    
TestFunctional/parallel/MountCmd/specific-port (14.58s)

                                                
                                                
=== RUN   TestFunctional/parallel/MountCmd/specific-port
functional_test_mount_test.go:213: (dbg) daemon: [out/minikube-linux-amd64 mount -p functional-683521 /tmp/TestFunctionalparallelMountCmdspecific-port409478524/001:/mount-9p --alsologtostderr -v=1 --port 46464]
functional_test_mount_test.go:243: (dbg) Run:  out/minikube-linux-amd64 -p functional-683521 ssh "findmnt -T /mount-9p | grep 9p"
functional_test_mount_test.go:243: (dbg) Non-zero exit: out/minikube-linux-amd64 -p functional-683521 ssh "findmnt -T /mount-9p | grep 9p": exit status 1 (303.106936ms)

                                                
                                                
** stderr ** 
	ssh: Process exited with status 1

                                                
                                                
** /stderr **
functional_test_mount_test.go:243: (dbg) Run:  out/minikube-linux-amd64 -p functional-683521 ssh "findmnt -T /mount-9p | grep 9p"
functional_test_mount_test.go:243: (dbg) Non-zero exit: out/minikube-linux-amd64 -p functional-683521 ssh "findmnt -T /mount-9p | grep 9p": exit status 1 (270.781061ms)

                                                
                                                
** stderr ** 
	ssh: Process exited with status 1

                                                
                                                
** /stderr **
functional_test_mount_test.go:243: (dbg) Run:  out/minikube-linux-amd64 -p functional-683521 ssh "findmnt -T /mount-9p | grep 9p"
functional_test_mount_test.go:243: (dbg) Non-zero exit: out/minikube-linux-amd64 -p functional-683521 ssh "findmnt -T /mount-9p | grep 9p": exit status 1 (273.010578ms)

                                                
                                                
** stderr ** 
	ssh: Process exited with status 1

                                                
                                                
** /stderr **
2023/07/31 10:42:36 [DEBUG] GET http://127.0.0.1:36195/api/v1/namespaces/kubernetes-dashboard/services/http:kubernetes-dashboard:/proxy/
functional_test_mount_test.go:243: (dbg) Run:  out/minikube-linux-amd64 -p functional-683521 ssh "findmnt -T /mount-9p | grep 9p"
functional_test_mount_test.go:243: (dbg) Non-zero exit: out/minikube-linux-amd64 -p functional-683521 ssh "findmnt -T /mount-9p | grep 9p": exit status 1 (271.907404ms)

                                                
                                                
** stderr ** 
	ssh: Process exited with status 1

                                                
                                                
** /stderr **
functional_test_mount_test.go:243: (dbg) Run:  out/minikube-linux-amd64 -p functional-683521 ssh "findmnt -T /mount-9p | grep 9p"
functional_test_mount_test.go:243: (dbg) Non-zero exit: out/minikube-linux-amd64 -p functional-683521 ssh "findmnt -T /mount-9p | grep 9p": exit status 1 (343.050078ms)

                                                
                                                
** stderr ** 
	ssh: Process exited with status 1

                                                
                                                
** /stderr **
functional_test_mount_test.go:243: (dbg) Run:  out/minikube-linux-amd64 -p functional-683521 ssh "findmnt -T /mount-9p | grep 9p"
functional_test_mount_test.go:243: (dbg) Non-zero exit: out/minikube-linux-amd64 -p functional-683521 ssh "findmnt -T /mount-9p | grep 9p": exit status 1 (437.130717ms)

                                                
                                                
** stderr ** 
	ssh: Process exited with status 1

                                                
                                                
** /stderr **
functional_test_mount_test.go:243: (dbg) Run:  out/minikube-linux-amd64 -p functional-683521 ssh "findmnt -T /mount-9p | grep 9p"
functional_test_mount_test.go:243: (dbg) Non-zero exit: out/minikube-linux-amd64 -p functional-683521 ssh "findmnt -T /mount-9p | grep 9p": exit status 1 (243.79497ms)

                                                
                                                
** stderr ** 
	ssh: Process exited with status 1

                                                
                                                
** /stderr **
functional_test_mount_test.go:253: /mount-9p did not appear within 13.910910347s: exit status 1
functional_test_mount_test.go:220: "TestFunctional/parallel/MountCmd/specific-port" failed, getting debug info...
functional_test_mount_test.go:221: (dbg) Run:  out/minikube-linux-amd64 -p functional-683521 ssh "mount | grep 9p; ls -la /mount-9p; cat /mount-9p/pod-dates"
functional_test_mount_test.go:221: (dbg) Non-zero exit: out/minikube-linux-amd64 -p functional-683521 ssh "mount | grep 9p; ls -la /mount-9p; cat /mount-9p/pod-dates": exit status 1 (252.619158ms)

                                                
                                                
-- stdout --
	total 8
	drwxr-xr-x 2 root root 4096 Jul 31 10:42 .
	drwxr-xr-x 1 root root 4096 Jul 31 10:42 ..
	cat: /mount-9p/pod-dates: No such file or directory

                                                
                                                
-- /stdout --
** stderr ** 
	ssh: Process exited with status 1

                                                
                                                
** /stderr **
functional_test_mount_test.go:223: debugging command "out/minikube-linux-amd64 -p functional-683521 ssh \"mount | grep 9p; ls -la /mount-9p; cat /mount-9p/pod-dates\"" failed : exit status 1
functional_test_mount_test.go:230: (dbg) Run:  out/minikube-linux-amd64 -p functional-683521 ssh "sudo umount -f /mount-9p"
functional_test_mount_test.go:230: (dbg) Non-zero exit: out/minikube-linux-amd64 -p functional-683521 ssh "sudo umount -f /mount-9p": exit status 1 (304.84312ms)

                                                
                                                
-- stdout --
	umount: /mount-9p: not mounted.

                                                
                                                
-- /stdout --
** stderr ** 
	ssh: Process exited with status 32

                                                
                                                
** /stderr **
functional_test_mount_test.go:232: "out/minikube-linux-amd64 -p functional-683521 ssh \"sudo umount -f /mount-9p\"": exit status 1
functional_test_mount_test.go:234: (dbg) stopping [out/minikube-linux-amd64 mount -p functional-683521 /tmp/TestFunctionalparallelMountCmdspecific-port409478524/001:/mount-9p --alsologtostderr -v=1 --port 46464] ...
functional_test_mount_test.go:234: (dbg) [out/minikube-linux-amd64 mount -p functional-683521 /tmp/TestFunctionalparallelMountCmdspecific-port409478524/001:/mount-9p --alsologtostderr -v=1 --port 46464] stdout:

                                                
                                                

                                                
                                                
functional_test_mount_test.go:234: (dbg) [out/minikube-linux-amd64 mount -p functional-683521 /tmp/TestFunctionalparallelMountCmdspecific-port409478524/001:/mount-9p --alsologtostderr -v=1 --port 46464] stderr:
I0731 10:42:32.898441   47055 out.go:296] Setting OutFile to fd 1 ...
I0731 10:42:32.898646   47055 out.go:343] TERM=,COLORTERM=, which probably does not support color
I0731 10:42:32.898656   47055 out.go:309] Setting ErrFile to fd 2...
I0731 10:42:32.898663   47055 out.go:343] TERM=,COLORTERM=, which probably does not support color
I0731 10:42:32.898971   47055 root.go:338] Updating PATH: /home/jenkins/minikube-integration/16969-5799/.minikube/bin
I0731 10:42:32.899280   47055 mustload.go:65] Loading cluster: functional-683521
I0731 10:42:32.899728   47055 config.go:182] Loaded profile config "functional-683521": Driver=docker, ContainerRuntime=crio, KubernetesVersion=v1.27.3
I0731 10:42:32.900316   47055 cli_runner.go:164] Run: docker container inspect functional-683521 --format={{.State.Status}}
I0731 10:42:32.922877   47055 host.go:66] Checking if "functional-683521" exists ...
I0731 10:42:32.923186   47055 cli_runner.go:164] Run: docker system info --format "{{json .}}"
I0731 10:42:33.004092   47055 info.go:266] docker info: {ID:TS6T:UINC:MIYS:RZPA:KS6T:4JQK:7JHN:D6RA:LDP2:MHAE:G32M:C5NQ Containers:1 ContainersRunning:1 ContainersPaused:0 ContainersStopped:0 Images:3 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local logentries splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:true BridgeNfIP6Tables:true Debug:false NFd:34 OomKillDisable:true NGoroutines:49 SystemTime:2023-07-31 10:42:32.993012339 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1038-gcp OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Architecture:x86_64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:8 MemTotal:33648062464 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ubuntu-20-agent-15 Labels:[] ExperimentalBuild:false ServerVersion:24.0.5 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:3dce8eb055cbb6872793272b4f20ed16117344f8 Expected:3dce8eb055cbb6872793272b4f20ed16117344f8} RuncCommit:{ID:v1.1.7-0-g860f061 Expected:v1.1.7-0-g860f061} InitCommit:{ID:de40ad0 Expected:de40ad0} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.11.2] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.20.2] map[Name:scan Path:/usr/libexec/docker/cli-plugins/docker-scan SchemaVersion:0.1.0 ShortDescription:Docker Scan Vendor:Docker Inc. Version:v0.23.0]] Warnings:<nil>}}
I0731 10:42:33.004271   47055 cli_runner.go:164] Run: docker network inspect functional-683521 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I0731 10:42:33.027040   47055 out.go:177] 
W0731 10:42:33.028356   47055 out.go:239] X Exiting due to IF_MOUNT_PORT: Error finding port for mount: Error accessing port 46464
X Exiting due to IF_MOUNT_PORT: Error finding port for mount: Error accessing port 46464
W0731 10:42:33.028390   47055 out.go:239] * 
* 
W0731 10:42:33.031396   47055 out.go:239] ╭─────────────────────────────────────────────────────────────────────────────────────────────╮
│                                                                                             │
│    * If the above advice does not help, please let us know:                                 │
│      https://github.com/kubernetes/minikube/issues/new/choose                               │
│                                                                                             │
│    * Please run `minikube logs --file=logs.txt` and attach logs.txt to the GitHub issue.    │
│    * Please also attach the following file to the GitHub issue:                             │
│    * - /tmp/minikube_mount_f4781619e9135ed9d8032079ca9abef29f8fe1ee_0.log                   │
│                                                                                             │
╰─────────────────────────────────────────────────────────────────────────────────────────────╯
╭─────────────────────────────────────────────────────────────────────────────────────────────╮
│                                                                                             │
│    * If the above advice does not help, please let us know:                                 │
│      https://github.com/kubernetes/minikube/issues/new/choose                               │
│                                                                                             │
│    * Please run `minikube logs --file=logs.txt` and attach logs.txt to the GitHub issue.    │
│    * Please also attach the following file to the GitHub issue:                             │
│    * - /tmp/minikube_mount_f4781619e9135ed9d8032079ca9abef29f8fe1ee_0.log                   │
│                                                                                             │
╰─────────────────────────────────────────────────────────────────────────────────────────────╯
I0731 10:42:33.032850   47055 out.go:177] 
--- FAIL: TestFunctional/parallel/MountCmd/specific-port (14.58s)
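
The mount process exited with IF_MOUNT_PORT before the 9p server ever started, which is why every findmnt retry above came back empty. One plausible cause, and only an assumption here, is that the fixed port requested via --port 46464 was already bound on the host when the mount began. A quick Go bind test that would distinguish that case:

	package main

	import (
		"fmt"
		"net"
	)

	func main() {
		// Try to bind the same fixed port the test passed to minikube
		// mount. A failure here would be consistent with the
		// IF_MOUNT_PORT exit above; success means the root cause lies
		// elsewhere.
		ln, err := net.Listen("tcp", ":46464")
		if err != nil {
			fmt.Println("port 46464 not bindable:", err)
			return
		}
		ln.Close()
		fmt.Println("port 46464 was free at check time")
	}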

                                                
                                    
TestFunctional/parallel/ImageCommands/ImageLoadFromFile (7.31s)

                                                
                                                
=== RUN   TestFunctional/parallel/ImageCommands/ImageLoadFromFile
functional_test.go:408: (dbg) Run:  out/minikube-linux-amd64 -p functional-683521 image load /home/jenkins/workspace/Docker_Linux_crio_integration/addon-resizer-save.tar --alsologtostderr
functional_test.go:408: (dbg) Done: out/minikube-linux-amd64 -p functional-683521 image load /home/jenkins/workspace/Docker_Linux_crio_integration/addon-resizer-save.tar --alsologtostderr: (5.092815712s)
functional_test.go:447: (dbg) Run:  out/minikube-linux-amd64 -p functional-683521 image ls
functional_test.go:447: (dbg) Done: out/minikube-linux-amd64 -p functional-683521 image ls: (2.214142551s)
functional_test.go:442: expected "gcr.io/google-containers/addon-resizer:functional-683521" to be loaded into minikube but the image is not there
--- FAIL: TestFunctional/parallel/ImageCommands/ImageLoadFromFile (7.31s)
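
Here the load command itself succeeded; only the follow-up image ls check failed to find the tag. A small Go sketch that replays the same two commands from the log and reports whether the expected tag shows up (the binary path, profile name, tar path, and tag are all taken from the output above; this is an illustrative reproduction, not part of the suite):

	package main

	import (
		"fmt"
		"os/exec"
		"strings"
	)

	func main() {
		bin := "out/minikube-linux-amd64" // path from the log above
		profile := "functional-683521"

		// Replay the load step the test performed.
		load := exec.Command(bin, "-p", profile, "image", "load",
			"/home/jenkins/workspace/Docker_Linux_crio_integration/addon-resizer-save.tar")
		if out, err := load.CombinedOutput(); err != nil {
			fmt.Printf("image load failed: %v\n%s", err, out)
			return
		}

		// Then list images, as the test does, and look for the tag.
		out, err := exec.Command(bin, "-p", profile, "image", "ls").Output()
		if err != nil {
			fmt.Println("image ls failed:", err)
			return
		}
		want := "gcr.io/google-containers/addon-resizer:functional-683521"
		fmt.Println("image present:", strings.Contains(string(out), want))
	}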

                                                
                                    
TestIngressAddonLegacy/serial/ValidateIngressAddons (174.38s)

                                                
                                                
=== RUN   TestIngressAddonLegacy/serial/ValidateIngressAddons
addons_test.go:183: (dbg) Run:  kubectl --context ingress-addon-legacy-538476 wait --for=condition=ready --namespace=ingress-nginx pod --selector=app.kubernetes.io/component=controller --timeout=90s
addons_test.go:183: (dbg) Done: kubectl --context ingress-addon-legacy-538476 wait --for=condition=ready --namespace=ingress-nginx pod --selector=app.kubernetes.io/component=controller --timeout=90s: (8.353720256s)
addons_test.go:208: (dbg) Run:  kubectl --context ingress-addon-legacy-538476 replace --force -f testdata/nginx-ingress-v1beta1.yaml
addons_test.go:221: (dbg) Run:  kubectl --context ingress-addon-legacy-538476 replace --force -f testdata/nginx-pod-svc.yaml
addons_test.go:226: (dbg) TestIngressAddonLegacy/serial/ValidateIngressAddons: waiting 8m0s for pods matching "run=nginx" in namespace "default" ...
helpers_test.go:344: "nginx" [d683b27c-9766-423e-b150-8402ab95a627] Pending / Ready:ContainersNotReady (containers with unready status: [nginx]) / ContainersReady:ContainersNotReady (containers with unready status: [nginx])
helpers_test.go:344: "nginx" [d683b27c-9766-423e-b150-8402ab95a627] Running
addons_test.go:226: (dbg) TestIngressAddonLegacy/serial/ValidateIngressAddons: run=nginx healthy within 9.007056832s
addons_test.go:238: (dbg) Run:  out/minikube-linux-amd64 -p ingress-addon-legacy-538476 ssh "curl -s http://127.0.0.1/ -H 'Host: nginx.example.com'"
E0731 10:45:58.881947   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/addons-764200/client.crt: no such file or directory
E0731 10:46:26.568029   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/addons-764200/client.crt: no such file or directory
addons_test.go:238: (dbg) Non-zero exit: out/minikube-linux-amd64 -p ingress-addon-legacy-538476 ssh "curl -s http://127.0.0.1/ -H 'Host: nginx.example.com'": exit status 1 (2m10.534460086s)

                                                
                                                
** stderr ** 
	ssh: Process exited with status 28

                                                
                                                
** /stderr **
addons_test.go:254: failed to get expected response from http://127.0.0.1/ within minikube: exit status 1
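
Note on the curl step: the ssh wrapper propagates the remote command's exit code, and curl's status 28 means the operation timed out, so the request to the ingress controller hung rather than being refused outright. A minimal Go sketch of an equivalent probe run from the host instead of inside the node, assuming the node IP 192.168.49.2 and the Host header used by the test (this helper is illustrative and not part of the test suite):

	package main

	import (
		"fmt"
		"net/http"
		"time"
	)

	func main() {
		// GET against the minikube node, overriding the Host header so
		// the ingress routes the request to the nginx backend.
		req, err := http.NewRequest("GET", "http://192.168.49.2/", nil)
		if err != nil {
			panic(err)
		}
		req.Host = "nginx.example.com"

		// A short client timeout mirrors curl exiting with status 28.
		client := &http.Client{Timeout: 10 * time.Second}
		resp, err := client.Do(req)
		if err != nil {
			fmt.Println("probe failed:", err) // a hang surfaces here
			return
		}
		defer resp.Body.Close()
		fmt.Println("status:", resp.StatusCode)
	}
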
addons_test.go:262: (dbg) Run:  kubectl --context ingress-addon-legacy-538476 replace --force -f testdata/ingress-dns-example-v1beta1.yaml
addons_test.go:267: (dbg) Run:  out/minikube-linux-amd64 -p ingress-addon-legacy-538476 ip
addons_test.go:273: (dbg) Run:  nslookup hello-john.test 192.168.49.2
E0731 10:47:12.322371   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/functional-683521/client.crt: no such file or directory
E0731 10:47:12.327673   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/functional-683521/client.crt: no such file or directory
E0731 10:47:12.337920   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/functional-683521/client.crt: no such file or directory
E0731 10:47:12.358172   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/functional-683521/client.crt: no such file or directory
E0731 10:47:12.398449   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/functional-683521/client.crt: no such file or directory
E0731 10:47:12.478768   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/functional-683521/client.crt: no such file or directory
E0731 10:47:12.639202   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/functional-683521/client.crt: no such file or directory
E0731 10:47:12.959795   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/functional-683521/client.crt: no such file or directory
E0731 10:47:13.600769   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/functional-683521/client.crt: no such file or directory
E0731 10:47:14.881246   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/functional-683521/client.crt: no such file or directory
addons_test.go:273: (dbg) Non-zero exit: nslookup hello-john.test 192.168.49.2: exit status 1 (15.007972219s)

                                                
                                                
-- stdout --
	;; connection timed out; no servers could be reached
	
	

                                                
                                                
-- /stdout --
addons_test.go:275: failed to nslookup hello-john.test host. args "nslookup hello-john.test 192.168.49.2" : exit status 1
addons_test.go:279: unexpected output from nslookup. stdout: ;; connection timed out; no servers could be reached

                                                
                                                

                                                
                                                

                                                
                                                
stderr: 
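
"no servers could be reached" means the queries to 192.168.49.2 timed out entirely; a missing record would instead come back as NXDOMAIN from a reachable server. An equivalent probe in Go, pinning the resolver to the address nslookup was given (the resolver wiring is illustrative and assumes the ingress-dns addon answers on UDP port 53 at the node IP, as the test's nslookup invocation implies):

	package main

	import (
		"context"
		"fmt"
		"net"
		"time"
	)

	func main() {
		// Resolver pinned to the minikube node IP; the address argument
		// supplied by the stdlib is deliberately ignored.
		r := &net.Resolver{
			PreferGo: true,
			Dial: func(ctx context.Context, network, address string) (net.Conn, error) {
				d := net.Dialer{Timeout: 5 * time.Second}
				return d.DialContext(ctx, "udp", "192.168.49.2:53")
			},
		}

		ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
		defer cancel()
		addrs, err := r.LookupHost(ctx, "hello-john.test")
		if err != nil {
			fmt.Println("lookup failed:", err) // timeouts land here
			return
		}
		fmt.Println("resolved:", addrs)
	}
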
addons_test.go:282: (dbg) Run:  out/minikube-linux-amd64 -p ingress-addon-legacy-538476 addons disable ingress-dns --alsologtostderr -v=1
E0731 10:47:17.441729   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/functional-683521/client.crt: no such file or directory
addons_test.go:282: (dbg) Done: out/minikube-linux-amd64 -p ingress-addon-legacy-538476 addons disable ingress-dns --alsologtostderr -v=1: (1.591452343s)
addons_test.go:287: (dbg) Run:  out/minikube-linux-amd64 -p ingress-addon-legacy-538476 addons disable ingress --alsologtostderr -v=1
E0731 10:47:22.562396   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/functional-683521/client.crt: no such file or directory
addons_test.go:287: (dbg) Done: out/minikube-linux-amd64 -p ingress-addon-legacy-538476 addons disable ingress --alsologtostderr -v=1: (7.381649326s)
helpers_test.go:222: -----------------------post-mortem--------------------------------
helpers_test.go:230: ======>  post-mortem[TestIngressAddonLegacy/serial/ValidateIngressAddons]: docker inspect <======
helpers_test.go:231: (dbg) Run:  docker inspect ingress-addon-legacy-538476
helpers_test.go:235: (dbg) docker inspect ingress-addon-legacy-538476:

                                                
                                                
-- stdout --
	[
	    {
	        "Id": "ddc807f676f3c79cf790cac2393305c7fd24a541731a2e987d238a98e9094e19",
	        "Created": "2023-07-31T10:43:16.491460718Z",
	        "Path": "/usr/local/bin/entrypoint",
	        "Args": [
	            "/sbin/init"
	        ],
	        "State": {
	            "Status": "running",
	            "Running": true,
	            "Paused": false,
	            "Restarting": false,
	            "OOMKilled": false,
	            "Dead": false,
	            "Pid": 51776,
	            "ExitCode": 0,
	            "Error": "",
	            "StartedAt": "2023-07-31T10:43:16.777349724Z",
	            "FinishedAt": "0001-01-01T00:00:00Z"
	        },
	        "Image": "sha256:c6cc01e6091959400f260dc442708e7c71630b58dab1f7c344cb00926bd84950",
	        "ResolvConfPath": "/var/lib/docker/containers/ddc807f676f3c79cf790cac2393305c7fd24a541731a2e987d238a98e9094e19/resolv.conf",
	        "HostnamePath": "/var/lib/docker/containers/ddc807f676f3c79cf790cac2393305c7fd24a541731a2e987d238a98e9094e19/hostname",
	        "HostsPath": "/var/lib/docker/containers/ddc807f676f3c79cf790cac2393305c7fd24a541731a2e987d238a98e9094e19/hosts",
	        "LogPath": "/var/lib/docker/containers/ddc807f676f3c79cf790cac2393305c7fd24a541731a2e987d238a98e9094e19/ddc807f676f3c79cf790cac2393305c7fd24a541731a2e987d238a98e9094e19-json.log",
	        "Name": "/ingress-addon-legacy-538476",
	        "RestartCount": 0,
	        "Driver": "overlay2",
	        "Platform": "linux",
	        "MountLabel": "",
	        "ProcessLabel": "",
	        "AppArmorProfile": "unconfined",
	        "ExecIDs": null,
	        "HostConfig": {
	            "Binds": [
	                "/lib/modules:/lib/modules:ro",
	                "ingress-addon-legacy-538476:/var"
	            ],
	            "ContainerIDFile": "",
	            "LogConfig": {
	                "Type": "json-file",
	                "Config": {
	                    "max-size": "100m"
	                }
	            },
	            "NetworkMode": "ingress-addon-legacy-538476",
	            "PortBindings": {
	                "22/tcp": [
	                    {
	                        "HostIp": "127.0.0.1",
	                        "HostPort": ""
	                    }
	                ],
	                "2376/tcp": [
	                    {
	                        "HostIp": "127.0.0.1",
	                        "HostPort": ""
	                    }
	                ],
	                "32443/tcp": [
	                    {
	                        "HostIp": "127.0.0.1",
	                        "HostPort": ""
	                    }
	                ],
	                "5000/tcp": [
	                    {
	                        "HostIp": "127.0.0.1",
	                        "HostPort": ""
	                    }
	                ],
	                "8443/tcp": [
	                    {
	                        "HostIp": "127.0.0.1",
	                        "HostPort": ""
	                    }
	                ]
	            },
	            "RestartPolicy": {
	                "Name": "no",
	                "MaximumRetryCount": 0
	            },
	            "AutoRemove": false,
	            "VolumeDriver": "",
	            "VolumesFrom": null,
	            "ConsoleSize": [
	                0,
	                0
	            ],
	            "CapAdd": null,
	            "CapDrop": null,
	            "CgroupnsMode": "host",
	            "Dns": [],
	            "DnsOptions": [],
	            "DnsSearch": [],
	            "ExtraHosts": null,
	            "GroupAdd": null,
	            "IpcMode": "private",
	            "Cgroup": "",
	            "Links": null,
	            "OomScoreAdj": 0,
	            "PidMode": "",
	            "Privileged": true,
	            "PublishAllPorts": false,
	            "ReadonlyRootfs": false,
	            "SecurityOpt": [
	                "seccomp=unconfined",
	                "apparmor=unconfined",
	                "label=disable"
	            ],
	            "Tmpfs": {
	                "/run": "",
	                "/tmp": ""
	            },
	            "UTSMode": "",
	            "UsernsMode": "",
	            "ShmSize": 67108864,
	            "Runtime": "runc",
	            "Isolation": "",
	            "CpuShares": 0,
	            "Memory": 4294967296,
	            "NanoCpus": 2000000000,
	            "CgroupParent": "",
	            "BlkioWeight": 0,
	            "BlkioWeightDevice": [],
	            "BlkioDeviceReadBps": [],
	            "BlkioDeviceWriteBps": [],
	            "BlkioDeviceReadIOps": [],
	            "BlkioDeviceWriteIOps": [],
	            "CpuPeriod": 0,
	            "CpuQuota": 0,
	            "CpuRealtimePeriod": 0,
	            "CpuRealtimeRuntime": 0,
	            "CpusetCpus": "",
	            "CpusetMems": "",
	            "Devices": [],
	            "DeviceCgroupRules": null,
	            "DeviceRequests": null,
	            "MemoryReservation": 0,
	            "MemorySwap": 8589934592,
	            "MemorySwappiness": null,
	            "OomKillDisable": false,
	            "PidsLimit": null,
	            "Ulimits": null,
	            "CpuCount": 0,
	            "CpuPercent": 0,
	            "IOMaximumIOps": 0,
	            "IOMaximumBandwidth": 0,
	            "MaskedPaths": null,
	            "ReadonlyPaths": null
	        },
	        "GraphDriver": {
	            "Data": {
	                "LowerDir": "/var/lib/docker/overlay2/9383ce07c6c2d226ec65ac02aa57b7bf83f7fd7a125fc5041ca07278b8bfb61e-init/diff:/var/lib/docker/overlay2/738d78659811af5605d784380774f3996551e9a95d42d3d998a185d72e7e9dcf/diff",
	                "MergedDir": "/var/lib/docker/overlay2/9383ce07c6c2d226ec65ac02aa57b7bf83f7fd7a125fc5041ca07278b8bfb61e/merged",
	                "UpperDir": "/var/lib/docker/overlay2/9383ce07c6c2d226ec65ac02aa57b7bf83f7fd7a125fc5041ca07278b8bfb61e/diff",
	                "WorkDir": "/var/lib/docker/overlay2/9383ce07c6c2d226ec65ac02aa57b7bf83f7fd7a125fc5041ca07278b8bfb61e/work"
	            },
	            "Name": "overlay2"
	        },
	        "Mounts": [
	            {
	                "Type": "bind",
	                "Source": "/lib/modules",
	                "Destination": "/lib/modules",
	                "Mode": "ro",
	                "RW": false,
	                "Propagation": "rprivate"
	            },
	            {
	                "Type": "volume",
	                "Name": "ingress-addon-legacy-538476",
	                "Source": "/var/lib/docker/volumes/ingress-addon-legacy-538476/_data",
	                "Destination": "/var",
	                "Driver": "local",
	                "Mode": "z",
	                "RW": true,
	                "Propagation": ""
	            }
	        ],
	        "Config": {
	            "Hostname": "ingress-addon-legacy-538476",
	            "Domainname": "",
	            "User": "",
	            "AttachStdin": false,
	            "AttachStdout": false,
	            "AttachStderr": false,
	            "ExposedPorts": {
	                "22/tcp": {},
	                "2376/tcp": {},
	                "32443/tcp": {},
	                "5000/tcp": {},
	                "8443/tcp": {}
	            },
	            "Tty": true,
	            "OpenStdin": false,
	            "StdinOnce": false,
	            "Env": [
	                "container=docker",
	                "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
	            ],
	            "Cmd": null,
	            "Image": "gcr.io/k8s-minikube/kicbase:v0.0.40@sha256:8cadf23777709e43eca447c47a45f5a4635615129267ce025193040ec92a1631",
	            "Volumes": null,
	            "WorkingDir": "/",
	            "Entrypoint": [
	                "/usr/local/bin/entrypoint",
	                "/sbin/init"
	            ],
	            "OnBuild": null,
	            "Labels": {
	                "created_by.minikube.sigs.k8s.io": "true",
	                "mode.minikube.sigs.k8s.io": "ingress-addon-legacy-538476",
	                "name.minikube.sigs.k8s.io": "ingress-addon-legacy-538476",
	                "role.minikube.sigs.k8s.io": ""
	            },
	            "StopSignal": "SIGRTMIN+3"
	        },
	        "NetworkSettings": {
	            "Bridge": "",
	            "SandboxID": "338575d3b2fa56eb2395b9d33705597d2c74646b2caa5ad9ab989cdfe842b30f",
	            "HairpinMode": false,
	            "LinkLocalIPv6Address": "",
	            "LinkLocalIPv6PrefixLen": 0,
	            "Ports": {
	                "22/tcp": [
	                    {
	                        "HostIp": "127.0.0.1",
	                        "HostPort": "32787"
	                    }
	                ],
	                "2376/tcp": [
	                    {
	                        "HostIp": "127.0.0.1",
	                        "HostPort": "32786"
	                    }
	                ],
	                "32443/tcp": [
	                    {
	                        "HostIp": "127.0.0.1",
	                        "HostPort": "32783"
	                    }
	                ],
	                "5000/tcp": [
	                    {
	                        "HostIp": "127.0.0.1",
	                        "HostPort": "32785"
	                    }
	                ],
	                "8443/tcp": [
	                    {
	                        "HostIp": "127.0.0.1",
	                        "HostPort": "32784"
	                    }
	                ]
	            },
	            "SandboxKey": "/var/run/docker/netns/338575d3b2fa",
	            "SecondaryIPAddresses": null,
	            "SecondaryIPv6Addresses": null,
	            "EndpointID": "",
	            "Gateway": "",
	            "GlobalIPv6Address": "",
	            "GlobalIPv6PrefixLen": 0,
	            "IPAddress": "",
	            "IPPrefixLen": 0,
	            "IPv6Gateway": "",
	            "MacAddress": "",
	            "Networks": {
	                "ingress-addon-legacy-538476": {
	                    "IPAMConfig": {
	                        "IPv4Address": "192.168.49.2"
	                    },
	                    "Links": null,
	                    "Aliases": [
	                        "ddc807f676f3",
	                        "ingress-addon-legacy-538476"
	                    ],
	                    "NetworkID": "b111addb01301819b0af5442da62c21796017022436a9719d7af048a1318f5c3",
	                    "EndpointID": "0fc98772d3be071752b5dce6e60160ddd069cb63fbdf8ea5326de467a2acc0eb",
	                    "Gateway": "192.168.49.1",
	                    "IPAddress": "192.168.49.2",
	                    "IPPrefixLen": 24,
	                    "IPv6Gateway": "",
	                    "GlobalIPv6Address": "",
	                    "GlobalIPv6PrefixLen": 0,
	                    "MacAddress": "02:42:c0:a8:31:02",
	                    "DriverOpts": null
	                }
	            }
	        }
	    }
	]

                                                
                                                
-- /stdout --
helpers_test.go:239: (dbg) Run:  out/minikube-linux-amd64 status --format={{.Host}} -p ingress-addon-legacy-538476 -n ingress-addon-legacy-538476
helpers_test.go:244: <<< TestIngressAddonLegacy/serial/ValidateIngressAddons FAILED: start of post-mortem logs <<<
helpers_test.go:245: ======>  post-mortem[TestIngressAddonLegacy/serial/ValidateIngressAddons]: minikube logs <======
helpers_test.go:247: (dbg) Run:  out/minikube-linux-amd64 -p ingress-addon-legacy-538476 logs -n 25
helpers_test.go:252: TestIngressAddonLegacy/serial/ValidateIngressAddons logs: 
-- stdout --
	* 
	* ==> Audit <==
	* |---------|------------------------------------------------------------------------|-----------------------------|---------|---------|---------------------|---------------------|
	| Command |                                  Args                                  |           Profile           |  User   | Version |     Start Time      |      End Time       |
	|---------|------------------------------------------------------------------------|-----------------------------|---------|---------|---------------------|---------------------|
	| ssh     | functional-683521 ssh sudo                                             | functional-683521           | jenkins | v1.31.1 | 31 Jul 23 10:42 UTC |                     |
	|         | umount -f /mount-9p                                                    |                             |         |         |                     |                     |
	| ssh     | functional-683521 ssh findmnt                                          | functional-683521           | jenkins | v1.31.1 | 31 Jul 23 10:42 UTC |                     |
	|         | -T /mount1                                                             |                             |         |         |                     |                     |
	| mount   | -p functional-683521                                                   | functional-683521           | jenkins | v1.31.1 | 31 Jul 23 10:42 UTC |                     |
	|         | /tmp/TestFunctionalparallelMountCmdVerifyCleanup2460904278/001:/mount2 |                             |         |         |                     |                     |
	|         | --alsologtostderr -v=1                                                 |                             |         |         |                     |                     |
	| mount   | -p functional-683521                                                   | functional-683521           | jenkins | v1.31.1 | 31 Jul 23 10:42 UTC |                     |
	|         | /tmp/TestFunctionalparallelMountCmdVerifyCleanup2460904278/001:/mount3 |                             |         |         |                     |                     |
	|         | --alsologtostderr -v=1                                                 |                             |         |         |                     |                     |
	| mount   | -p functional-683521                                                   | functional-683521           | jenkins | v1.31.1 | 31 Jul 23 10:42 UTC |                     |
	|         | /tmp/TestFunctionalparallelMountCmdVerifyCleanup2460904278/001:/mount1 |                             |         |         |                     |                     |
	|         | --alsologtostderr -v=1                                                 |                             |         |         |                     |                     |
	| image   | functional-683521 image save --daemon                                  | functional-683521           | jenkins | v1.31.1 | 31 Jul 23 10:42 UTC | 31 Jul 23 10:42 UTC |
	|         | gcr.io/google-containers/addon-resizer:functional-683521               |                             |         |         |                     |                     |
	|         | --alsologtostderr                                                      |                             |         |         |                     |                     |
	| ssh     | functional-683521 ssh findmnt                                          | functional-683521           | jenkins | v1.31.1 | 31 Jul 23 10:42 UTC | 31 Jul 23 10:42 UTC |
	|         | -T /mount1                                                             |                             |         |         |                     |                     |
	| ssh     | functional-683521 ssh findmnt                                          | functional-683521           | jenkins | v1.31.1 | 31 Jul 23 10:42 UTC | 31 Jul 23 10:42 UTC |
	|         | -T /mount2                                                             |                             |         |         |                     |                     |
	| ssh     | functional-683521 ssh findmnt                                          | functional-683521           | jenkins | v1.31.1 | 31 Jul 23 10:42 UTC | 31 Jul 23 10:42 UTC |
	|         | -T /mount3                                                             |                             |         |         |                     |                     |
	| mount   | -p functional-683521                                                   | functional-683521           | jenkins | v1.31.1 | 31 Jul 23 10:42 UTC |                     |
	|         | --kill=true                                                            |                             |         |         |                     |                     |
	| image   | functional-683521                                                      | functional-683521           | jenkins | v1.31.1 | 31 Jul 23 10:42 UTC | 31 Jul 23 10:42 UTC |
	|         | image ls --format yaml                                                 |                             |         |         |                     |                     |
	|         | --alsologtostderr                                                      |                             |         |         |                     |                     |
	| image   | functional-683521                                                      | functional-683521           | jenkins | v1.31.1 | 31 Jul 23 10:42 UTC |                     |
	|         | image ls --format short                                                |                             |         |         |                     |                     |
	|         | --alsologtostderr                                                      |                             |         |         |                     |                     |
	| ssh     | functional-683521 ssh pgrep                                            | functional-683521           | jenkins | v1.31.1 | 31 Jul 23 10:42 UTC |                     |
	|         | buildkitd                                                              |                             |         |         |                     |                     |
	| image   | functional-683521                                                      | functional-683521           | jenkins | v1.31.1 | 31 Jul 23 10:42 UTC | 31 Jul 23 10:42 UTC |
	|         | image ls --format json                                                 |                             |         |         |                     |                     |
	|         | --alsologtostderr                                                      |                             |         |         |                     |                     |
	| image   | functional-683521                                                      | functional-683521           | jenkins | v1.31.1 | 31 Jul 23 10:42 UTC |                     |
	|         | image ls --format table                                                |                             |         |         |                     |                     |
	|         | --alsologtostderr                                                      |                             |         |         |                     |                     |
	| image   | functional-683521 image build -t                                       | functional-683521           | jenkins | v1.31.1 | 31 Jul 23 10:42 UTC | 31 Jul 23 10:42 UTC |
	|         | localhost/my-image:functional-683521                                   |                             |         |         |                     |                     |
	|         | testdata/build --alsologtostderr                                       |                             |         |         |                     |                     |
	| image   | functional-683521 image ls                                             | functional-683521           | jenkins | v1.31.1 | 31 Jul 23 10:42 UTC | 31 Jul 23 10:42 UTC |
	| delete  | -p functional-683521                                                   | functional-683521           | jenkins | v1.31.1 | 31 Jul 23 10:42 UTC | 31 Jul 23 10:43 UTC |
	| start   | -p ingress-addon-legacy-538476                                         | ingress-addon-legacy-538476 | jenkins | v1.31.1 | 31 Jul 23 10:43 UTC | 31 Jul 23 10:44 UTC |
	|         | --kubernetes-version=v1.18.20                                          |                             |         |         |                     |                     |
	|         | --memory=4096 --wait=true                                              |                             |         |         |                     |                     |
	|         | --alsologtostderr                                                      |                             |         |         |                     |                     |
	|         | -v=5 --driver=docker                                                   |                             |         |         |                     |                     |
	|         | --container-runtime=crio                                               |                             |         |         |                     |                     |
	| addons  | ingress-addon-legacy-538476                                            | ingress-addon-legacy-538476 | jenkins | v1.31.1 | 31 Jul 23 10:44 UTC | 31 Jul 23 10:44 UTC |
	|         | addons enable ingress                                                  |                             |         |         |                     |                     |
	|         | --alsologtostderr -v=5                                                 |                             |         |         |                     |                     |
	| addons  | ingress-addon-legacy-538476                                            | ingress-addon-legacy-538476 | jenkins | v1.31.1 | 31 Jul 23 10:44 UTC | 31 Jul 23 10:44 UTC |
	|         | addons enable ingress-dns                                              |                             |         |         |                     |                     |
	|         | --alsologtostderr -v=5                                                 |                             |         |         |                     |                     |
	| ssh     | ingress-addon-legacy-538476                                            | ingress-addon-legacy-538476 | jenkins | v1.31.1 | 31 Jul 23 10:44 UTC |                     |
	|         | ssh curl -s http://127.0.0.1/                                          |                             |         |         |                     |                     |
	|         | -H 'Host: nginx.example.com'                                           |                             |         |         |                     |                     |
	| ip      | ingress-addon-legacy-538476 ip                                         | ingress-addon-legacy-538476 | jenkins | v1.31.1 | 31 Jul 23 10:47 UTC | 31 Jul 23 10:47 UTC |
	| addons  | ingress-addon-legacy-538476                                            | ingress-addon-legacy-538476 | jenkins | v1.31.1 | 31 Jul 23 10:47 UTC | 31 Jul 23 10:47 UTC |
	|         | addons disable ingress-dns                                             |                             |         |         |                     |                     |
	|         | --alsologtostderr -v=1                                                 |                             |         |         |                     |                     |
	| addons  | ingress-addon-legacy-538476                                            | ingress-addon-legacy-538476 | jenkins | v1.31.1 | 31 Jul 23 10:47 UTC | 31 Jul 23 10:47 UTC |
	|         | addons disable ingress                                                 |                             |         |         |                     |                     |
	|         | --alsologtostderr -v=1                                                 |                             |         |         |                     |                     |
	|---------|------------------------------------------------------------------------|-----------------------------|---------|---------|---------------------|---------------------|
	
	* 
	* ==> Last Start <==
	* Log file created at: 2023/07/31 10:43:03
	Running on machine: ubuntu-20-agent-15
	Binary: Built with gc go1.20.6 for linux/amd64
	Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg
	I0731 10:43:03.448950   51156 out.go:296] Setting OutFile to fd 1 ...
	I0731 10:43:03.449104   51156 out.go:343] TERM=,COLORTERM=, which probably does not support color
	I0731 10:43:03.449115   51156 out.go:309] Setting ErrFile to fd 2...
	I0731 10:43:03.449122   51156 out.go:343] TERM=,COLORTERM=, which probably does not support color
	I0731 10:43:03.449347   51156 root.go:338] Updating PATH: /home/jenkins/minikube-integration/16969-5799/.minikube/bin
	I0731 10:43:03.449888   51156 out.go:303] Setting JSON to false
	I0731 10:43:03.450972   51156 start.go:128] hostinfo: {"hostname":"ubuntu-20-agent-15","uptime":1535,"bootTime":1690798648,"procs":487,"os":"linux","platform":"ubuntu","platformFamily":"debian","platformVersion":"20.04","kernelVersion":"5.15.0-1038-gcp","kernelArch":"x86_64","virtualizationSystem":"kvm","virtualizationRole":"guest","hostId":"591c9f12-2938-3743-e2bf-c56a050d43d1"}
	I0731 10:43:03.451025   51156 start.go:138] virtualization: kvm guest
	I0731 10:43:03.453897   51156 out.go:177] * [ingress-addon-legacy-538476] minikube v1.31.1 on Ubuntu 20.04 (kvm/amd64)
	I0731 10:43:03.455467   51156 out.go:177]   - MINIKUBE_LOCATION=16969
	I0731 10:43:03.455423   51156 notify.go:220] Checking for updates...
	I0731 10:43:03.457067   51156 out.go:177]   - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
	I0731 10:43:03.458902   51156 out.go:177]   - KUBECONFIG=/home/jenkins/minikube-integration/16969-5799/kubeconfig
	I0731 10:43:03.460492   51156 out.go:177]   - MINIKUBE_HOME=/home/jenkins/minikube-integration/16969-5799/.minikube
	I0731 10:43:03.462026   51156 out.go:177]   - MINIKUBE_BIN=out/minikube-linux-amd64
	I0731 10:43:03.463491   51156 out.go:177]   - MINIKUBE_FORCE_SYSTEMD=
	I0731 10:43:03.465138   51156 driver.go:373] Setting default libvirt URI to qemu:///system
	I0731 10:43:03.486977   51156 docker.go:121] docker version: linux-24.0.5:Docker Engine - Community
	I0731 10:43:03.487039   51156 cli_runner.go:164] Run: docker system info --format "{{json .}}"
	I0731 10:43:03.539868   51156 info.go:266] docker info: {ID:TS6T:UINC:MIYS:RZPA:KS6T:4JQK:7JHN:D6RA:LDP2:MHAE:G32M:C5NQ Containers:0 ContainersRunning:0 ContainersPaused:0 ContainersStopped:0 Images:3 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local logentries splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:true BridgeNfIP6Tables:true Debug:false NFd:24 OomKillDisable:true NGoroutines:37 SystemTime:2023-07-31 10:43:03.531744178 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1038-gcp OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Architecture:x86_64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:8 MemTotal:33648062464 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ubuntu-20-agent-15 Labels:[] ExperimentalBuild:false ServerVersion:24.0.5 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:3dce8eb055cbb6872793272b4f20ed16117344f8 Expected:3dce8eb055cbb6872793272b4f20ed16117344f8} RuncCommit:{ID:v1.1.7-0-g860f061 Expected:v1.1.7-0-g860f061} InitCommit:{ID:de40ad0 Expected:de40ad0} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.11.2] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.20.2] map[Name:scan Path:/usr/libexec/docker/cli-plugins/docker-scan SchemaVersion:0.1.0 ShortDescription:Docker Scan Vendor:Docker Inc. Version:v0.23.0]] Warnings:<nil>}}
	I0731 10:43:03.539961   51156 docker.go:294] overlay module found
	I0731 10:43:03.542245   51156 out.go:177] * Using the docker driver based on user configuration
	I0731 10:43:03.543915   51156 start.go:298] selected driver: docker
	I0731 10:43:03.543928   51156 start.go:898] validating driver "docker" against <nil>
	I0731 10:43:03.543939   51156 start.go:909] status for docker: {Installed:true Healthy:true Running:false NeedsImprovement:false Error:<nil> Reason: Fix: Doc: Version:}
	I0731 10:43:03.544716   51156 cli_runner.go:164] Run: docker system info --format "{{json .}}"
	I0731 10:43:03.594975   51156 info.go:266] docker info: {ID:TS6T:UINC:MIYS:RZPA:KS6T:4JQK:7JHN:D6RA:LDP2:MHAE:G32M:C5NQ Containers:0 ContainersRunning:0 ContainersPaused:0 ContainersStopped:0 Images:3 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local logentries splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:true BridgeNfIP6Tables:true Debug:false NFd:24 OomKillDisable:true NGoroutines:37 SystemTime:2023-07-31 10:43:03.58637913 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1038-gcp OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Architecture:x86_64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:8 MemTotal:33648062464 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ubuntu-20-agent-15 Labels:[] ExperimentalBuild:false ServerVersion:24.0.5 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:3dce8eb055cbb6872793272b4f20ed16117344f8 Expected:3dce8eb055cbb6872793272b4f20ed16117344f8} RuncCommit:{ID:v1.1.7-0-g860f061 Expected:v1.1.7-0-g860f061} InitCommit:{ID:de40ad0 Expected:de40ad0} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.11.2] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.20.2] map[Name:scan Path:/usr/libexec/docker/cli-plugins/docker-scan SchemaVersion:0.1.0 ShortDescription:Docker Scan Vendor:Docker Inc. Version:v0.23.0]] Warnings:<nil>}}
	I0731 10:43:03.595200   51156 start_flags.go:305] no existing cluster config was found, will generate one from the flags 
	I0731 10:43:03.595444   51156 start_flags.go:919] Waiting for all components: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
	I0731 10:43:03.597597   51156 out.go:177] * Using Docker driver with root privileges
	I0731 10:43:03.599453   51156 cni.go:84] Creating CNI manager for ""
	I0731 10:43:03.599473   51156 cni.go:143] "docker" driver + "crio" runtime found, recommending kindnet
	I0731 10:43:03.599485   51156 start_flags.go:314] Found "CNI" CNI - setting NetworkPlugin=cni
	I0731 10:43:03.599496   51156 start_flags.go:319] config:
	{Name:ingress-addon-legacy-538476 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase:v0.0.40@sha256:8cadf23777709e43eca447c47a45f5a4635615129267ce025193040ec92a1631 Memory:4096 CPUs:2 DiskSize:20000 VMDriver: Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:0 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.18.20 ClusterName:ingress-addon-legacy-538476 Namespace:default APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:crio CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI: NodeIP: NodePort:8443 NodeName:} Nodes:[] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0}
	I0731 10:43:03.601160   51156 out.go:177] * Starting control plane node ingress-addon-legacy-538476 in cluster ingress-addon-legacy-538476
	I0731 10:43:03.602602   51156 cache.go:122] Beginning downloading kic base image for docker with crio
	I0731 10:43:03.604210   51156 out.go:177] * Pulling base image ...
	I0731 10:43:03.605691   51156 preload.go:132] Checking if preload exists for k8s version v1.18.20 and runtime crio
	I0731 10:43:03.605788   51156 image.go:79] Checking for gcr.io/k8s-minikube/kicbase:v0.0.40@sha256:8cadf23777709e43eca447c47a45f5a4635615129267ce025193040ec92a1631 in local docker daemon
	I0731 10:43:03.621423   51156 image.go:83] Found gcr.io/k8s-minikube/kicbase:v0.0.40@sha256:8cadf23777709e43eca447c47a45f5a4635615129267ce025193040ec92a1631 in local docker daemon, skipping pull
	I0731 10:43:03.621442   51156 cache.go:145] gcr.io/k8s-minikube/kicbase:v0.0.40@sha256:8cadf23777709e43eca447c47a45f5a4635615129267ce025193040ec92a1631 exists in daemon, skipping load
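
The two image.go lines above are a simple check-before-pull: the kic base image is fetched only when the local daemon does not already hold it. A minimal shell sketch of the same pattern (the image reference is copied verbatim from the log):

    IMG='gcr.io/k8s-minikube/kicbase:v0.0.40@sha256:8cadf23777709e43eca447c47a45f5a4635615129267ce025193040ec92a1631'
    # inspect succeeds only if the image already exists locally;
    # otherwise fall through to an explicit pull by digest.
    docker image inspect "$IMG" >/dev/null 2>&1 || docker pull "$IMG"
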
	I0731 10:43:03.639833   51156 preload.go:119] Found remote preload: https://storage.googleapis.com/minikube-preloaded-volume-tarballs/v18/v1.18.20/preloaded-images-k8s-v18-v1.18.20-cri-o-overlay-amd64.tar.lz4
	I0731 10:43:03.639861   51156 cache.go:57] Caching tarball of preloaded images
	I0731 10:43:03.639998   51156 preload.go:132] Checking if preload exists for k8s version v1.18.20 and runtime crio
	I0731 10:43:03.642063   51156 out.go:177] * Downloading Kubernetes v1.18.20 preload ...
	I0731 10:43:03.643666   51156 preload.go:238] getting checksum for preloaded-images-k8s-v18-v1.18.20-cri-o-overlay-amd64.tar.lz4 ...
	I0731 10:43:03.675753   51156 download.go:107] Downloading: https://storage.googleapis.com/minikube-preloaded-volume-tarballs/v18/v1.18.20/preloaded-images-k8s-v18-v1.18.20-cri-o-overlay-amd64.tar.lz4?checksum=md5:0d02e096853189c5b37812b400898e14 -> /home/jenkins/minikube-integration/16969-5799/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.18.20-cri-o-overlay-amd64.tar.lz4
	I0731 10:43:08.351969   51156 preload.go:249] saving checksum for preloaded-images-k8s-v18-v1.18.20-cri-o-overlay-amd64.tar.lz4 ...
	I0731 10:43:08.352088   51156 preload.go:256] verifying checksum of /home/jenkins/minikube-integration/16969-5799/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.18.20-cri-o-overlay-amd64.tar.lz4 ...
	I0731 10:43:09.309032   51156 cache.go:60] Finished verifying existence of preloaded tar for  v1.18.20 on crio
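
The download above carries a ?checksum=md5:... query that minikube verifies after the transfer (preload.go:238-256). The same preload can be checked by hand; the URL and digest below are taken from the log, nothing else is assumed:

    URL='https://storage.googleapis.com/minikube-preloaded-volume-tarballs/v18/v1.18.20/preloaded-images-k8s-v18-v1.18.20-cri-o-overlay-amd64.tar.lz4'
    curl -fsSLO "$URL"
    # md5sum -c reads "<digest>  <filename>" pairs from stdin
    echo '0d02e096853189c5b37812b400898e14  preloaded-images-k8s-v18-v1.18.20-cri-o-overlay-amd64.tar.lz4' | md5sum -c -
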
	I0731 10:43:09.309377   51156 profile.go:148] Saving config to /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/ingress-addon-legacy-538476/config.json ...
	I0731 10:43:09.309415   51156 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/ingress-addon-legacy-538476/config.json: {Name:mka0cfcd8d72b121cc113094e8ac6bd750b8c09a Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
	I0731 10:43:09.309621   51156 cache.go:195] Successfully downloaded all kic artifacts
	I0731 10:43:09.309649   51156 start.go:365] acquiring machines lock for ingress-addon-legacy-538476: {Name:mk4d2e12d12ebffa9a442e88c026a68e17500cc4 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
	I0731 10:43:09.309711   51156 start.go:369] acquired machines lock for "ingress-addon-legacy-538476" in 48.347µs
	I0731 10:43:09.309737   51156 start.go:93] Provisioning new machine with config: &{Name:ingress-addon-legacy-538476 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase:v0.0.40@sha256:8cadf23777709e43eca447c47a45f5a4635615129267ce025193040ec92a1631 Memory:4096 CPUs:2 DiskSize:20000 VMDriver: Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:0 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.18.20 ClusterName:ingress-addon-legacy-538476 Namespace:default APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:crio CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI: NodeIP: NodePort:8443 NodeName:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.18.20 ContainerRuntime:crio ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0} &{Name: IP: Port:8443 KubernetesVersion:v1.18.20 ContainerRuntime:crio ControlPlane:true Worker:true}
	I0731 10:43:09.309831   51156 start.go:125] createHost starting for "" (driver="docker")
	I0731 10:43:09.312076   51156 out.go:204] * Creating docker container (CPUs=2, Memory=4096MB) ...
	I0731 10:43:09.312332   51156 start.go:159] libmachine.API.Create for "ingress-addon-legacy-538476" (driver="docker")
	I0731 10:43:09.312365   51156 client.go:168] LocalClient.Create starting
	I0731 10:43:09.312450   51156 main.go:141] libmachine: Reading certificate data from /home/jenkins/minikube-integration/16969-5799/.minikube/certs/ca.pem
	I0731 10:43:09.312486   51156 main.go:141] libmachine: Decoding PEM data...
	I0731 10:43:09.312507   51156 main.go:141] libmachine: Parsing certificate...
	I0731 10:43:09.312570   51156 main.go:141] libmachine: Reading certificate data from /home/jenkins/minikube-integration/16969-5799/.minikube/certs/cert.pem
	I0731 10:43:09.312597   51156 main.go:141] libmachine: Decoding PEM data...
	I0731 10:43:09.312612   51156 main.go:141] libmachine: Parsing certificate...
	I0731 10:43:09.312940   51156 cli_runner.go:164] Run: docker network inspect ingress-addon-legacy-538476 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
	W0731 10:43:09.328433   51156 cli_runner.go:211] docker network inspect ingress-addon-legacy-538476 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}" returned with exit code 1
	I0731 10:43:09.328503   51156 network_create.go:281] running [docker network inspect ingress-addon-legacy-538476] to gather additional debugging logs...
	I0731 10:43:09.328525   51156 cli_runner.go:164] Run: docker network inspect ingress-addon-legacy-538476
	W0731 10:43:09.342818   51156 cli_runner.go:211] docker network inspect ingress-addon-legacy-538476 returned with exit code 1
	I0731 10:43:09.342840   51156 network_create.go:284] error running [docker network inspect ingress-addon-legacy-538476]: docker network inspect ingress-addon-legacy-538476: exit status 1
	stdout:
	[]
	
	stderr:
	Error response from daemon: network ingress-addon-legacy-538476 not found
	I0731 10:43:09.342853   51156 network_create.go:286] output of [docker network inspect ingress-addon-legacy-538476]: -- stdout --
	[]
	
	-- /stdout --
	** stderr ** 
	Error response from daemon: network ingress-addon-legacy-538476 not found
	
	** /stderr **
	I0731 10:43:09.342899   51156 cli_runner.go:164] Run: docker network inspect bridge --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
	I0731 10:43:09.358673   51156 network.go:209] using free private subnet 192.168.49.0/24: &{IP:192.168.49.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.49.0/24 Gateway:192.168.49.1 ClientMin:192.168.49.2 ClientMax:192.168.49.254 Broadcast:192.168.49.255 IsPrivate:true Interface:{IfaceName: IfaceIPv4: IfaceMTU:0 IfaceMAC:} reservation:0xc0010cc890}
	I0731 10:43:09.358709   51156 network_create.go:123] attempt to create docker network ingress-addon-legacy-538476 192.168.49.0/24 with gateway 192.168.49.1 and MTU of 1500 ...
	I0731 10:43:09.358755   51156 cli_runner.go:164] Run: docker network create --driver=bridge --subnet=192.168.49.0/24 --gateway=192.168.49.1 -o --ip-masq -o --icc -o com.docker.network.driver.mtu=1500 --label=created_by.minikube.sigs.k8s.io=true --label=name.minikube.sigs.k8s.io=ingress-addon-legacy-538476 ingress-addon-legacy-538476
	I0731 10:43:09.408158   51156 network_create.go:107] docker network ingress-addon-legacy-538476 192.168.49.0/24 created
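
The sequence above probes for the cluster network first (both inspect calls fail with "not found"), then creates it on a free private subnet picked by network.go. Condensed into a standalone sketch, with every flag taken from the docker network create line in the log:

    NET=ingress-addon-legacy-538476
    if ! docker network inspect "$NET" >/dev/null 2>&1; then
      # 192.168.49.0/24 was selected as the first free private subnet
      docker network create --driver=bridge \
        --subnet=192.168.49.0/24 --gateway=192.168.49.1 \
        -o --ip-masq -o --icc -o com.docker.network.driver.mtu=1500 \
        --label=created_by.minikube.sigs.k8s.io=true \
        --label=name.minikube.sigs.k8s.io="$NET" "$NET"
    fi
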
	I0731 10:43:09.408186   51156 kic.go:117] calculated static IP "192.168.49.2" for the "ingress-addon-legacy-538476" container
	I0731 10:43:09.408237   51156 cli_runner.go:164] Run: docker ps -a --format {{.Names}}
	I0731 10:43:09.422767   51156 cli_runner.go:164] Run: docker volume create ingress-addon-legacy-538476 --label name.minikube.sigs.k8s.io=ingress-addon-legacy-538476 --label created_by.minikube.sigs.k8s.io=true
	I0731 10:43:09.438676   51156 oci.go:103] Successfully created a docker volume ingress-addon-legacy-538476
	I0731 10:43:09.438767   51156 cli_runner.go:164] Run: docker run --rm --name ingress-addon-legacy-538476-preload-sidecar --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=ingress-addon-legacy-538476 --entrypoint /usr/bin/test -v ingress-addon-legacy-538476:/var gcr.io/k8s-minikube/kicbase:v0.0.40@sha256:8cadf23777709e43eca447c47a45f5a4635615129267ce025193040ec92a1631 -d /var/lib
	I0731 10:43:11.152081   51156 cli_runner.go:217] Completed: docker run --rm --name ingress-addon-legacy-538476-preload-sidecar --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=ingress-addon-legacy-538476 --entrypoint /usr/bin/test -v ingress-addon-legacy-538476:/var gcr.io/k8s-minikube/kicbase:v0.0.40@sha256:8cadf23777709e43eca447c47a45f5a4635615129267ce025193040ec92a1631 -d /var/lib: (1.713250314s)
	I0731 10:43:11.152115   51156 oci.go:107] Successfully prepared a docker volume ingress-addon-legacy-538476
	I0731 10:43:11.152149   51156 preload.go:132] Checking if preload exists for k8s version v1.18.20 and runtime crio
	I0731 10:43:11.152168   51156 kic.go:190] Starting extracting preloaded images to volume ...
	I0731 10:43:11.152222   51156 cli_runner.go:164] Run: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/16969-5799/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.18.20-cri-o-overlay-amd64.tar.lz4:/preloaded.tar:ro -v ingress-addon-legacy-538476:/extractDir gcr.io/k8s-minikube/kicbase:v0.0.40@sha256:8cadf23777709e43eca447c47a45f5a4635615129267ce025193040ec92a1631 -I lz4 -xf /preloaded.tar -C /extractDir
	I0731 10:43:16.427009   51156 cli_runner.go:217] Completed: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/16969-5799/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.18.20-cri-o-overlay-amd64.tar.lz4:/preloaded.tar:ro -v ingress-addon-legacy-538476:/extractDir gcr.io/k8s-minikube/kicbase:v0.0.40@sha256:8cadf23777709e43eca447c47a45f5a4635615129267ce025193040ec92a1631 -I lz4 -xf /preloaded.tar -C /extractDir: (5.27472189s)
	I0731 10:43:16.427041   51156 kic.go:199] duration metric: took 5.274867 seconds to extract preloaded images to volume
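
The kic.go steps above prime the named volume before the node container exists: a throwaway "preload sidecar" mounts the volume and untars the cached images into it. Reduced to its shape (paths and image reference exactly as in the log):

    VOL=ingress-addon-legacy-538476
    TARBALL=/home/jenkins/minikube-integration/16969-5799/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.18.20-cri-o-overlay-amd64.tar.lz4
    KIC='gcr.io/k8s-minikube/kicbase:v0.0.40@sha256:8cadf23777709e43eca447c47a45f5a4635615129267ce025193040ec92a1631'
    # --rm: the sidecar lives only long enough to populate the volume
    docker run --rm --entrypoint /usr/bin/tar \
      -v "$TARBALL":/preloaded.tar:ro -v "$VOL":/extractDir \
      "$KIC" -I lz4 -xf /preloaded.tar -C /extractDir
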
	W0731 10:43:16.427184   51156 cgroups_linux.go:77] Your kernel does not support swap limit capabilities or the cgroup is not mounted.
	I0731 10:43:16.427292   51156 cli_runner.go:164] Run: docker info --format "'{{json .SecurityOptions}}'"
	I0731 10:43:16.477326   51156 cli_runner.go:164] Run: docker run -d -t --privileged --security-opt seccomp=unconfined --tmpfs /tmp --tmpfs /run -v /lib/modules:/lib/modules:ro --hostname ingress-addon-legacy-538476 --name ingress-addon-legacy-538476 --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=ingress-addon-legacy-538476 --label role.minikube.sigs.k8s.io= --label mode.minikube.sigs.k8s.io=ingress-addon-legacy-538476 --network ingress-addon-legacy-538476 --ip 192.168.49.2 --volume ingress-addon-legacy-538476:/var --security-opt apparmor=unconfined --memory=4096mb --cpus=2 -e container=docker --expose 8443 --publish=127.0.0.1::8443 --publish=127.0.0.1::22 --publish=127.0.0.1::2376 --publish=127.0.0.1::5000 --publish=127.0.0.1::32443 gcr.io/k8s-minikube/kicbase:v0.0.40@sha256:8cadf23777709e43eca447c47a45f5a4635615129267ce025193040ec92a1631
	I0731 10:43:16.785017   51156 cli_runner.go:164] Run: docker container inspect ingress-addon-legacy-538476 --format={{.State.Running}}
	I0731 10:43:16.801116   51156 cli_runner.go:164] Run: docker container inspect ingress-addon-legacy-538476 --format={{.State.Status}}
	I0731 10:43:16.818714   51156 cli_runner.go:164] Run: docker exec ingress-addon-legacy-538476 stat /var/lib/dpkg/alternatives/iptables
	I0731 10:43:16.865970   51156 oci.go:144] the created container "ingress-addon-legacy-538476" has a running status.
	I0731 10:43:16.866001   51156 kic.go:221] Creating ssh key for kic: /home/jenkins/minikube-integration/16969-5799/.minikube/machines/ingress-addon-legacy-538476/id_rsa...
	I0731 10:43:17.046075   51156 vm_assets.go:163] NewFileAsset: /home/jenkins/minikube-integration/16969-5799/.minikube/machines/ingress-addon-legacy-538476/id_rsa.pub -> /home/docker/.ssh/authorized_keys
	I0731 10:43:17.046131   51156 kic_runner.go:191] docker (temp): /home/jenkins/minikube-integration/16969-5799/.minikube/machines/ingress-addon-legacy-538476/id_rsa.pub --> /home/docker/.ssh/authorized_keys (381 bytes)
	I0731 10:43:17.070626   51156 cli_runner.go:164] Run: docker container inspect ingress-addon-legacy-538476 --format={{.State.Status}}
	I0731 10:43:17.087590   51156 kic_runner.go:93] Run: chown docker:docker /home/docker/.ssh/authorized_keys
	I0731 10:43:17.087623   51156 kic_runner.go:114] Args: [docker exec --privileged ingress-addon-legacy-538476 chown docker:docker /home/docker/.ssh/authorized_keys]
	I0731 10:43:17.151493   51156 cli_runner.go:164] Run: docker container inspect ingress-addon-legacy-538476 --format={{.State.Status}}
	I0731 10:43:17.171330   51156 machine.go:88] provisioning docker machine ...
	I0731 10:43:17.171371   51156 ubuntu.go:169] provisioning hostname "ingress-addon-legacy-538476"
	I0731 10:43:17.171448   51156 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ingress-addon-legacy-538476
	I0731 10:43:17.191980   51156 main.go:141] libmachine: Using SSH client type: native
	I0731 10:43:17.192436   51156 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x80eb00] 0x811ba0 <nil>  [] 0s} 127.0.0.1 32787 <nil> <nil>}
	I0731 10:43:17.192459   51156 main.go:141] libmachine: About to run SSH command:
	sudo hostname ingress-addon-legacy-538476 && echo "ingress-addon-legacy-538476" | sudo tee /etc/hostname
	I0731 10:43:17.193038   51156 main.go:141] libmachine: Error dialing TCP: ssh: handshake failed: read tcp 127.0.0.1:37254->127.0.0.1:32787: read: connection reset by peer
	I0731 10:43:20.327971   51156 main.go:141] libmachine: SSH cmd err, output: <nil>: ingress-addon-legacy-538476
	
	I0731 10:43:20.328043   51156 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ingress-addon-legacy-538476
	I0731 10:43:20.343778   51156 main.go:141] libmachine: Using SSH client type: native
	I0731 10:43:20.344306   51156 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x80eb00] 0x811ba0 <nil>  [] 0s} 127.0.0.1 32787 <nil> <nil>}
	I0731 10:43:20.344336   51156 main.go:141] libmachine: About to run SSH command:
	
			if ! grep -xq '.*\singress-addon-legacy-538476' /etc/hosts; then
				if grep -xq '127.0.1.1\s.*' /etc/hosts; then
					sudo sed -i 's/^127.0.1.1\s.*/127.0.1.1 ingress-addon-legacy-538476/g' /etc/hosts;
				else 
					echo '127.0.1.1 ingress-addon-legacy-538476' | sudo tee -a /etc/hosts; 
				fi
			fi
	I0731 10:43:20.465588   51156 main.go:141] libmachine: SSH cmd err, output: <nil>: 
	I0731 10:43:20.465619   51156 ubuntu.go:175] set auth options {CertDir:/home/jenkins/minikube-integration/16969-5799/.minikube CaCertPath:/home/jenkins/minikube-integration/16969-5799/.minikube/certs/ca.pem CaPrivateKeyPath:/home/jenkins/minikube-integration/16969-5799/.minikube/certs/ca-key.pem CaCertRemotePath:/etc/docker/ca.pem ServerCertPath:/home/jenkins/minikube-integration/16969-5799/.minikube/machines/server.pem ServerKeyPath:/home/jenkins/minikube-integration/16969-5799/.minikube/machines/server-key.pem ClientKeyPath:/home/jenkins/minikube-integration/16969-5799/.minikube/certs/key.pem ServerCertRemotePath:/etc/docker/server.pem ServerKeyRemotePath:/etc/docker/server-key.pem ClientCertPath:/home/jenkins/minikube-integration/16969-5799/.minikube/certs/cert.pem ServerCertSANs:[] StorePath:/home/jenkins/minikube-integration/16969-5799/.minikube}
	I0731 10:43:20.465649   51156 ubuntu.go:177] setting up certificates
	I0731 10:43:20.465660   51156 provision.go:83] configureAuth start
	I0731 10:43:20.465713   51156 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" ingress-addon-legacy-538476
	I0731 10:43:20.480794   51156 provision.go:138] copyHostCerts
	I0731 10:43:20.480826   51156 vm_assets.go:163] NewFileAsset: /home/jenkins/minikube-integration/16969-5799/.minikube/certs/ca.pem -> /home/jenkins/minikube-integration/16969-5799/.minikube/ca.pem
	I0731 10:43:20.480851   51156 exec_runner.go:144] found /home/jenkins/minikube-integration/16969-5799/.minikube/ca.pem, removing ...
	I0731 10:43:20.480869   51156 exec_runner.go:203] rm: /home/jenkins/minikube-integration/16969-5799/.minikube/ca.pem
	I0731 10:43:20.480934   51156 exec_runner.go:151] cp: /home/jenkins/minikube-integration/16969-5799/.minikube/certs/ca.pem --> /home/jenkins/minikube-integration/16969-5799/.minikube/ca.pem (1082 bytes)
	I0731 10:43:20.481005   51156 vm_assets.go:163] NewFileAsset: /home/jenkins/minikube-integration/16969-5799/.minikube/certs/cert.pem -> /home/jenkins/minikube-integration/16969-5799/.minikube/cert.pem
	I0731 10:43:20.481021   51156 exec_runner.go:144] found /home/jenkins/minikube-integration/16969-5799/.minikube/cert.pem, removing ...
	I0731 10:43:20.481026   51156 exec_runner.go:203] rm: /home/jenkins/minikube-integration/16969-5799/.minikube/cert.pem
	I0731 10:43:20.481050   51156 exec_runner.go:151] cp: /home/jenkins/minikube-integration/16969-5799/.minikube/certs/cert.pem --> /home/jenkins/minikube-integration/16969-5799/.minikube/cert.pem (1123 bytes)
	I0731 10:43:20.481096   51156 vm_assets.go:163] NewFileAsset: /home/jenkins/minikube-integration/16969-5799/.minikube/certs/key.pem -> /home/jenkins/minikube-integration/16969-5799/.minikube/key.pem
	I0731 10:43:20.481111   51156 exec_runner.go:144] found /home/jenkins/minikube-integration/16969-5799/.minikube/key.pem, removing ...
	I0731 10:43:20.481118   51156 exec_runner.go:203] rm: /home/jenkins/minikube-integration/16969-5799/.minikube/key.pem
	I0731 10:43:20.481139   51156 exec_runner.go:151] cp: /home/jenkins/minikube-integration/16969-5799/.minikube/certs/key.pem --> /home/jenkins/minikube-integration/16969-5799/.minikube/key.pem (1675 bytes)
	I0731 10:43:20.481185   51156 provision.go:112] generating server cert: /home/jenkins/minikube-integration/16969-5799/.minikube/machines/server.pem ca-key=/home/jenkins/minikube-integration/16969-5799/.minikube/certs/ca.pem private-key=/home/jenkins/minikube-integration/16969-5799/.minikube/certs/ca-key.pem org=jenkins.ingress-addon-legacy-538476 san=[192.168.49.2 127.0.0.1 localhost 127.0.0.1 minikube ingress-addon-legacy-538476]
	I0731 10:43:20.599552   51156 provision.go:172] copyRemoteCerts
	I0731 10:43:20.599602   51156 ssh_runner.go:195] Run: sudo mkdir -p /etc/docker /etc/docker /etc/docker
	I0731 10:43:20.599633   51156 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ingress-addon-legacy-538476
	I0731 10:43:20.615004   51156 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32787 SSHKeyPath:/home/jenkins/minikube-integration/16969-5799/.minikube/machines/ingress-addon-legacy-538476/id_rsa Username:docker}
	I0731 10:43:20.705879   51156 vm_assets.go:163] NewFileAsset: /home/jenkins/minikube-integration/16969-5799/.minikube/certs/ca.pem -> /etc/docker/ca.pem
	I0731 10:43:20.705927   51156 ssh_runner.go:362] scp /home/jenkins/minikube-integration/16969-5799/.minikube/certs/ca.pem --> /etc/docker/ca.pem (1082 bytes)
	I0731 10:43:20.726292   51156 vm_assets.go:163] NewFileAsset: /home/jenkins/minikube-integration/16969-5799/.minikube/machines/server.pem -> /etc/docker/server.pem
	I0731 10:43:20.726337   51156 ssh_runner.go:362] scp /home/jenkins/minikube-integration/16969-5799/.minikube/machines/server.pem --> /etc/docker/server.pem (1253 bytes)
	I0731 10:43:20.745475   51156 vm_assets.go:163] NewFileAsset: /home/jenkins/minikube-integration/16969-5799/.minikube/machines/server-key.pem -> /etc/docker/server-key.pem
	I0731 10:43:20.745528   51156 ssh_runner.go:362] scp /home/jenkins/minikube-integration/16969-5799/.minikube/machines/server-key.pem --> /etc/docker/server-key.pem (1675 bytes)
	I0731 10:43:20.764931   51156 provision.go:86] duration metric: configureAuth took 299.254814ms
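
configureAuth generated a server certificate signed by the shared minikube CA, using the SAN list shown at 10:43:20.481185, then copied ca.pem/server.pem/server-key.pem into /etc/docker on the node. minikube does this in Go (provision.go); a rough openssl equivalent, offered only as a hypothetical sketch with shortened file names, would be:

    # key + CSR for the machine; the org mirrors the log's org= field
    openssl req -new -newkey rsa:2048 -nodes -keyout server-key.pem \
      -out server.csr -subj '/O=jenkins.ingress-addon-legacy-538476'
    # sign with the shared CA, embedding the same SANs as the log
    openssl x509 -req -in server.csr -CA ca.pem -CAkey ca-key.pem \
      -CAcreateserial -days 365 -out server.pem \
      -extfile <(printf 'subjectAltName=IP:192.168.49.2,IP:127.0.0.1,DNS:localhost,DNS:minikube,DNS:ingress-addon-legacy-538476')
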
	I0731 10:43:20.764956   51156 ubuntu.go:193] setting minikube options for container-runtime
	I0731 10:43:20.765158   51156 config.go:182] Loaded profile config "ingress-addon-legacy-538476": Driver=docker, ContainerRuntime=crio, KubernetesVersion=v1.18.20
	I0731 10:43:20.765272   51156 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ingress-addon-legacy-538476
	I0731 10:43:20.781440   51156 main.go:141] libmachine: Using SSH client type: native
	I0731 10:43:20.781852   51156 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x80eb00] 0x811ba0 <nil>  [] 0s} 127.0.0.1 32787 <nil> <nil>}
	I0731 10:43:20.781876   51156 main.go:141] libmachine: About to run SSH command:
	sudo mkdir -p /etc/sysconfig && printf %s "
	CRIO_MINIKUBE_OPTIONS='--insecure-registry 10.96.0.0/12 '
	" | sudo tee /etc/sysconfig/crio.minikube && sudo systemctl restart crio
	I0731 10:43:21.007861   51156 main.go:141] libmachine: SSH cmd err, output: <nil>: 
	CRIO_MINIKUBE_OPTIONS='--insecure-registry 10.96.0.0/12 '
	
	I0731 10:43:21.007898   51156 machine.go:91] provisioned docker machine in 3.836541752s
	I0731 10:43:21.007911   51156 client.go:171] LocalClient.Create took 11.695540191s
	I0731 10:43:21.007943   51156 start.go:167] duration metric: libmachine.API.Create for "ingress-addon-legacy-538476" took 11.695607906s
	I0731 10:43:21.007960   51156 start.go:300] post-start starting for "ingress-addon-legacy-538476" (driver="docker")
	I0731 10:43:21.007973   51156 start.go:329] creating required directories: [/etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs]
	I0731 10:43:21.008041   51156 ssh_runner.go:195] Run: sudo mkdir -p /etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs
	I0731 10:43:21.008138   51156 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ingress-addon-legacy-538476
	I0731 10:43:21.024504   51156 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32787 SSHKeyPath:/home/jenkins/minikube-integration/16969-5799/.minikube/machines/ingress-addon-legacy-538476/id_rsa Username:docker}
	I0731 10:43:21.117913   51156 ssh_runner.go:195] Run: cat /etc/os-release
	I0731 10:43:21.120577   51156 main.go:141] libmachine: Couldn't set key VERSION_CODENAME, no corresponding struct field found
	I0731 10:43:21.120606   51156 main.go:141] libmachine: Couldn't set key PRIVACY_POLICY_URL, no corresponding struct field found
	I0731 10:43:21.120620   51156 main.go:141] libmachine: Couldn't set key UBUNTU_CODENAME, no corresponding struct field found
	I0731 10:43:21.120630   51156 info.go:137] Remote host: Ubuntu 22.04.2 LTS
	I0731 10:43:21.120641   51156 filesync.go:126] Scanning /home/jenkins/minikube-integration/16969-5799/.minikube/addons for local assets ...
	I0731 10:43:21.120689   51156 filesync.go:126] Scanning /home/jenkins/minikube-integration/16969-5799/.minikube/files for local assets ...
	I0731 10:43:21.120773   51156 filesync.go:149] local asset: /home/jenkins/minikube-integration/16969-5799/.minikube/files/etc/ssl/certs/125372.pem -> 125372.pem in /etc/ssl/certs
	I0731 10:43:21.120784   51156 vm_assets.go:163] NewFileAsset: /home/jenkins/minikube-integration/16969-5799/.minikube/files/etc/ssl/certs/125372.pem -> /etc/ssl/certs/125372.pem
	I0731 10:43:21.120890   51156 ssh_runner.go:195] Run: sudo mkdir -p /etc/ssl/certs
	I0731 10:43:21.128082   51156 ssh_runner.go:362] scp /home/jenkins/minikube-integration/16969-5799/.minikube/files/etc/ssl/certs/125372.pem --> /etc/ssl/certs/125372.pem (1708 bytes)
	I0731 10:43:21.147531   51156 start.go:303] post-start completed in 139.559306ms
	I0731 10:43:21.147827   51156 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" ingress-addon-legacy-538476
	I0731 10:43:21.163499   51156 profile.go:148] Saving config to /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/ingress-addon-legacy-538476/config.json ...
	I0731 10:43:21.163722   51156 ssh_runner.go:195] Run: sh -c "df -h /var | awk 'NR==2{print $5}'"
	I0731 10:43:21.163769   51156 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ingress-addon-legacy-538476
	I0731 10:43:21.179451   51156 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32787 SSHKeyPath:/home/jenkins/minikube-integration/16969-5799/.minikube/machines/ingress-addon-legacy-538476/id_rsa Username:docker}
	I0731 10:43:21.266408   51156 ssh_runner.go:195] Run: sh -c "df -BG /var | awk 'NR==2{print $4}'"
	I0731 10:43:21.270118   51156 start.go:128] duration metric: createHost completed in 11.960275178s
	I0731 10:43:21.270137   51156 start.go:83] releasing machines lock for "ingress-addon-legacy-538476", held for 11.9604156s
	I0731 10:43:21.270185   51156 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" ingress-addon-legacy-538476
	I0731 10:43:21.285555   51156 ssh_runner.go:195] Run: cat /version.json
	I0731 10:43:21.285629   51156 ssh_runner.go:195] Run: curl -sS -m 2 https://registry.k8s.io/
	I0731 10:43:21.285652   51156 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ingress-addon-legacy-538476
	I0731 10:43:21.285677   51156 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ingress-addon-legacy-538476
	I0731 10:43:21.302014   51156 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32787 SSHKeyPath:/home/jenkins/minikube-integration/16969-5799/.minikube/machines/ingress-addon-legacy-538476/id_rsa Username:docker}
	I0731 10:43:21.302234   51156 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32787 SSHKeyPath:/home/jenkins/minikube-integration/16969-5799/.minikube/machines/ingress-addon-legacy-538476/id_rsa Username:docker}
	I0731 10:43:21.385362   51156 ssh_runner.go:195] Run: systemctl --version
	I0731 10:43:21.472527   51156 ssh_runner.go:195] Run: sudo sh -c "podman version >/dev/null"
	I0731 10:43:21.607765   51156 ssh_runner.go:195] Run: sh -c "stat /etc/cni/net.d/*loopback.conf*"
	I0731 10:43:21.611832   51156 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f -name *loopback.conf* -not -name *.mk_disabled -exec sh -c "sudo mv {} {}.mk_disabled" ;
	I0731 10:43:21.629011   51156 cni.go:221] loopback cni configuration disabled: "/etc/cni/net.d/*loopback.conf*" found
	I0731 10:43:21.629083   51156 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f ( ( -name *bridge* -or -name *podman* ) -and -not -name *.mk_disabled ) -printf "%p, " -exec sh -c "sudo mv {} {}.mk_disabled" ;
	I0731 10:43:21.652824   51156 cni.go:262] disabled [/etc/cni/net.d/87-podman-bridge.conflist, /etc/cni/net.d/100-crio-bridge.conf] bridge cni config(s)
	I0731 10:43:21.652845   51156 start.go:466] detecting cgroup driver to use...
	I0731 10:43:21.652875   51156 detect.go:196] detected "cgroupfs" cgroup driver on host os
	I0731 10:43:21.652920   51156 ssh_runner.go:195] Run: sudo systemctl stop -f containerd
	I0731 10:43:21.665535   51156 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service containerd
	I0731 10:43:21.674417   51156 docker.go:196] disabling cri-docker service (if available) ...
	I0731 10:43:21.674455   51156 ssh_runner.go:195] Run: sudo systemctl stop -f cri-docker.socket
	I0731 10:43:21.685355   51156 ssh_runner.go:195] Run: sudo systemctl stop -f cri-docker.service
	I0731 10:43:21.696801   51156 ssh_runner.go:195] Run: sudo systemctl disable cri-docker.socket
	I0731 10:43:21.774886   51156 ssh_runner.go:195] Run: sudo systemctl mask cri-docker.service
	I0731 10:43:21.852217   51156 docker.go:212] disabling docker service ...
	I0731 10:43:21.852270   51156 ssh_runner.go:195] Run: sudo systemctl stop -f docker.socket
	I0731 10:43:21.868370   51156 ssh_runner.go:195] Run: sudo systemctl stop -f docker.service
	I0731 10:43:21.877488   51156 ssh_runner.go:195] Run: sudo systemctl disable docker.socket
	I0731 10:43:21.949135   51156 ssh_runner.go:195] Run: sudo systemctl mask docker.service
	I0731 10:43:22.025134   51156 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service docker
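
Because the kic base image ships Docker, cri-dockerd, and cri-o side by side, the run above has to switch the node over so only crio owns the runtime socket. The shutdown sequence, condensed from the individual systemctl commands in the log:

    # sockets first, then services; mask stops systemd re-activating them
    sudo systemctl stop -f cri-docker.socket cri-docker.service
    sudo systemctl disable cri-docker.socket
    sudo systemctl mask cri-docker.service
    sudo systemctl stop -f docker.socket docker.service
    sudo systemctl disable docker.socket
    sudo systemctl mask docker.service
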
	I0731 10:43:22.034548   51156 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///var/run/crio/crio.sock
	" | sudo tee /etc/crictl.yaml"
	I0731 10:43:22.047955   51156 crio.go:59] configure cri-o to use "registry.k8s.io/pause:3.2" pause image...
	I0731 10:43:22.048000   51156 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|^.*pause_image = .*$|pause_image = "registry.k8s.io/pause:3.2"|' /etc/crio/crio.conf.d/02-crio.conf"
	I0731 10:43:22.055947   51156 crio.go:70] configuring cri-o to use "cgroupfs" as cgroup driver...
	I0731 10:43:22.055986   51156 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|^.*cgroup_manager = .*$|cgroup_manager = "cgroupfs"|' /etc/crio/crio.conf.d/02-crio.conf"
	I0731 10:43:22.063767   51156 ssh_runner.go:195] Run: sh -c "sudo sed -i '/conmon_cgroup = .*/d' /etc/crio/crio.conf.d/02-crio.conf"
	I0731 10:43:22.071423   51156 ssh_runner.go:195] Run: sh -c "sudo sed -i '/cgroup_manager = .*/a conmon_cgroup = "pod"' /etc/crio/crio.conf.d/02-crio.conf"
	I0731 10:43:22.079196   51156 ssh_runner.go:195] Run: sh -c "sudo rm -rf /etc/cni/net.mk"
	I0731 10:43:22.086512   51156 ssh_runner.go:195] Run: sudo sysctl net.bridge.bridge-nf-call-iptables
	I0731 10:43:22.093079   51156 ssh_runner.go:195] Run: sudo sh -c "echo 1 > /proc/sys/net/ipv4/ip_forward"
	I0731 10:43:22.099682   51156 ssh_runner.go:195] Run: sudo systemctl daemon-reload
	I0731 10:43:22.172223   51156 ssh_runner.go:195] Run: sudo systemctl restart crio
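
The crio.go edits above are plain sed rewrites of /etc/crio/crio.conf.d/02-crio.conf, followed by a daemon-reload and restart. Put together as one script (the sed expressions are copied from the log unchanged):

    CONF=/etc/crio/crio.conf.d/02-crio.conf
    sudo sed -i 's|^.*pause_image = .*$|pause_image = "registry.k8s.io/pause:3.2"|' "$CONF"
    sudo sed -i 's|^.*cgroup_manager = .*$|cgroup_manager = "cgroupfs"|' "$CONF"
    # drop any stale conmon_cgroup, then re-insert it right after
    # cgroup_manager so the two settings stay consistent
    sudo sed -i '/conmon_cgroup = .*/d' "$CONF"
    sudo sed -i '/cgroup_manager = .*/a conmon_cgroup = "pod"' "$CONF"
    sudo systemctl daemon-reload && sudo systemctl restart crio
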
	I0731 10:43:22.268982   51156 start.go:513] Will wait 60s for socket path /var/run/crio/crio.sock
	I0731 10:43:22.269066   51156 ssh_runner.go:195] Run: stat /var/run/crio/crio.sock
	I0731 10:43:22.272196   51156 start.go:534] Will wait 60s for crictl version
	I0731 10:43:22.272246   51156 ssh_runner.go:195] Run: which crictl
	I0731 10:43:22.275093   51156 ssh_runner.go:195] Run: sudo /usr/bin/crictl version
	I0731 10:43:22.304141   51156 start.go:550] Version:  0.1.0
	RuntimeName:  cri-o
	RuntimeVersion:  1.24.6
	RuntimeApiVersion:  v1
	I0731 10:43:22.304201   51156 ssh_runner.go:195] Run: crio --version
	I0731 10:43:22.335032   51156 ssh_runner.go:195] Run: crio --version
	I0731 10:43:22.367656   51156 out.go:177] * Preparing Kubernetes v1.18.20 on CRI-O 1.24.6 ...
	I0731 10:43:22.369150   51156 cli_runner.go:164] Run: docker network inspect ingress-addon-legacy-538476 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
	I0731 10:43:22.385038   51156 ssh_runner.go:195] Run: grep 192.168.49.1	host.minikube.internal$ /etc/hosts
	I0731 10:43:22.388326   51156 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\thost.minikube.internal$' "/etc/hosts"; echo "192.168.49.1	host.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
	I0731 10:43:22.397789   51156 preload.go:132] Checking if preload exists for k8s version v1.18.20 and runtime crio
	I0731 10:43:22.397839   51156 ssh_runner.go:195] Run: sudo crictl images --output json
	I0731 10:43:22.438497   51156 crio.go:492] couldn't find preloaded image for "registry.k8s.io/kube-apiserver:v1.18.20". assuming images are not preloaded.
	I0731 10:43:22.438548   51156 ssh_runner.go:195] Run: which lz4
	I0731 10:43:22.441576   51156 vm_assets.go:163] NewFileAsset: /home/jenkins/minikube-integration/16969-5799/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.18.20-cri-o-overlay-amd64.tar.lz4 -> /preloaded.tar.lz4
	I0731 10:43:22.441639   51156 ssh_runner.go:195] Run: stat -c "%s %y" /preloaded.tar.lz4
	I0731 10:43:22.444418   51156 ssh_runner.go:352] existence check for /preloaded.tar.lz4: stat -c "%s %y" /preloaded.tar.lz4: Process exited with status 1
	stdout:
	
	stderr:
	stat: cannot statx '/preloaded.tar.lz4': No such file or directory
	I0731 10:43:22.444442   51156 ssh_runner.go:362] scp /home/jenkins/minikube-integration/16969-5799/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.18.20-cri-o-overlay-amd64.tar.lz4 --> /preloaded.tar.lz4 (495439307 bytes)
	I0731 10:43:23.319323   51156 crio.go:444] Took 0.877703 seconds to copy over tarball
	I0731 10:43:23.319385   51156 ssh_runner.go:195] Run: sudo tar -I lz4 -C /var -xf /preloaded.tar.lz4
	I0731 10:43:25.519878   51156 ssh_runner.go:235] Completed: sudo tar -I lz4 -C /var -xf /preloaded.tar.lz4: (2.200461943s)
	I0731 10:43:25.519907   51156 crio.go:451] Took 2.200562 seconds to extract the tarball
	I0731 10:43:25.519915   51156 ssh_runner.go:146] rm: /preloaded.tar.lz4
	I0731 10:43:25.588301   51156 ssh_runner.go:195] Run: sudo crictl images --output json
	I0731 10:43:25.617955   51156 crio.go:492] couldn't find preloaded image for "registry.k8s.io/kube-apiserver:v1.18.20". assuming images are not preloaded.
	I0731 10:43:25.617975   51156 cache_images.go:88] LoadImages start: [registry.k8s.io/kube-apiserver:v1.18.20 registry.k8s.io/kube-controller-manager:v1.18.20 registry.k8s.io/kube-scheduler:v1.18.20 registry.k8s.io/kube-proxy:v1.18.20 registry.k8s.io/pause:3.2 registry.k8s.io/etcd:3.4.3-0 registry.k8s.io/coredns:1.6.7 gcr.io/k8s-minikube/storage-provisioner:v5]
	I0731 10:43:25.618039   51156 image.go:134] retrieving image: gcr.io/k8s-minikube/storage-provisioner:v5
	I0731 10:43:25.618049   51156 image.go:134] retrieving image: registry.k8s.io/kube-proxy:v1.18.20
	I0731 10:43:25.618081   51156 image.go:134] retrieving image: registry.k8s.io/kube-controller-manager:v1.18.20
	I0731 10:43:25.618123   51156 image.go:134] retrieving image: registry.k8s.io/etcd:3.4.3-0
	I0731 10:43:25.618146   51156 image.go:134] retrieving image: registry.k8s.io/coredns:1.6.7
	I0731 10:43:25.618088   51156 image.go:134] retrieving image: registry.k8s.io/kube-apiserver:v1.18.20
	I0731 10:43:25.618124   51156 image.go:134] retrieving image: registry.k8s.io/kube-scheduler:v1.18.20
	I0731 10:43:25.618230   51156 image.go:134] retrieving image: registry.k8s.io/pause:3.2
	I0731 10:43:25.619369   51156 image.go:177] daemon lookup for registry.k8s.io/etcd:3.4.3-0: Error response from daemon: No such image: registry.k8s.io/etcd:3.4.3-0
	I0731 10:43:25.619382   51156 image.go:177] daemon lookup for registry.k8s.io/kube-scheduler:v1.18.20: Error response from daemon: No such image: registry.k8s.io/kube-scheduler:v1.18.20
	I0731 10:43:25.619388   51156 image.go:177] daemon lookup for registry.k8s.io/coredns:1.6.7: Error response from daemon: No such image: registry.k8s.io/coredns:1.6.7
	I0731 10:43:25.619396   51156 image.go:177] daemon lookup for gcr.io/k8s-minikube/storage-provisioner:v5: Error response from daemon: No such image: gcr.io/k8s-minikube/storage-provisioner:v5
	I0731 10:43:25.619410   51156 image.go:177] daemon lookup for registry.k8s.io/pause:3.2: Error response from daemon: No such image: registry.k8s.io/pause:3.2
	I0731 10:43:25.619422   51156 image.go:177] daemon lookup for registry.k8s.io/kube-proxy:v1.18.20: Error response from daemon: No such image: registry.k8s.io/kube-proxy:v1.18.20
	I0731 10:43:25.619441   51156 image.go:177] daemon lookup for registry.k8s.io/kube-controller-manager:v1.18.20: Error response from daemon: No such image: registry.k8s.io/kube-controller-manager:v1.18.20
	I0731 10:43:25.619538   51156 image.go:177] daemon lookup for registry.k8s.io/kube-apiserver:v1.18.20: Error response from daemon: No such image: registry.k8s.io/kube-apiserver:v1.18.20
	I0731 10:43:25.808069   51156 ssh_runner.go:195] Run: sudo podman image inspect --format {{.Id}} gcr.io/k8s-minikube/storage-provisioner:v5
	I0731 10:43:26.915060   51156 ssh_runner.go:195] Run: sudo podman image inspect --format {{.Id}} registry.k8s.io/coredns:1.6.7
	I0731 10:43:26.945906   51156 ssh_runner.go:195] Run: sudo podman image inspect --format {{.Id}} registry.k8s.io/kube-scheduler:v1.18.20
	I0731 10:43:26.948510   51156 ssh_runner.go:195] Run: sudo podman image inspect --format {{.Id}} registry.k8s.io/etcd:3.4.3-0
	I0731 10:43:26.949676   51156 cache_images.go:116] "registry.k8s.io/coredns:1.6.7" needs transfer: "registry.k8s.io/coredns:1.6.7" does not exist at hash "67da37a9a360e600e74464da48437257b00a754c77c40f60c65e4cb327c34bd5" in container runtime
	I0731 10:43:26.949711   51156 cri.go:218] Removing image: registry.k8s.io/coredns:1.6.7
	I0731 10:43:26.949747   51156 ssh_runner.go:195] Run: which crictl
	I0731 10:43:26.956233   51156 ssh_runner.go:195] Run: sudo podman image inspect --format {{.Id}} registry.k8s.io/pause:3.2
	I0731 10:43:26.980681   51156 ssh_runner.go:195] Run: sudo podman image inspect --format {{.Id}} registry.k8s.io/kube-apiserver:v1.18.20
	I0731 10:43:26.983425   51156 cache_images.go:116] "registry.k8s.io/kube-scheduler:v1.18.20" needs transfer: "registry.k8s.io/kube-scheduler:v1.18.20" does not exist at hash "a05a1a79adaad058478b7638d2e73cf408b283305081516fbe02706b0e205346" in container runtime
	I0731 10:43:26.983463   51156 cri.go:218] Removing image: registry.k8s.io/kube-scheduler:v1.18.20
	I0731 10:43:26.983472   51156 cache_images.go:116] "registry.k8s.io/etcd:3.4.3-0" needs transfer: "registry.k8s.io/etcd:3.4.3-0" does not exist at hash "303ce5db0e90dab1c5728ec70d21091201a23cdf8aeca70ab54943bbaaf0833f" in container runtime
	I0731 10:43:26.983490   51156 cri.go:218] Removing image: registry.k8s.io/etcd:3.4.3-0
	I0731 10:43:26.983509   51156 ssh_runner.go:195] Run: which crictl
	I0731 10:43:26.983515   51156 ssh_runner.go:195] Run: which crictl
	I0731 10:43:26.983566   51156 ssh_runner.go:195] Run: sudo /usr/bin/crictl rmi registry.k8s.io/coredns:1.6.7
	I0731 10:43:27.006307   51156 ssh_runner.go:195] Run: sudo podman image inspect --format {{.Id}} registry.k8s.io/kube-controller-manager:v1.18.20
	I0731 10:43:27.011977   51156 cache_images.go:116] "registry.k8s.io/pause:3.2" needs transfer: "registry.k8s.io/pause:3.2" does not exist at hash "80d28bedfe5dec59da9ebf8e6260224ac9008ab5c11dbbe16ee3ba3e4439ac2c" in container runtime
	I0731 10:43:27.012015   51156 cri.go:218] Removing image: registry.k8s.io/pause:3.2
	I0731 10:43:27.012049   51156 ssh_runner.go:195] Run: which crictl
	I0731 10:43:27.027124   51156 ssh_runner.go:195] Run: sudo podman image inspect --format {{.Id}} registry.k8s.io/kube-proxy:v1.18.20
	I0731 10:43:27.030659   51156 cache_images.go:116] "registry.k8s.io/kube-apiserver:v1.18.20" needs transfer: "registry.k8s.io/kube-apiserver:v1.18.20" does not exist at hash "7d8d2960de69688eab5698081441539a1662f47e092488973e455a8334955cb1" in container runtime
	I0731 10:43:27.030701   51156 cri.go:218] Removing image: registry.k8s.io/kube-apiserver:v1.18.20
	I0731 10:43:27.030742   51156 ssh_runner.go:195] Run: which crictl
	I0731 10:43:27.030861   51156 ssh_runner.go:195] Run: sudo /usr/bin/crictl rmi registry.k8s.io/kube-scheduler:v1.18.20
	I0731 10:43:27.033812   51156 cache_images.go:286] Loading image from: /home/jenkins/minikube-integration/16969-5799/.minikube/cache/images/amd64/registry.k8s.io/coredns_1.6.7
	I0731 10:43:27.033903   51156 ssh_runner.go:195] Run: sudo /usr/bin/crictl rmi registry.k8s.io/etcd:3.4.3-0
	I0731 10:43:27.104813   51156 cache_images.go:116] "registry.k8s.io/kube-controller-manager:v1.18.20" needs transfer: "registry.k8s.io/kube-controller-manager:v1.18.20" does not exist at hash "e7c545a60706cf009a893afdc7dba900cc2e342b8042b9c421d607ca41e8b290" in container runtime
	I0731 10:43:27.104853   51156 cri.go:218] Removing image: registry.k8s.io/kube-controller-manager:v1.18.20
	I0731 10:43:27.104904   51156 ssh_runner.go:195] Run: which crictl
	I0731 10:43:27.104911   51156 ssh_runner.go:195] Run: sudo /usr/bin/crictl rmi registry.k8s.io/pause:3.2
	I0731 10:43:27.124467   51156 cache_images.go:116] "registry.k8s.io/kube-proxy:v1.18.20" needs transfer: "registry.k8s.io/kube-proxy:v1.18.20" does not exist at hash "27f8b8d51985f755cfb3ffea424fa34865cc0da63e99378d8202f923c3c5a8ba" in container runtime
	I0731 10:43:27.124510   51156 cri.go:218] Removing image: registry.k8s.io/kube-proxy:v1.18.20
	I0731 10:43:27.124519   51156 ssh_runner.go:195] Run: sudo /usr/bin/crictl rmi registry.k8s.io/kube-apiserver:v1.18.20
	I0731 10:43:27.124547   51156 ssh_runner.go:195] Run: which crictl
	I0731 10:43:27.124579   51156 cache_images.go:286] Loading image from: /home/jenkins/minikube-integration/16969-5799/.minikube/cache/images/amd64/registry.k8s.io/kube-scheduler_v1.18.20
	I0731 10:43:27.124652   51156 cache_images.go:286] Loading image from: /home/jenkins/minikube-integration/16969-5799/.minikube/cache/images/amd64/registry.k8s.io/etcd_3.4.3-0
	I0731 10:43:27.139973   51156 cache_images.go:286] Loading image from: /home/jenkins/minikube-integration/16969-5799/.minikube/cache/images/amd64/registry.k8s.io/pause_3.2
	I0731 10:43:27.140029   51156 ssh_runner.go:195] Run: sudo /usr/bin/crictl rmi registry.k8s.io/kube-controller-manager:v1.18.20
	I0731 10:43:27.212344   51156 ssh_runner.go:195] Run: sudo /usr/bin/crictl rmi registry.k8s.io/kube-proxy:v1.18.20
	I0731 10:43:27.212455   51156 cache_images.go:286] Loading image from: /home/jenkins/minikube-integration/16969-5799/.minikube/cache/images/amd64/registry.k8s.io/kube-apiserver_v1.18.20
	I0731 10:43:27.217780   51156 cache_images.go:286] Loading image from: /home/jenkins/minikube-integration/16969-5799/.minikube/cache/images/amd64/registry.k8s.io/kube-controller-manager_v1.18.20
	I0731 10:43:27.242247   51156 cache_images.go:286] Loading image from: /home/jenkins/minikube-integration/16969-5799/.minikube/cache/images/amd64/registry.k8s.io/kube-proxy_v1.18.20
	I0731 10:43:27.242294   51156 cache_images.go:92] LoadImages completed in 1.624307805s
	W0731 10:43:27.242347   51156 out.go:239] X Unable to load cached images: loading cached images: stat /home/jenkins/minikube-integration/16969-5799/.minikube/cache/images/amd64/registry.k8s.io/coredns_1.6.7: no such file or directory
	I0731 10:43:27.242406   51156 ssh_runner.go:195] Run: crio config
	I0731 10:43:27.279390   51156 cni.go:84] Creating CNI manager for ""
	I0731 10:43:27.279410   51156 cni.go:143] "docker" driver + "crio" runtime found, recommending kindnet
	I0731 10:43:27.279420   51156 kubeadm.go:87] Using pod CIDR: 10.244.0.0/16
	I0731 10:43:27.279441   51156 kubeadm.go:176] kubeadm options: {CertDir:/var/lib/minikube/certs ServiceCIDR:10.96.0.0/12 PodSubnet:10.244.0.0/16 AdvertiseAddress:192.168.49.2 APIServerPort:8443 KubernetesVersion:v1.18.20 EtcdDataDir:/var/lib/minikube/etcd EtcdExtraArgs:map[] ClusterName:ingress-addon-legacy-538476 NodeName:ingress-addon-legacy-538476 DNSDomain:cluster.local CRISocket:/var/run/crio/crio.sock ImageRepository: ComponentOptions:[{Component:apiServer ExtraArgs:map[enable-admission-plugins:NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota] Pairs:map[certSANs:["127.0.0.1", "localhost", "192.168.49.2"]]} {Component:controllerManager ExtraArgs:map[allocate-node-cidrs:true leader-elect:false] Pairs:map[]} {Component:scheduler ExtraArgs:map[leader-elect:false] Pairs:map[]}] FeatureArgs:map[] NodeIP:192.168.49.2 CgroupDriver:cgroupfs ClientCAFile:/var/lib/minikube/certs/ca.crt StaticPodPath:/etc/kubernetes/manifests ControlPlaneAddress:control-plane.minikube.internal KubeProxyOptions:map[] ResolvConfSearchRegression:false KubeletConfigOpts:map[hairpinMode:hairpin-veth runtimeRequestTimeout:15m] PrependCriSocketUnix:false}
	I0731 10:43:27.279590   51156 kubeadm.go:181] kubeadm config:
	apiVersion: kubeadm.k8s.io/v1beta2
	kind: InitConfiguration
	localAPIEndpoint:
	  advertiseAddress: 192.168.49.2
	  bindPort: 8443
	bootstrapTokens:
	  - groups:
	      - system:bootstrappers:kubeadm:default-node-token
	    ttl: 24h0m0s
	    usages:
	      - signing
	      - authentication
	nodeRegistration:
	  criSocket: /var/run/crio/crio.sock
	  name: "ingress-addon-legacy-538476"
	  kubeletExtraArgs:
	    node-ip: 192.168.49.2
	  taints: []
	---
	apiVersion: kubeadm.k8s.io/v1beta2
	kind: ClusterConfiguration
	apiServer:
	  certSANs: ["127.0.0.1", "localhost", "192.168.49.2"]
	  extraArgs:
	    enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
	controllerManager:
	  extraArgs:
	    allocate-node-cidrs: "true"
	    leader-elect: "false"
	scheduler:
	  extraArgs:
	    leader-elect: "false"
	certificatesDir: /var/lib/minikube/certs
	clusterName: mk
	controlPlaneEndpoint: control-plane.minikube.internal:8443
	dns:
	  type: CoreDNS
	etcd:
	  local:
	    dataDir: /var/lib/minikube/etcd
	    extraArgs:
	      proxy-refresh-interval: "70000"
	kubernetesVersion: v1.18.20
	networking:
	  dnsDomain: cluster.local
	  podSubnet: "10.244.0.0/16"
	  serviceSubnet: 10.96.0.0/12
	---
	apiVersion: kubelet.config.k8s.io/v1beta1
	kind: KubeletConfiguration
	authentication:
	  x509:
	    clientCAFile: /var/lib/minikube/certs/ca.crt
	cgroupDriver: cgroupfs
	hairpinMode: hairpin-veth
	runtimeRequestTimeout: 15m
	clusterDomain: "cluster.local"
	# disable disk resource management by default
	imageGCHighThresholdPercent: 100
	evictionHard:
	  nodefs.available: "0%"
	  nodefs.inodesFree: "0%"
	  imagefs.available: "0%"
	failSwapOn: false
	staticPodPath: /etc/kubernetes/manifests
	---
	apiVersion: kubeproxy.config.k8s.io/v1alpha1
	kind: KubeProxyConfiguration
	clusterCIDR: "10.244.0.0/16"
	metricsBindAddress: 0.0.0.0:10249
	conntrack:
	  maxPerCore: 0
	# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_established"
	  tcpEstablishedTimeout: 0s
	# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_close"
	  tcpCloseWaitTimeout: 0s
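
This four-document manifest (InitConfiguration, ClusterConfiguration, KubeletConfiguration, KubeProxyConfiguration) is written to /var/tmp/minikube/kubeadm.yaml.new a few lines below. A config of this shape is what kubeadm init consumes; the invocation sketched here is hypothetical and not part of this excerpt, though the binary path matches the kubelet line that follows:

    sudo /var/lib/minikube/binaries/v1.18.20/kubeadm init \
      --config /var/tmp/minikube/kubeadm.yaml.new \
      --ignore-preflight-errors=all
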
	
	I0731 10:43:27.279692   51156 kubeadm.go:976] kubelet [Unit]
	Wants=crio.service
	
	[Service]
	ExecStart=
	ExecStart=/var/lib/minikube/binaries/v1.18.20/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --cgroups-per-qos=false --config=/var/lib/kubelet/config.yaml --container-runtime=remote --container-runtime-endpoint=unix:///var/run/crio/crio.sock --enforce-node-allocatable= --hostname-override=ingress-addon-legacy-538476 --kubeconfig=/etc/kubernetes/kubelet.conf --network-plugin=cni --node-ip=192.168.49.2
	
	[Install]
	 config:
	{KubernetesVersion:v1.18.20 ClusterName:ingress-addon-legacy-538476 Namespace:default APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:crio CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI: NodeIP: NodePort:8443 NodeName:}
	I0731 10:43:27.279749   51156 ssh_runner.go:195] Run: sudo ls /var/lib/minikube/binaries/v1.18.20
	I0731 10:43:27.287297   51156 binaries.go:44] Found k8s binaries, skipping transfer
	I0731 10:43:27.287345   51156 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/kubelet.service.d /lib/systemd/system /var/tmp/minikube
	I0731 10:43:27.294532   51156 ssh_runner.go:362] scp memory --> /etc/systemd/system/kubelet.service.d/10-kubeadm.conf (486 bytes)
	I0731 10:43:27.308939   51156 ssh_runner.go:362] scp memory --> /lib/systemd/system/kubelet.service (353 bytes)
	I0731 10:43:27.323296   51156 ssh_runner.go:362] scp memory --> /var/tmp/minikube/kubeadm.yaml.new (2123 bytes)
	I0731 10:43:27.337332   51156 ssh_runner.go:195] Run: grep 192.168.49.2	control-plane.minikube.internal$ /etc/hosts
	I0731 10:43:27.340164   51156 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\tcontrol-plane.minikube.internal$' "/etc/hosts"; echo "192.168.49.2	control-plane.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
	I0731 10:43:27.348716   51156 certs.go:56] Setting up /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/ingress-addon-legacy-538476 for IP: 192.168.49.2
	I0731 10:43:27.348771   51156 certs.go:190] acquiring lock for shared ca certs: {Name:mke1f008d411b97835fe7ef4c9ac6bdba0705009 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
	I0731 10:43:27.348892   51156 certs.go:199] skipping minikubeCA CA generation: /home/jenkins/minikube-integration/16969-5799/.minikube/ca.key
	I0731 10:43:27.348928   51156 certs.go:199] skipping proxyClientCA CA generation: /home/jenkins/minikube-integration/16969-5799/.minikube/proxy-client-ca.key
	I0731 10:43:27.348979   51156 certs.go:319] generating minikube-user signed cert: /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/ingress-addon-legacy-538476/client.key
	I0731 10:43:27.348991   51156 crypto.go:68] Generating cert /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/ingress-addon-legacy-538476/client.crt with IP's: []
	I0731 10:43:27.468225   51156 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/ingress-addon-legacy-538476/client.crt ...
	I0731 10:43:27.468256   51156 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/ingress-addon-legacy-538476/client.crt: {Name:mke7b3a7318aae0c80ae892574e7b7484f11ce76 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
	I0731 10:43:27.468427   51156 crypto.go:164] Writing key to /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/ingress-addon-legacy-538476/client.key ...
	I0731 10:43:27.468439   51156 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/ingress-addon-legacy-538476/client.key: {Name:mk98ac650cdaa51dd7a9eea7f29133e52c874d7b Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
	I0731 10:43:27.468517   51156 certs.go:319] generating minikube signed cert: /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/ingress-addon-legacy-538476/apiserver.key.dd3b5fb2
	I0731 10:43:27.468533   51156 crypto.go:68] Generating cert /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/ingress-addon-legacy-538476/apiserver.crt.dd3b5fb2 with IP's: [192.168.49.2 10.96.0.1 127.0.0.1 10.0.0.1]
	I0731 10:43:27.542763   51156 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/ingress-addon-legacy-538476/apiserver.crt.dd3b5fb2 ...
	I0731 10:43:27.542788   51156 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/ingress-addon-legacy-538476/apiserver.crt.dd3b5fb2: {Name:mkf6a276c87af6fca105416b06a4b89fdbb8399b Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
	I0731 10:43:27.542943   51156 crypto.go:164] Writing key to /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/ingress-addon-legacy-538476/apiserver.key.dd3b5fb2 ...
	I0731 10:43:27.542955   51156 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/ingress-addon-legacy-538476/apiserver.key.dd3b5fb2: {Name:mk4992fd6e9a6de1839e1e219fddce46b460d1e6 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
	I0731 10:43:27.543022   51156 certs.go:337] copying /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/ingress-addon-legacy-538476/apiserver.crt.dd3b5fb2 -> /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/ingress-addon-legacy-538476/apiserver.crt
	I0731 10:43:27.543099   51156 certs.go:341] copying /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/ingress-addon-legacy-538476/apiserver.key.dd3b5fb2 -> /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/ingress-addon-legacy-538476/apiserver.key
	I0731 10:43:27.543151   51156 certs.go:319] generating aggregator signed cert: /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/ingress-addon-legacy-538476/proxy-client.key
	I0731 10:43:27.543166   51156 crypto.go:68] Generating cert /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/ingress-addon-legacy-538476/proxy-client.crt with IP's: []
	I0731 10:43:27.635023   51156 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/ingress-addon-legacy-538476/proxy-client.crt ...
	I0731 10:43:27.635047   51156 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/ingress-addon-legacy-538476/proxy-client.crt: {Name:mkd0b75e57d4e30899bc67d085c00faa15b8b5c1 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
	I0731 10:43:27.635192   51156 crypto.go:164] Writing key to /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/ingress-addon-legacy-538476/proxy-client.key ...
	I0731 10:43:27.635202   51156 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/ingress-addon-legacy-538476/proxy-client.key: {Name:mk6e9be354680465ee939fb5c74d82e351de528d Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
	I0731 10:43:27.635266   51156 vm_assets.go:163] NewFileAsset: /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/ingress-addon-legacy-538476/apiserver.crt -> /var/lib/minikube/certs/apiserver.crt
	I0731 10:43:27.635287   51156 vm_assets.go:163] NewFileAsset: /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/ingress-addon-legacy-538476/apiserver.key -> /var/lib/minikube/certs/apiserver.key
	I0731 10:43:27.635298   51156 vm_assets.go:163] NewFileAsset: /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/ingress-addon-legacy-538476/proxy-client.crt -> /var/lib/minikube/certs/proxy-client.crt
	I0731 10:43:27.635309   51156 vm_assets.go:163] NewFileAsset: /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/ingress-addon-legacy-538476/proxy-client.key -> /var/lib/minikube/certs/proxy-client.key
	I0731 10:43:27.635319   51156 vm_assets.go:163] NewFileAsset: /home/jenkins/minikube-integration/16969-5799/.minikube/ca.crt -> /var/lib/minikube/certs/ca.crt
	I0731 10:43:27.635330   51156 vm_assets.go:163] NewFileAsset: /home/jenkins/minikube-integration/16969-5799/.minikube/ca.key -> /var/lib/minikube/certs/ca.key
	I0731 10:43:27.635346   51156 vm_assets.go:163] NewFileAsset: /home/jenkins/minikube-integration/16969-5799/.minikube/proxy-client-ca.crt -> /var/lib/minikube/certs/proxy-client-ca.crt
	I0731 10:43:27.635358   51156 vm_assets.go:163] NewFileAsset: /home/jenkins/minikube-integration/16969-5799/.minikube/proxy-client-ca.key -> /var/lib/minikube/certs/proxy-client-ca.key
	I0731 10:43:27.635403   51156 certs.go:437] found cert: /home/jenkins/minikube-integration/16969-5799/.minikube/certs/home/jenkins/minikube-integration/16969-5799/.minikube/certs/12537.pem (1338 bytes)
	W0731 10:43:27.635435   51156 certs.go:433] ignoring /home/jenkins/minikube-integration/16969-5799/.minikube/certs/home/jenkins/minikube-integration/16969-5799/.minikube/certs/12537_empty.pem, impossibly tiny 0 bytes
	I0731 10:43:27.635444   51156 certs.go:437] found cert: /home/jenkins/minikube-integration/16969-5799/.minikube/certs/home/jenkins/minikube-integration/16969-5799/.minikube/certs/ca-key.pem (1675 bytes)
	I0731 10:43:27.635464   51156 certs.go:437] found cert: /home/jenkins/minikube-integration/16969-5799/.minikube/certs/home/jenkins/minikube-integration/16969-5799/.minikube/certs/ca.pem (1082 bytes)
	I0731 10:43:27.635488   51156 certs.go:437] found cert: /home/jenkins/minikube-integration/16969-5799/.minikube/certs/home/jenkins/minikube-integration/16969-5799/.minikube/certs/cert.pem (1123 bytes)
	I0731 10:43:27.635517   51156 certs.go:437] found cert: /home/jenkins/minikube-integration/16969-5799/.minikube/certs/home/jenkins/minikube-integration/16969-5799/.minikube/certs/key.pem (1675 bytes)
	I0731 10:43:27.635554   51156 certs.go:437] found cert: /home/jenkins/minikube-integration/16969-5799/.minikube/files/etc/ssl/certs/home/jenkins/minikube-integration/16969-5799/.minikube/files/etc/ssl/certs/125372.pem (1708 bytes)
	I0731 10:43:27.635578   51156 vm_assets.go:163] NewFileAsset: /home/jenkins/minikube-integration/16969-5799/.minikube/ca.crt -> /usr/share/ca-certificates/minikubeCA.pem
	I0731 10:43:27.635594   51156 vm_assets.go:163] NewFileAsset: /home/jenkins/minikube-integration/16969-5799/.minikube/certs/12537.pem -> /usr/share/ca-certificates/12537.pem
	I0731 10:43:27.635606   51156 vm_assets.go:163] NewFileAsset: /home/jenkins/minikube-integration/16969-5799/.minikube/files/etc/ssl/certs/125372.pem -> /usr/share/ca-certificates/125372.pem
	I0731 10:43:27.636155   51156 ssh_runner.go:362] scp /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/ingress-addon-legacy-538476/apiserver.crt --> /var/lib/minikube/certs/apiserver.crt (1399 bytes)
	I0731 10:43:27.656920   51156 ssh_runner.go:362] scp /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/ingress-addon-legacy-538476/apiserver.key --> /var/lib/minikube/certs/apiserver.key (1679 bytes)
	I0731 10:43:27.676670   51156 ssh_runner.go:362] scp /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/ingress-addon-legacy-538476/proxy-client.crt --> /var/lib/minikube/certs/proxy-client.crt (1147 bytes)
	I0731 10:43:27.696387   51156 ssh_runner.go:362] scp /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/ingress-addon-legacy-538476/proxy-client.key --> /var/lib/minikube/certs/proxy-client.key (1679 bytes)
	I0731 10:43:27.715554   51156 ssh_runner.go:362] scp /home/jenkins/minikube-integration/16969-5799/.minikube/ca.crt --> /var/lib/minikube/certs/ca.crt (1111 bytes)
	I0731 10:43:27.735312   51156 ssh_runner.go:362] scp /home/jenkins/minikube-integration/16969-5799/.minikube/ca.key --> /var/lib/minikube/certs/ca.key (1675 bytes)
	I0731 10:43:27.754792   51156 ssh_runner.go:362] scp /home/jenkins/minikube-integration/16969-5799/.minikube/proxy-client-ca.crt --> /var/lib/minikube/certs/proxy-client-ca.crt (1119 bytes)
	I0731 10:43:27.773978   51156 ssh_runner.go:362] scp /home/jenkins/minikube-integration/16969-5799/.minikube/proxy-client-ca.key --> /var/lib/minikube/certs/proxy-client-ca.key (1679 bytes)
	I0731 10:43:27.792849   51156 ssh_runner.go:362] scp /home/jenkins/minikube-integration/16969-5799/.minikube/ca.crt --> /usr/share/ca-certificates/minikubeCA.pem (1111 bytes)
	I0731 10:43:27.811910   51156 ssh_runner.go:362] scp /home/jenkins/minikube-integration/16969-5799/.minikube/certs/12537.pem --> /usr/share/ca-certificates/12537.pem (1338 bytes)
	I0731 10:43:27.830788   51156 ssh_runner.go:362] scp /home/jenkins/minikube-integration/16969-5799/.minikube/files/etc/ssl/certs/125372.pem --> /usr/share/ca-certificates/125372.pem (1708 bytes)
	I0731 10:43:27.849768   51156 ssh_runner.go:362] scp memory --> /var/lib/minikube/kubeconfig (738 bytes)
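
Each "Generating cert ... with IP's: [...]" step above issues a key pair whose certificate carries those addresses as IP SANs, so the apiserver certificate is valid for the node IP, the service VIP, and loopback. A simplified, self-signed Go sketch of that shape (the real flow signs with the minikube CA rather than self-signing):

// selfsigned_sketch.go: issues a cert with the IP SANs seen in the log.
// A sketch, not minikube's crypto.go.
package main

import (
	"crypto/rand"
	"crypto/rsa"
	"crypto/x509"
	"crypto/x509/pkix"
	"encoding/pem"
	"math/big"
	"net"
	"os"
	"time"
)

func main() {
	key, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		panic(err)
	}
	tmpl := x509.Certificate{
		SerialNumber: big.NewInt(1),
		Subject:      pkix.Name{CommonName: "minikube"},
		NotBefore:    time.Now(),
		NotAfter:     time.Now().Add(24 * time.Hour),
		// The IP SANs from the log: node IP, service VIP, loopback.
		IPAddresses: []net.IP{
			net.ParseIP("192.168.49.2"),
			net.ParseIP("10.96.0.1"),
			net.ParseIP("127.0.0.1"),
			net.ParseIP("10.0.0.1"),
		},
		KeyUsage:              x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature,
		ExtKeyUsage:           []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
		BasicConstraintsValid: true,
	}
	// Self-signed for brevity: the template is both subject and issuer.
	der, err := x509.CreateCertificate(rand.Reader, &tmpl, &tmpl, &key.PublicKey, key)
	if err != nil {
		panic(err)
	}
	if err := pem.Encode(os.Stdout, &pem.Block{Type: "CERTIFICATE", Bytes: der}); err != nil {
		panic(err)
	}
}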
	I0731 10:43:27.864100   51156 ssh_runner.go:195] Run: openssl version
	I0731 10:43:27.868645   51156 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/minikubeCA.pem && ln -fs /usr/share/ca-certificates/minikubeCA.pem /etc/ssl/certs/minikubeCA.pem"
	I0731 10:43:27.876151   51156 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/minikubeCA.pem
	I0731 10:43:27.878943   51156 certs.go:480] hashing: -rw-r--r-- 1 root root 1111 Jul 31 10:34 /usr/share/ca-certificates/minikubeCA.pem
	I0731 10:43:27.878987   51156 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem
	I0731 10:43:27.884771   51156 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/b5213941.0 || ln -fs /etc/ssl/certs/minikubeCA.pem /etc/ssl/certs/b5213941.0"
	I0731 10:43:27.892369   51156 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/12537.pem && ln -fs /usr/share/ca-certificates/12537.pem /etc/ssl/certs/12537.pem"
	I0731 10:43:27.900459   51156 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/12537.pem
	I0731 10:43:27.903536   51156 certs.go:480] hashing: -rw-r--r-- 1 root root 1338 Jul 31 10:39 /usr/share/ca-certificates/12537.pem
	I0731 10:43:27.903572   51156 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/12537.pem
	I0731 10:43:27.909271   51156 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/51391683.0 || ln -fs /etc/ssl/certs/12537.pem /etc/ssl/certs/51391683.0"
	I0731 10:43:27.917349   51156 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/125372.pem && ln -fs /usr/share/ca-certificates/125372.pem /etc/ssl/certs/125372.pem"
	I0731 10:43:27.924915   51156 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/125372.pem
	I0731 10:43:27.927737   51156 certs.go:480] hashing: -rw-r--r-- 1 root root 1708 Jul 31 10:39 /usr/share/ca-certificates/125372.pem
	I0731 10:43:27.927767   51156 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/125372.pem
	I0731 10:43:27.933320   51156 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/3ec20f2e.0 || ln -fs /etc/ssl/certs/125372.pem /etc/ssl/certs/3ec20f2e.0"
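
The openssl/ln pairs above implement hash-based CA lookup: "openssl x509 -hash -noout" prints the subject hash (for example b5213941), and the certificate is symlinked as /etc/ssl/certs/<hash>.0 so TLS libraries can locate it by hash. A small Go sketch of the same two steps, assuming the openssl binary is on PATH:

// cahash_sketch.go: computes the OpenSSL subject hash for a CA cert and
// prints the symlink target name minikube would create.
package main

import (
	"fmt"
	"os/exec"
	"strings"
)

// subjectHash shells out to the same command the log shows.
func subjectHash(certPath string) (string, error) {
	out, err := exec.Command("openssl", "x509", "-hash", "-noout", "-in", certPath).Output()
	if err != nil {
		return "", err
	}
	return strings.TrimSpace(string(out)), nil
}

func main() {
	h, err := subjectHash("/usr/share/ca-certificates/minikubeCA.pem")
	if err != nil {
		panic(err)
	}
	// minikube then runs: ln -fs <cert> /etc/ssl/certs/<hash>.0
	fmt.Printf("/etc/ssl/certs/%s.0\n", h)
}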
	I0731 10:43:27.941283   51156 ssh_runner.go:195] Run: ls /var/lib/minikube/certs/etcd
	I0731 10:43:27.944296   51156 certs.go:353] certs directory doesn't exist, likely first start: ls /var/lib/minikube/certs/etcd: Process exited with status 2
	stdout:
	
	stderr:
	ls: cannot access '/var/lib/minikube/certs/etcd': No such file or directory
	I0731 10:43:27.944351   51156 kubeadm.go:404] StartCluster: {Name:ingress-addon-legacy-538476 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase:v0.0.40@sha256:8cadf23777709e43eca447c47a45f5a4635615129267ce025193040ec92a1631 Memory:4096 CPUs:2 DiskSize:20000 VMDriver: Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:0 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.18.20 ClusterName:ingress-addon-legacy-538476 Namespace:default APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:crio CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI: NodeIP: NodePort:8443 NodeName:} Nodes:[{Name: IP:192.168.49.2 Port:8443 KubernetesVersion:v1.18.20 ContainerRuntime:crio ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0}
	I0731 10:43:27.944443   51156 cri.go:54] listing CRI containers in root : {State:paused Name: Namespaces:[kube-system]}
	I0731 10:43:27.944491   51156 ssh_runner.go:195] Run: sudo -s eval "crictl ps -a --quiet --label io.kubernetes.pod.namespace=kube-system"
	I0731 10:43:27.977005   51156 cri.go:89] found id: ""
	I0731 10:43:27.977057   51156 ssh_runner.go:195] Run: sudo ls /var/lib/kubelet/kubeadm-flags.env /var/lib/kubelet/config.yaml /var/lib/minikube/etcd
	I0731 10:43:27.984963   51156 ssh_runner.go:195] Run: sudo cp /var/tmp/minikube/kubeadm.yaml.new /var/tmp/minikube/kubeadm.yaml
	I0731 10:43:27.992494   51156 kubeadm.go:226] ignoring SystemVerification for kubeadm because of docker driver
	I0731 10:43:27.992531   51156 ssh_runner.go:195] Run: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf
	I0731 10:43:27.999964   51156 kubeadm.go:152] config check failed, skipping stale config cleanup: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf: Process exited with status 2
	stdout:
	
	stderr:
	ls: cannot access '/etc/kubernetes/admin.conf': No such file or directory
	ls: cannot access '/etc/kubernetes/kubelet.conf': No such file or directory
	ls: cannot access '/etc/kubernetes/controller-manager.conf': No such file or directory
	ls: cannot access '/etc/kubernetes/scheduler.conf': No such file or directory
	I0731 10:43:27.999997   51156 ssh_runner.go:286] Start: /bin/bash -c "sudo env PATH="/var/lib/minikube/binaries/v1.18.20:$PATH" kubeadm init --config /var/tmp/minikube/kubeadm.yaml  --ignore-preflight-errors=DirAvailable--etc-kubernetes-manifests,DirAvailable--var-lib-minikube,DirAvailable--var-lib-minikube-etcd,FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml,FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml,FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml,FileAvailable--etc-kubernetes-manifests-etcd.yaml,Port-10250,Swap,NumCPU,SystemVerification,FileContent--proc-sys-net-bridge-bridge-nf-call-iptables"
	I0731 10:43:28.041449   51156 kubeadm.go:322] [init] Using Kubernetes version: v1.18.20
	I0731 10:43:28.041516   51156 kubeadm.go:322] [preflight] Running pre-flight checks
	I0731 10:43:28.076976   51156 kubeadm.go:322] [preflight] The system verification failed. Printing the output from the verification:
	I0731 10:43:28.077071   51156 kubeadm.go:322] KERNEL_VERSION: 5.15.0-1038-gcp
	I0731 10:43:28.077155   51156 kubeadm.go:322] OS: Linux
	I0731 10:43:28.077239   51156 kubeadm.go:322] CGROUPS_CPU: enabled
	I0731 10:43:28.077321   51156 kubeadm.go:322] CGROUPS_CPUACCT: enabled
	I0731 10:43:28.077380   51156 kubeadm.go:322] CGROUPS_CPUSET: enabled
	I0731 10:43:28.077452   51156 kubeadm.go:322] CGROUPS_DEVICES: enabled
	I0731 10:43:28.077525   51156 kubeadm.go:322] CGROUPS_FREEZER: enabled
	I0731 10:43:28.077602   51156 kubeadm.go:322] CGROUPS_MEMORY: enabled
	I0731 10:43:28.140154   51156 kubeadm.go:322] [preflight] Pulling images required for setting up a Kubernetes cluster
	I0731 10:43:28.140260   51156 kubeadm.go:322] [preflight] This might take a minute or two, depending on the speed of your internet connection
	I0731 10:43:28.140362   51156 kubeadm.go:322] [preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
	I0731 10:43:28.308286   51156 kubeadm.go:322] [kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
	I0731 10:43:28.309131   51156 kubeadm.go:322] [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
	I0731 10:43:28.309258   51156 kubeadm.go:322] [kubelet-start] Starting the kubelet
	I0731 10:43:28.380396   51156 kubeadm.go:322] [certs] Using certificateDir folder "/var/lib/minikube/certs"
	I0731 10:43:28.383976   51156 out.go:204]   - Generating certificates and keys ...
	I0731 10:43:28.384100   51156 kubeadm.go:322] [certs] Using existing ca certificate authority
	I0731 10:43:28.384182   51156 kubeadm.go:322] [certs] Using existing apiserver certificate and key on disk
	I0731 10:43:28.699320   51156 kubeadm.go:322] [certs] Generating "apiserver-kubelet-client" certificate and key
	I0731 10:43:28.971701   51156 kubeadm.go:322] [certs] Generating "front-proxy-ca" certificate and key
	I0731 10:43:29.220568   51156 kubeadm.go:322] [certs] Generating "front-proxy-client" certificate and key
	I0731 10:43:29.460748   51156 kubeadm.go:322] [certs] Generating "etcd/ca" certificate and key
	I0731 10:43:29.671143   51156 kubeadm.go:322] [certs] Generating "etcd/server" certificate and key
	I0731 10:43:29.671343   51156 kubeadm.go:322] [certs] etcd/server serving cert is signed for DNS names [ingress-addon-legacy-538476 localhost] and IPs [192.168.49.2 127.0.0.1 ::1]
	I0731 10:43:29.852437   51156 kubeadm.go:322] [certs] Generating "etcd/peer" certificate and key
	I0731 10:43:29.852619   51156 kubeadm.go:322] [certs] etcd/peer serving cert is signed for DNS names [ingress-addon-legacy-538476 localhost] and IPs [192.168.49.2 127.0.0.1 ::1]
	I0731 10:43:30.025141   51156 kubeadm.go:322] [certs] Generating "etcd/healthcheck-client" certificate and key
	I0731 10:43:30.406597   51156 kubeadm.go:322] [certs] Generating "apiserver-etcd-client" certificate and key
	I0731 10:43:30.524593   51156 kubeadm.go:322] [certs] Generating "sa" key and public key
	I0731 10:43:30.524753   51156 kubeadm.go:322] [kubeconfig] Using kubeconfig folder "/etc/kubernetes"
	I0731 10:43:30.702026   51156 kubeadm.go:322] [kubeconfig] Writing "admin.conf" kubeconfig file
	I0731 10:43:30.936874   51156 kubeadm.go:322] [kubeconfig] Writing "kubelet.conf" kubeconfig file
	I0731 10:43:30.991885   51156 kubeadm.go:322] [kubeconfig] Writing "controller-manager.conf" kubeconfig file
	I0731 10:43:31.161022   51156 kubeadm.go:322] [kubeconfig] Writing "scheduler.conf" kubeconfig file
	I0731 10:43:31.161562   51156 kubeadm.go:322] [control-plane] Using manifest folder "/etc/kubernetes/manifests"
	I0731 10:43:31.164552   51156 out.go:204]   - Booting up control plane ...
	I0731 10:43:31.164672   51156 kubeadm.go:322] [control-plane] Creating static Pod manifest for "kube-apiserver"
	I0731 10:43:31.167870   51156 kubeadm.go:322] [control-plane] Creating static Pod manifest for "kube-controller-manager"
	I0731 10:43:31.168814   51156 kubeadm.go:322] [control-plane] Creating static Pod manifest for "kube-scheduler"
	I0731 10:43:31.169500   51156 kubeadm.go:322] [etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
	I0731 10:43:31.172299   51156 kubeadm.go:322] [wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
	I0731 10:43:38.174368   51156 kubeadm.go:322] [apiclient] All control plane components are healthy after 7.002566 seconds
	I0731 10:43:38.174509   51156 kubeadm.go:322] [upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
	I0731 10:43:38.183972   51156 kubeadm.go:322] [kubelet] Creating a ConfigMap "kubelet-config-1.18" in namespace kube-system with the configuration for the kubelets in the cluster
	I0731 10:43:38.700253   51156 kubeadm.go:322] [upload-certs] Skipping phase. Please see --upload-certs
	I0731 10:43:38.700482   51156 kubeadm.go:322] [mark-control-plane] Marking the node ingress-addon-legacy-538476 as control-plane by adding the label "node-role.kubernetes.io/master=''"
	I0731 10:43:39.208519   51156 kubeadm.go:322] [bootstrap-token] Using token: a58bl0.yhsg1lzgjtkbs53f
	I0731 10:43:39.210007   51156 out.go:204]   - Configuring RBAC rules ...
	I0731 10:43:39.210169   51156 kubeadm.go:322] [bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
	I0731 10:43:39.213914   51156 kubeadm.go:322] [bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to get nodes
	I0731 10:43:39.220612   51156 kubeadm.go:322] [bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
	I0731 10:43:39.222163   51156 kubeadm.go:322] [bootstrap-token] configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
	I0731 10:43:39.224025   51156 kubeadm.go:322] [bootstrap-token] configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
	I0731 10:43:39.225787   51156 kubeadm.go:322] [bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
	I0731 10:43:39.233443   51156 kubeadm.go:322] [kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
	I0731 10:43:39.432371   51156 kubeadm.go:322] [addons] Applied essential addon: CoreDNS
	I0731 10:43:39.618534   51156 kubeadm.go:322] [addons] Applied essential addon: kube-proxy
	I0731 10:43:39.619543   51156 kubeadm.go:322] 
	I0731 10:43:39.619647   51156 kubeadm.go:322] Your Kubernetes control-plane has initialized successfully!
	I0731 10:43:39.619655   51156 kubeadm.go:322] 
	I0731 10:43:39.619769   51156 kubeadm.go:322] To start using your cluster, you need to run the following as a regular user:
	I0731 10:43:39.619799   51156 kubeadm.go:322] 
	I0731 10:43:39.619860   51156 kubeadm.go:322]   mkdir -p $HOME/.kube
	I0731 10:43:39.619946   51156 kubeadm.go:322]   sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
	I0731 10:43:39.620019   51156 kubeadm.go:322]   sudo chown $(id -u):$(id -g) $HOME/.kube/config
	I0731 10:43:39.620031   51156 kubeadm.go:322] 
	I0731 10:43:39.620107   51156 kubeadm.go:322] You should now deploy a pod network to the cluster.
	I0731 10:43:39.620231   51156 kubeadm.go:322] Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
	I0731 10:43:39.620346   51156 kubeadm.go:322]   https://kubernetes.io/docs/concepts/cluster-administration/addons/
	I0731 10:43:39.620358   51156 kubeadm.go:322] 
	I0731 10:43:39.620474   51156 kubeadm.go:322] You can now join any number of control-plane nodes by copying certificate authorities
	I0731 10:43:39.620558   51156 kubeadm.go:322] and service account keys on each node and then running the following as root:
	I0731 10:43:39.620568   51156 kubeadm.go:322] 
	I0731 10:43:39.620677   51156 kubeadm.go:322]   kubeadm join control-plane.minikube.internal:8443 --token a58bl0.yhsg1lzgjtkbs53f \
	I0731 10:43:39.620803   51156 kubeadm.go:322]     --discovery-token-ca-cert-hash sha256:332f1e2c8f6e50afb8fc2995698452a3be7de85c383b512a61c05acf2d3047a4 \
	I0731 10:43:39.620835   51156 kubeadm.go:322]     --control-plane 
	I0731 10:43:39.620843   51156 kubeadm.go:322] 
	I0731 10:43:39.620980   51156 kubeadm.go:322] Then you can join any number of worker nodes by running the following on each as root:
	I0731 10:43:39.620996   51156 kubeadm.go:322] 
	I0731 10:43:39.621105   51156 kubeadm.go:322] kubeadm join control-plane.minikube.internal:8443 --token a58bl0.yhsg1lzgjtkbs53f \
	I0731 10:43:39.621215   51156 kubeadm.go:322]     --discovery-token-ca-cert-hash sha256:332f1e2c8f6e50afb8fc2995698452a3be7de85c383b512a61c05acf2d3047a4 
	I0731 10:43:39.622867   51156 kubeadm.go:322] W0731 10:43:28.040870    1382 configset.go:202] WARNING: kubeadm cannot validate component configs for API groups [kubelet.config.k8s.io kubeproxy.config.k8s.io]
	I0731 10:43:39.623074   51156 kubeadm.go:322] 	[WARNING SystemVerification]: failed to parse kernel config: unable to load kernel module: "configs", output: "modprobe: FATAL: Module configs not found in directory /lib/modules/5.15.0-1038-gcp\n", err: exit status 1
	I0731 10:43:39.623229   51156 kubeadm.go:322] 	[WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'
	I0731 10:43:39.623384   51156 kubeadm.go:322] W0731 10:43:31.167613    1382 manifests.go:225] the default kube-apiserver authorization-mode is "Node,RBAC"; using "Node,RBAC"
	I0731 10:43:39.623523   51156 kubeadm.go:322] W0731 10:43:31.168588    1382 manifests.go:225] the default kube-apiserver authorization-mode is "Node,RBAC"; using "Node,RBAC"
	I0731 10:43:39.623536   51156 cni.go:84] Creating CNI manager for ""
	I0731 10:43:39.623541   51156 cni.go:143] "docker" driver + "crio" runtime found, recommending kindnet
	I0731 10:43:39.625228   51156 out.go:177] * Configuring CNI (Container Networking Interface) ...
	I0731 10:43:39.626666   51156 ssh_runner.go:195] Run: stat /opt/cni/bin/portmap
	I0731 10:43:39.630264   51156 cni.go:182] applying CNI manifest using /var/lib/minikube/binaries/v1.18.20/kubectl ...
	I0731 10:43:39.630278   51156 ssh_runner.go:362] scp memory --> /var/tmp/minikube/cni.yaml (2438 bytes)
	I0731 10:43:39.645288   51156 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.18.20/kubectl apply --kubeconfig=/var/lib/minikube/kubeconfig -f /var/tmp/minikube/cni.yaml
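
The cni.go:143 line above records the selection rule applied here: with the docker driver and a non-docker runtime (crio in this run), minikube recommends kindnet as the CNI. A simplified sketch of that decision (not the actual source; generalizing from the one combination shown in the log):

// cni_choice_sketch.go: a toy version of the driver/runtime CNI decision.
package main

import "fmt"

func recommendCNI(driver, runtime string) string {
	// With the docker driver, only the docker runtime gets by on the
	// built-in bridge; other runtimes such as crio need a real CNI.
	if driver == "docker" && runtime != "docker" {
		return "kindnet"
	}
	return "bridge"
}

func main() {
	fmt.Println(recommendCNI("docker", "crio")) // prints: kindnet
}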
	I0731 10:43:40.046291   51156 ssh_runner.go:195] Run: /bin/bash -c "cat /proc/$(pgrep kube-apiserver)/oom_adj"
	I0731 10:43:40.046348   51156 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.18.20/kubectl create clusterrolebinding minikube-rbac --clusterrole=cluster-admin --serviceaccount=kube-system:default --kubeconfig=/var/lib/minikube/kubeconfig
	I0731 10:43:40.046351   51156 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.18.20/kubectl label nodes minikube.k8s.io/version=v1.31.1 minikube.k8s.io/commit=a7848ba25aaaad8ebb50e721c0d343e471188fc7 minikube.k8s.io/name=ingress-addon-legacy-538476 minikube.k8s.io/updated_at=2023_07_31T10_43_40_0700 minikube.k8s.io/primary=true --all --overwrite --kubeconfig=/var/lib/minikube/kubeconfig
	I0731 10:43:40.151800   51156 ops.go:34] apiserver oom_adj: -16
	I0731 10:43:40.151817   51156 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.18.20/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I0731 10:43:40.237181   51156 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.18.20/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I0731 10:43:40.800834   51156 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.18.20/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I0731 10:43:41.300338   51156 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.18.20/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I0731 10:43:41.800863   51156 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.18.20/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I0731 10:43:42.300260   51156 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.18.20/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I0731 10:43:42.800178   51156 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.18.20/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I0731 10:43:43.300855   51156 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.18.20/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I0731 10:43:43.800228   51156 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.18.20/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I0731 10:43:44.300984   51156 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.18.20/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I0731 10:43:44.800417   51156 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.18.20/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I0731 10:43:45.301166   51156 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.18.20/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I0731 10:43:45.801221   51156 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.18.20/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I0731 10:43:46.300968   51156 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.18.20/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I0731 10:43:46.800559   51156 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.18.20/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I0731 10:43:47.300547   51156 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.18.20/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I0731 10:43:47.800546   51156 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.18.20/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I0731 10:43:48.300817   51156 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.18.20/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I0731 10:43:48.800430   51156 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.18.20/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I0731 10:43:49.300709   51156 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.18.20/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I0731 10:43:49.800804   51156 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.18.20/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I0731 10:43:50.300868   51156 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.18.20/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I0731 10:43:50.800719   51156 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.18.20/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I0731 10:43:51.300777   51156 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.18.20/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I0731 10:43:51.800186   51156 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.18.20/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I0731 10:43:52.300779   51156 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.18.20/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I0731 10:43:52.800206   51156 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.18.20/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I0731 10:43:53.301102   51156 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.18.20/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I0731 10:43:53.800299   51156 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.18.20/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I0731 10:43:54.300458   51156 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.18.20/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I0731 10:43:54.800413   51156 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.18.20/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I0731 10:43:55.301149   51156 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.18.20/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I0731 10:43:55.801153   51156 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.18.20/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I0731 10:43:56.300467   51156 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.18.20/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I0731 10:43:56.364532   51156 kubeadm.go:1081] duration metric: took 16.318223526s to wait for elevateKubeSystemPrivileges.
	I0731 10:43:56.364566   51156 kubeadm.go:406] StartCluster complete in 28.420222051s
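
The repeated "kubectl get sa default" runs above form a roughly 500ms polling loop: the default ServiceAccount appearing is the signal that the control plane can mint service accounts, at which point elevateKubeSystemPrivileges completes. A sketch of the same loop, with the kubectl invocation stubbed down to its essentials:

// sa_poll_sketch.go: polls for the default ServiceAccount the way the log
// above does. Paths and flags are trimmed for brevity.
package main

import (
	"errors"
	"fmt"
	"os/exec"
	"time"
)

// defaultSAExists mirrors: kubectl get sa default --kubeconfig=...
func defaultSAExists(kubectl string) bool {
	return exec.Command(kubectl, "get", "sa", "default").Run() == nil
}

func waitForDefaultSA(kubectl string, timeout time.Duration) error {
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		if defaultSAExists(kubectl) {
			return nil
		}
		time.Sleep(500 * time.Millisecond) // matches the log's cadence
	}
	return errors.New("timed out waiting for default service account")
}

func main() {
	if err := waitForDefaultSA("kubectl", time.Minute); err != nil {
		fmt.Println(err)
	}
}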
	I0731 10:43:56.364600   51156 settings.go:142] acquiring lock: {Name:mk1af30688f984f447d2a45e33362813edbbcab6 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
	I0731 10:43:56.364676   51156 settings.go:150] Updating kubeconfig:  /home/jenkins/minikube-integration/16969-5799/kubeconfig
	I0731 10:43:56.365472   51156 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/16969-5799/kubeconfig: {Name:mkf8010bda730fc5f9ac63bea8b114101911b8e2 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
	I0731 10:43:56.365686   51156 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.18.20/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml"
	I0731 10:43:56.365796   51156 addons.go:499] enable addons start: toEnable=map[ambassador:false auto-pause:false cloud-spanner:false csi-hostpath-driver:false dashboard:false default-storageclass:true efk:false freshpod:false gcp-auth:false gvisor:false headlamp:false helm-tiller:false inaccel:false ingress:false ingress-dns:false inspektor-gadget:false istio:false istio-provisioner:false kong:false kubevirt:false logviewer:false metallb:false metrics-server:false nvidia-driver-installer:false nvidia-gpu-device-plugin:false olm:false pod-security-policy:false portainer:false registry:false registry-aliases:false registry-creds:false storage-provisioner:true storage-provisioner-gluster:false volumesnapshots:false]
	I0731 10:43:56.365902   51156 config.go:182] Loaded profile config "ingress-addon-legacy-538476": Driver=docker, ContainerRuntime=crio, KubernetesVersion=v1.18.20
	I0731 10:43:56.365918   51156 addons.go:69] Setting default-storageclass=true in profile "ingress-addon-legacy-538476"
	I0731 10:43:56.365905   51156 addons.go:69] Setting storage-provisioner=true in profile "ingress-addon-legacy-538476"
	I0731 10:43:56.365947   51156 addons_storage_classes.go:33] enableOrDisableStorageClasses default-storageclass=true on "ingress-addon-legacy-538476"
	I0731 10:43:56.365957   51156 addons.go:231] Setting addon storage-provisioner=true in "ingress-addon-legacy-538476"
	I0731 10:43:56.366006   51156 host.go:66] Checking if "ingress-addon-legacy-538476" exists ...
	I0731 10:43:56.366394   51156 cli_runner.go:164] Run: docker container inspect ingress-addon-legacy-538476 --format={{.State.Status}}
	I0731 10:43:56.366339   51156 kapi.go:59] client config for ingress-addon-legacy-538476: &rest.Config{Host:"https://192.168.49.2:8443", APIPath:"", ContentConfig:rest.ContentConfig{AcceptContentTypes:"", ContentType:"", GroupVersion:(*schema.GroupVersion)(nil), NegotiatedSerializer:runtime.NegotiatedSerializer(nil)}, Username:"", Password:"", BearerToken:"", BearerTokenFile:"", Impersonate:rest.ImpersonationConfig{UserName:"", UID:"", Groups:[]string(nil), Extra:map[string][]string(nil)}, AuthProvider:<nil>, AuthConfigPersister:rest.AuthProviderConfigPersister(nil), ExecProvider:<nil>, TLSClientConfig:rest.sanitizedTLSClientConfig{Insecure:false, ServerName:"", CertFile:"/home/jenkins/minikube-integration/16969-5799/.minikube/profiles/ingress-addon-legacy-538476/client.crt", KeyFile:"/home/jenkins/minikube-integration/16969-5799/.minikube/profiles/ingress-addon-legacy-538476/client.key", CAFile:"/home/jenkins/minikube-integration/16969-5799/.minikube/ca.crt", CertData:[]uint8(nil), KeyData:[]uint8(nil), CAData:[]uint8(nil), NextProtos:[]string(nil)}, UserAgent:"", DisableCompression:false, Transport:http.RoundTripper(nil), WrapTransport:(transport.WrapperFunc)(0x19c2840), QPS:0, Burst:0, RateLimiter:flowcontrol.RateLimiter(nil), WarningHandler:rest.WarningHandler(nil), Timeout:0, Dial:(func(context.Context, string, string) (net.Conn, error))(nil), Proxy:(func(*http.Request) (*url.URL, error))(nil)}
	I0731 10:43:56.366547   51156 cli_runner.go:164] Run: docker container inspect ingress-addon-legacy-538476 --format={{.State.Status}}
	I0731 10:43:56.367180   51156 cert_rotation.go:137] Starting client certificate rotation controller
	I0731 10:43:56.383703   51156 kapi.go:248] "coredns" deployment in "kube-system" namespace and "ingress-addon-legacy-538476" context rescaled to 1 replicas
	I0731 10:43:56.383777   51156 start.go:223] Will wait 6m0s for node &{Name: IP:192.168.49.2 Port:8443 KubernetesVersion:v1.18.20 ContainerRuntime:crio ControlPlane:true Worker:true}
	I0731 10:43:56.386957   51156 out.go:177] * Verifying Kubernetes components...
	I0731 10:43:56.388525   51156 out.go:177]   - Using image gcr.io/k8s-minikube/storage-provisioner:v5
	I0731 10:43:56.386219   51156 kapi.go:59] client config for ingress-addon-legacy-538476: &rest.Config{Host:"https://192.168.49.2:8443", APIPath:"", ContentConfig:rest.ContentConfig{AcceptContentTypes:"", ContentType:"", GroupVersion:(*schema.GroupVersion)(nil), NegotiatedSerializer:runtime.NegotiatedSerializer(nil)}, Username:"", Password:"", BearerToken:"", BearerTokenFile:"", Impersonate:rest.ImpersonationConfig{UserName:"", UID:"", Groups:[]string(nil), Extra:map[string][]string(nil)}, AuthProvider:<nil>, AuthConfigPersister:rest.AuthProviderConfigPersister(nil), ExecProvider:<nil>, TLSClientConfig:rest.sanitizedTLSClientConfig{Insecure:false, ServerName:"", CertFile:"/home/jenkins/minikube-integration/16969-5799/.minikube/profiles/ingress-addon-legacy-538476/client.crt", KeyFile:"/home/jenkins/minikube-integration/16969-5799/.minikube/profiles/ingress-addon-legacy-538476/client.key", CAFile:"/home/jenkins/minikube-integration/16969-5799/.minikube/ca.crt", CertData:[]uint8(nil), KeyData:[]uint8(nil), CAData:[]uint8(nil), NextProtos:[]string(nil)}, UserAgent:"", DisableCompression:false, Transport:http.RoundTripper(nil), WrapTransport:(transport.WrapperFunc)(0x19c2840), QPS:0, Burst:0, RateLimiter:flowcontrol.RateLimiter(nil), WarningHandler:rest.WarningHandler(nil), Timeout:0, Dial:(func(context.Context, string, string) (net.Conn, error))(nil), Proxy:(func(*http.Request) (*url.URL, error))(nil)}
	I0731 10:43:56.388461   51156 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet
	I0731 10:43:56.390284   51156 addons.go:423] installing /etc/kubernetes/addons/storage-provisioner.yaml
	I0731 10:43:56.390306   51156 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/storage-provisioner.yaml (2676 bytes)
	I0731 10:43:56.390355   51156 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ingress-addon-legacy-538476
	I0731 10:43:56.392796   51156 addons.go:231] Setting addon default-storageclass=true in "ingress-addon-legacy-538476"
	I0731 10:43:56.392835   51156 host.go:66] Checking if "ingress-addon-legacy-538476" exists ...
	I0731 10:43:56.393197   51156 cli_runner.go:164] Run: docker container inspect ingress-addon-legacy-538476 --format={{.State.Status}}
	I0731 10:43:56.408450   51156 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32787 SSHKeyPath:/home/jenkins/minikube-integration/16969-5799/.minikube/machines/ingress-addon-legacy-538476/id_rsa Username:docker}
	I0731 10:43:56.412389   51156 addons.go:423] installing /etc/kubernetes/addons/storageclass.yaml
	I0731 10:43:56.412407   51156 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/storageclass.yaml (271 bytes)
	I0731 10:43:56.412461   51156 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ingress-addon-legacy-538476
	I0731 10:43:56.429153   51156 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32787 SSHKeyPath:/home/jenkins/minikube-integration/16969-5799/.minikube/machines/ingress-addon-legacy-538476/id_rsa Username:docker}
	I0731 10:43:56.448190   51156 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.18.20/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml | sed -e '/^        forward . \/etc\/resolv.conf.*/i \        hosts {\n           192.168.49.1 host.minikube.internal\n           fallthrough\n        }' -e '/^        errors *$/i \        log' | sudo /var/lib/minikube/binaries/v1.18.20/kubectl --kubeconfig=/var/lib/minikube/kubeconfig replace -f -"
	I0731 10:43:56.448652   51156 kapi.go:59] client config for ingress-addon-legacy-538476: &rest.Config{Host:"https://192.168.49.2:8443", APIPath:"", ContentConfig:rest.ContentConfig{AcceptContentTypes:"", ContentType:"", GroupVersion:(*schema.GroupVersion)(nil), NegotiatedSerializer:runtime.NegotiatedSerializer(nil)}, Username:"", Password:"", BearerToken:"", BearerTokenFile:"", Impersonate:rest.ImpersonationConfig{UserName:"", UID:"", Groups:[]string(nil), Extra:map[string][]string(nil)}, AuthProvider:<nil>, AuthConfigPersister:rest.AuthProviderConfigPersister(nil), ExecProvider:<nil>, TLSClientConfig:rest.sanitizedTLSClientConfig{Insecure:false, ServerName:"", CertFile:"/home/jenkins/minikube-integration/16969-5799/.minikube/profiles/ingress-addon-legacy-538476/client.crt", KeyFile:"/home/jenkins/minikube-integration/16969-5799/.minikube/profiles/ingress-addon-legacy-538476/client.key", CAFile:"/home/jenkins/minikube-integration/16969-5799/.minikube/ca.crt", CertData:[]uint8(nil), KeyData:[]uint8(nil), CAData:[]uint8(nil), NextProtos:[]string(nil)}, UserAgent:"", DisableCompression:false, Transport:http.RoundTripper(nil), WrapTransport:(transport.WrapperFunc)(0x19c2840), QPS:0, Burst:0, RateLimiter:flowcontrol.RateLimiter(nil), WarningHandler:rest.WarningHandler(nil), Timeout:0, Dial:(func(context.Context, string, string) (net.Conn, error))(nil), Proxy:(func(*http.Request) (*url.URL, error))(nil)}
	I0731 10:43:56.448965   51156 node_ready.go:35] waiting up to 6m0s for node "ingress-addon-legacy-538476" to be "Ready" ...
	I0731 10:43:56.529155   51156 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.18.20/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml
	I0731 10:43:56.613608   51156 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.18.20/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml
	I0731 10:43:57.128520   51156 start.go:901] {"host.minikube.internal": 192.168.49.1} host record injected into CoreDNS's ConfigMap
	I0731 10:43:57.324174   51156 out.go:177] * Enabled addons: storage-provisioner, default-storageclass
	I0731 10:43:57.325497   51156 addons.go:502] enable addons completed in 959.702969ms: enabled=[storage-provisioner default-storageclass]
	I0731 10:43:58.456438   51156 node_ready.go:58] node "ingress-addon-legacy-538476" has status "Ready":"False"
	I0731 10:44:00.631678   51156 node_ready.go:58] node "ingress-addon-legacy-538476" has status "Ready":"False"
	I0731 10:44:02.957069   51156 node_ready.go:58] node "ingress-addon-legacy-538476" has status "Ready":"False"
	I0731 10:44:05.457185   51156 node_ready.go:58] node "ingress-addon-legacy-538476" has status "Ready":"False"
	I0731 10:44:07.956632   51156 node_ready.go:58] node "ingress-addon-legacy-538476" has status "Ready":"False"
	I0731 10:44:09.956708   51156 node_ready.go:49] node "ingress-addon-legacy-538476" has status "Ready":"True"
	I0731 10:44:09.956730   51156 node_ready.go:38] duration metric: took 13.507745875s waiting for node "ingress-addon-legacy-538476" to be "Ready" ...
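
The node_ready wait above polls until the node's NodeReady condition reports True. A sketch of the same check with client-go (assuming a recent client-go and the kubeconfig path shown in the log):

// node_ready_sketch.go: checks a node's Ready condition the way the
// node_ready wait does. A sketch, not minikube's code.
package main

import (
	"context"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

// nodeReady reports whether the named node's NodeReady condition is True.
func nodeReady(client kubernetes.Interface, name string) (bool, error) {
	node, err := client.CoreV1().Nodes().Get(context.Background(), name, metav1.GetOptions{})
	if err != nil {
		return false, err
	}
	for _, c := range node.Status.Conditions {
		if c.Type == corev1.NodeReady {
			return c.Status == corev1.ConditionTrue, nil
		}
	}
	return false, nil
}

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", "/var/lib/minikube/kubeconfig")
	if err != nil {
		panic(err)
	}
	client, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	ready, err := nodeReady(client, "ingress-addon-legacy-538476")
	fmt.Println(ready, err)
}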
	I0731 10:44:09.956738   51156 pod_ready.go:35] extra waiting up to 6m0s for all system-critical pods including labels [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] to be "Ready" ...
	I0731 10:44:09.962749   51156 pod_ready.go:78] waiting up to 6m0s for pod "coredns-66bff467f8-2ldkm" in "kube-system" namespace to be "Ready" ...
	I0731 10:44:11.969690   51156 pod_ready.go:102] pod "coredns-66bff467f8-2ldkm" in "kube-system" namespace doesn't have "Ready" status: {Phase:Pending Conditions:[{Type:PodScheduled Status:False LastProbeTime:0001-01-01 00:00:00 +0000 UTC LastTransitionTime:2023-07-31 10:43:56 +0000 UTC Reason:Unschedulable Message:0/1 nodes are available: 1 node(s) had taint {node.kubernetes.io/not-ready: }, that the pod didn't tolerate.}] Message: Reason: NominatedNodeName: HostIP: PodIP: PodIPs:[] StartTime:<nil> InitContainerStatuses:[] ContainerStatuses:[] QOSClass:Burstable EphemeralContainerStatuses:[] Resize:}
	I0731 10:44:14.469292   51156 pod_ready.go:102] pod "coredns-66bff467f8-2ldkm" in "kube-system" namespace doesn't have "Ready" status: {Phase:Pending Conditions:[{Type:PodScheduled Status:False LastProbeTime:0001-01-01 00:00:00 +0000 UTC LastTransitionTime:2023-07-31 10:43:56 +0000 UTC Reason:Unschedulable Message:0/1 nodes are available: 1 node(s) had taint {node.kubernetes.io/not-ready: }, that the pod didn't tolerate.}] Message: Reason: NominatedNodeName: HostIP: PodIP: PodIPs:[] StartTime:<nil> InitContainerStatuses:[] ContainerStatuses:[] QOSClass:Burstable EphemeralContainerStatuses:[] Resize:}
	I0731 10:44:16.471611   51156 pod_ready.go:102] pod "coredns-66bff467f8-2ldkm" in "kube-system" namespace has status "Ready":"False"
	I0731 10:44:18.471884   51156 pod_ready.go:102] pod "coredns-66bff467f8-2ldkm" in "kube-system" namespace has status "Ready":"False"
	I0731 10:44:20.470659   51156 pod_ready.go:92] pod "coredns-66bff467f8-2ldkm" in "kube-system" namespace has status "Ready":"True"
	I0731 10:44:20.470690   51156 pod_ready.go:81] duration metric: took 10.507918892s waiting for pod "coredns-66bff467f8-2ldkm" in "kube-system" namespace to be "Ready" ...
	I0731 10:44:20.470705   51156 pod_ready.go:78] waiting up to 6m0s for pod "etcd-ingress-addon-legacy-538476" in "kube-system" namespace to be "Ready" ...
	I0731 10:44:20.474746   51156 pod_ready.go:92] pod "etcd-ingress-addon-legacy-538476" in "kube-system" namespace has status "Ready":"True"
	I0731 10:44:20.474763   51156 pod_ready.go:81] duration metric: took 4.051608ms waiting for pod "etcd-ingress-addon-legacy-538476" in "kube-system" namespace to be "Ready" ...
	I0731 10:44:20.474777   51156 pod_ready.go:78] waiting up to 6m0s for pod "kube-apiserver-ingress-addon-legacy-538476" in "kube-system" namespace to be "Ready" ...
	I0731 10:44:20.478497   51156 pod_ready.go:92] pod "kube-apiserver-ingress-addon-legacy-538476" in "kube-system" namespace has status "Ready":"True"
	I0731 10:44:20.478519   51156 pod_ready.go:81] duration metric: took 3.731108ms waiting for pod "kube-apiserver-ingress-addon-legacy-538476" in "kube-system" namespace to be "Ready" ...
	I0731 10:44:20.478531   51156 pod_ready.go:78] waiting up to 6m0s for pod "kube-controller-manager-ingress-addon-legacy-538476" in "kube-system" namespace to be "Ready" ...
	I0731 10:44:20.482690   51156 pod_ready.go:92] pod "kube-controller-manager-ingress-addon-legacy-538476" in "kube-system" namespace has status "Ready":"True"
	I0731 10:44:20.482714   51156 pod_ready.go:81] duration metric: took 4.171903ms waiting for pod "kube-controller-manager-ingress-addon-legacy-538476" in "kube-system" namespace to be "Ready" ...
	I0731 10:44:20.482726   51156 pod_ready.go:78] waiting up to 6m0s for pod "kube-proxy-8p84m" in "kube-system" namespace to be "Ready" ...
	I0731 10:44:20.486371   51156 pod_ready.go:92] pod "kube-proxy-8p84m" in "kube-system" namespace has status "Ready":"True"
	I0731 10:44:20.486385   51156 pod_ready.go:81] duration metric: took 3.65235ms waiting for pod "kube-proxy-8p84m" in "kube-system" namespace to be "Ready" ...
	I0731 10:44:20.486393   51156 pod_ready.go:78] waiting up to 6m0s for pod "kube-scheduler-ingress-addon-legacy-538476" in "kube-system" namespace to be "Ready" ...
	I0731 10:44:20.666753   51156 request.go:628] Waited for 180.308434ms due to client-side throttling, not priority and fairness, request: GET:https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods/kube-scheduler-ingress-addon-legacy-538476
	I0731 10:44:20.866745   51156 request.go:628] Waited for 197.35568ms due to client-side throttling, not priority and fairness, request: GET:https://192.168.49.2:8443/api/v1/nodes/ingress-addon-legacy-538476
	I0731 10:44:20.869262   51156 pod_ready.go:92] pod "kube-scheduler-ingress-addon-legacy-538476" in "kube-system" namespace has status "Ready":"True"
	I0731 10:44:20.869280   51156 pod_ready.go:81] duration metric: took 382.88192ms waiting for pod "kube-scheduler-ingress-addon-legacy-538476" in "kube-system" namespace to be "Ready" ...
	I0731 10:44:20.869290   51156 pod_ready.go:38] duration metric: took 10.912536621s for extra waiting for all system-critical and pods with labels [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] to be "Ready" ...
	I0731 10:44:20.869305   51156 api_server.go:52] waiting for apiserver process to appear ...
	I0731 10:44:20.869376   51156 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
	I0731 10:44:20.879572   51156 api_server.go:72] duration metric: took 24.495761459s to wait for apiserver process to appear ...
	I0731 10:44:20.879591   51156 api_server.go:88] waiting for apiserver healthz status ...
	I0731 10:44:20.879603   51156 api_server.go:253] Checking apiserver healthz at https://192.168.49.2:8443/healthz ...
	I0731 10:44:20.884317   51156 api_server.go:279] https://192.168.49.2:8443/healthz returned 200:
	ok
	I0731 10:44:20.885044   51156 api_server.go:141] control plane version: v1.18.20
	I0731 10:44:20.885065   51156 api_server.go:131] duration metric: took 5.469449ms to wait for apiserver health ...
	I0731 10:44:20.885073   51156 system_pods.go:43] waiting for kube-system pods to appear ...
	I0731 10:44:21.067367   51156 request.go:628] Waited for 182.216027ms due to client-side throttling, not priority and fairness, request: GET:https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods
	I0731 10:44:21.072629   51156 system_pods.go:59] 8 kube-system pods found
	I0731 10:44:21.072662   51156 system_pods.go:61] "coredns-66bff467f8-2ldkm" [b34b0606-480a-44de-8aac-1aa3199f99cf] Running
	I0731 10:44:21.072669   51156 system_pods.go:61] "etcd-ingress-addon-legacy-538476" [bf211bcc-cb21-42f4-8a7a-5b3cc660ba1f] Running
	I0731 10:44:21.072672   51156 system_pods.go:61] "kindnet-djtg2" [43178035-f884-49e0-8076-49f6ab2acca2] Running
	I0731 10:44:21.072676   51156 system_pods.go:61] "kube-apiserver-ingress-addon-legacy-538476" [74212eaa-4032-447f-9403-dd98913f0c49] Running
	I0731 10:44:21.072684   51156 system_pods.go:61] "kube-controller-manager-ingress-addon-legacy-538476" [e43093b2-eac6-44f8-98ed-a6b7c9e444f1] Running
	I0731 10:44:21.072688   51156 system_pods.go:61] "kube-proxy-8p84m" [a3265ad1-8f6a-4e52-912e-148a85aa5f9b] Running
	I0731 10:44:21.072692   51156 system_pods.go:61] "kube-scheduler-ingress-addon-legacy-538476" [184ba7f0-86d8-4cae-8609-c05e46eb788b] Running
	I0731 10:44:21.072696   51156 system_pods.go:61] "storage-provisioner" [18c38aed-e1dd-408b-b123-ff79671d5ba9] Running
	I0731 10:44:21.072701   51156 system_pods.go:74] duration metric: took 187.623881ms to wait for pod list to return data ...
	I0731 10:44:21.072712   51156 default_sa.go:34] waiting for default service account to be created ...
	I0731 10:44:21.267131   51156 request.go:628] Waited for 194.346421ms due to client-side throttling, not priority and fairness, request: GET:https://192.168.49.2:8443/api/v1/namespaces/default/serviceaccounts
	I0731 10:44:21.269309   51156 default_sa.go:45] found service account: "default"
	I0731 10:44:21.269331   51156 default_sa.go:55] duration metric: took 196.613767ms for default service account to be created ...
	I0731 10:44:21.269338   51156 system_pods.go:116] waiting for k8s-apps to be running ...
	I0731 10:44:21.466741   51156 request.go:628] Waited for 197.349724ms due to client-side throttling, not priority and fairness, request: GET:https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods
	I0731 10:44:21.472056   51156 system_pods.go:86] 8 kube-system pods found
	I0731 10:44:21.472080   51156 system_pods.go:89] "coredns-66bff467f8-2ldkm" [b34b0606-480a-44de-8aac-1aa3199f99cf] Running
	I0731 10:44:21.472086   51156 system_pods.go:89] "etcd-ingress-addon-legacy-538476" [bf211bcc-cb21-42f4-8a7a-5b3cc660ba1f] Running
	I0731 10:44:21.472093   51156 system_pods.go:89] "kindnet-djtg2" [43178035-f884-49e0-8076-49f6ab2acca2] Running
	I0731 10:44:21.472097   51156 system_pods.go:89] "kube-apiserver-ingress-addon-legacy-538476" [74212eaa-4032-447f-9403-dd98913f0c49] Running
	I0731 10:44:21.472101   51156 system_pods.go:89] "kube-controller-manager-ingress-addon-legacy-538476" [e43093b2-eac6-44f8-98ed-a6b7c9e444f1] Running
	I0731 10:44:21.472105   51156 system_pods.go:89] "kube-proxy-8p84m" [a3265ad1-8f6a-4e52-912e-148a85aa5f9b] Running
	I0731 10:44:21.472109   51156 system_pods.go:89] "kube-scheduler-ingress-addon-legacy-538476" [184ba7f0-86d8-4cae-8609-c05e46eb788b] Running
	I0731 10:44:21.472113   51156 system_pods.go:89] "storage-provisioner" [18c38aed-e1dd-408b-b123-ff79671d5ba9] Running
	I0731 10:44:21.472118   51156 system_pods.go:126] duration metric: took 202.776555ms to wait for k8s-apps to be running ...
	I0731 10:44:21.472130   51156 system_svc.go:44] waiting for kubelet service to be running ....
	I0731 10:44:21.472169   51156 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet
	I0731 10:44:21.483764   51156 system_svc.go:56] duration metric: took 11.621098ms WaitForService to wait for kubelet.
	I0731 10:44:21.483789   51156 kubeadm.go:581] duration metric: took 25.099980071s to wait for : map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] ...
	I0731 10:44:21.483821   51156 node_conditions.go:102] verifying NodePressure condition ...
	I0731 10:44:21.667235   51156 request.go:628] Waited for 183.32585ms due to client-side throttling, not priority and fairness, request: GET:https://192.168.49.2:8443/api/v1/nodes
	I0731 10:44:21.669880   51156 node_conditions.go:122] node storage ephemeral capacity is 304681132Ki
	I0731 10:44:21.669903   51156 node_conditions.go:123] node cpu capacity is 8
	I0731 10:44:21.669912   51156 node_conditions.go:105] duration metric: took 186.085772ms to run NodePressure ...
	I0731 10:44:21.669924   51156 start.go:228] waiting for startup goroutines ...
	I0731 10:44:21.669930   51156 start.go:233] waiting for cluster config update ...
	I0731 10:44:21.669938   51156 start.go:242] writing updated cluster config ...
	I0731 10:44:21.670216   51156 ssh_runner.go:195] Run: rm -f paused
	I0731 10:44:21.713636   51156 start.go:596] kubectl: 1.27.4, cluster: 1.18.20 (minor skew: 9)
	I0731 10:44:21.716220   51156 out.go:177] 
	W0731 10:44:21.717811   51156 out.go:239] ! /usr/local/bin/kubectl is version 1.27.4, which may have incompatibilities with Kubernetes 1.18.20.
	I0731 10:44:21.719238   51156 out.go:177]   - Want kubectl v1.18.20? Try 'minikube kubectl -- get pods -A'
	I0731 10:44:21.720701   51156 out.go:177] * Done! kubectl is now configured to use "ingress-addon-legacy-538476" cluster and "default" namespace by default
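	
	The version-skew warning above is worth reading carefully: kubectl 1.27.4 against a 1.18.20 apiserver is nine minor versions apart, far outside the one-minor-version skew kubectl supports. A minimal sketch of driving this cluster with a matching client instead, using the profile name from this run (the shell alias is a hypothetical convenience, not something the test does):
	
	    # minikube bundles a kubectl matching the cluster's Kubernetes version
	    out/minikube-linux-amd64 -p ingress-addon-legacy-538476 kubectl -- get pods -A
	    # optional: route all kubectl calls through it for this shell session
	    alias kubectl='out/minikube-linux-amd64 -p ingress-addon-legacy-538476 kubectl --'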
	
	* 
	* ==> CRI-O <==
	* Jul 31 10:47:02 ingress-addon-legacy-538476 crio[952]: time="2023-07-31 10:47:02.879147850Z" level=info msg="Started container" PID=4813 containerID=a8609dd14dce40982971e30272a9f4399b5db9901b7df1f6d0c2ddbee032f422 description=default/hello-world-app-5f5d8b66bb-c9xpv/hello-world-app id=937282ba-4ec9-4e24-8c16-6a52abdd9e0a name=/runtime.v1alpha2.RuntimeService/StartContainer sandboxID=9f3bd927bbbbb189662ac2fe53be5964605361dcc1f166b2b25673a492ee2d99
	Jul 31 10:47:12 ingress-addon-legacy-538476 crio[952]: time="2023-07-31 10:47:12.818056659Z" level=info msg="Checking image status: cryptexlabs/minikube-ingress-dns:0.3.0@sha256:e252d2a4c704027342b303cc563e95d2e71d2a0f1404f55d676390e28d5093ab" id=1f53798d-c7c2-450a-87b5-0fd016e87637 name=/runtime.v1alpha2.ImageService/ImageStatus
	Jul 31 10:47:17 ingress-addon-legacy-538476 crio[952]: time="2023-07-31 10:47:17.818720316Z" level=info msg="Stopping pod sandbox: 40c7a44467d2ddf77f3705915b3a91dda60aba8a6f35f1d3c1d99e9141b06549" id=02c6fb76-ac57-4f77-a26f-60ea2bac65bf name=/runtime.v1alpha2.RuntimeService/StopPodSandbox
	Jul 31 10:47:17 ingress-addon-legacy-538476 crio[952]: time="2023-07-31 10:47:17.819735483Z" level=info msg="Stopped pod sandbox: 40c7a44467d2ddf77f3705915b3a91dda60aba8a6f35f1d3c1d99e9141b06549" id=02c6fb76-ac57-4f77-a26f-60ea2bac65bf name=/runtime.v1alpha2.RuntimeService/StopPodSandbox
	Jul 31 10:47:18 ingress-addon-legacy-538476 crio[952]: time="2023-07-31 10:47:18.261920217Z" level=info msg="Stopping pod sandbox: 40c7a44467d2ddf77f3705915b3a91dda60aba8a6f35f1d3c1d99e9141b06549" id=79a454bb-89a0-4191-9d5c-5a3b3ee9b3da name=/runtime.v1alpha2.RuntimeService/StopPodSandbox
	Jul 31 10:47:18 ingress-addon-legacy-538476 crio[952]: time="2023-07-31 10:47:18.261977234Z" level=info msg="Stopped pod sandbox (already stopped): 40c7a44467d2ddf77f3705915b3a91dda60aba8a6f35f1d3c1d99e9141b06549" id=79a454bb-89a0-4191-9d5c-5a3b3ee9b3da name=/runtime.v1alpha2.RuntimeService/StopPodSandbox
	Jul 31 10:47:18 ingress-addon-legacy-538476 crio[952]: time="2023-07-31 10:47:18.994312854Z" level=info msg="Stopping container: a480c5299f886ca37b1ba36f1910e32a0e1ed0784d90fb99faadd81f9992197a (timeout: 2s)" id=aff1470e-7877-453a-a9d8-179203efb2ee name=/runtime.v1alpha2.RuntimeService/StopContainer
	Jul 31 10:47:18 ingress-addon-legacy-538476 crio[952]: time="2023-07-31 10:47:18.997357804Z" level=info msg="Stopping container: a480c5299f886ca37b1ba36f1910e32a0e1ed0784d90fb99faadd81f9992197a (timeout: 2s)" id=84e61719-4843-4d00-9b20-6bac46475f43 name=/runtime.v1alpha2.RuntimeService/StopContainer
	Jul 31 10:47:19 ingress-addon-legacy-538476 crio[952]: time="2023-07-31 10:47:19.817733576Z" level=info msg="Stopping pod sandbox: 40c7a44467d2ddf77f3705915b3a91dda60aba8a6f35f1d3c1d99e9141b06549" id=94cb1aee-b97c-4573-a66b-9b6f5e8ac607 name=/runtime.v1alpha2.RuntimeService/StopPodSandbox
	Jul 31 10:47:19 ingress-addon-legacy-538476 crio[952]: time="2023-07-31 10:47:19.817789841Z" level=info msg="Stopped pod sandbox (already stopped): 40c7a44467d2ddf77f3705915b3a91dda60aba8a6f35f1d3c1d99e9141b06549" id=94cb1aee-b97c-4573-a66b-9b6f5e8ac607 name=/runtime.v1alpha2.RuntimeService/StopPodSandbox
	Jul 31 10:47:21 ingress-addon-legacy-538476 crio[952]: time="2023-07-31 10:47:21.003997271Z" level=warning msg="Stopping container a480c5299f886ca37b1ba36f1910e32a0e1ed0784d90fb99faadd81f9992197a with stop signal timed out: timeout reached after 2 seconds waiting for container process to exit" id=aff1470e-7877-453a-a9d8-179203efb2ee name=/runtime.v1alpha2.RuntimeService/StopContainer
	Jul 31 10:47:21 ingress-addon-legacy-538476 conmon[3465]: conmon a480c5299f886ca37b1b <ninfo>: container 3477 exited with status 137
	Jul 31 10:47:21 ingress-addon-legacy-538476 crio[952]: time="2023-07-31 10:47:21.165337052Z" level=info msg="Stopped container a480c5299f886ca37b1ba36f1910e32a0e1ed0784d90fb99faadd81f9992197a: ingress-nginx/ingress-nginx-controller-7fcf777cb7-dmthl/controller" id=aff1470e-7877-453a-a9d8-179203efb2ee name=/runtime.v1alpha2.RuntimeService/StopContainer
	Jul 31 10:47:21 ingress-addon-legacy-538476 crio[952]: time="2023-07-31 10:47:21.165411757Z" level=info msg="Stopped container a480c5299f886ca37b1ba36f1910e32a0e1ed0784d90fb99faadd81f9992197a: ingress-nginx/ingress-nginx-controller-7fcf777cb7-dmthl/controller" id=84e61719-4843-4d00-9b20-6bac46475f43 name=/runtime.v1alpha2.RuntimeService/StopContainer
	Jul 31 10:47:21 ingress-addon-legacy-538476 crio[952]: time="2023-07-31 10:47:21.166001993Z" level=info msg="Stopping pod sandbox: 1a1d1bbcf0b6019d443a166a445fa40d0699e9488ac8d45cc1327ab0a8b0f31f" id=48687d22-cea5-458c-a74d-e25dfdb10bb8 name=/runtime.v1alpha2.RuntimeService/StopPodSandbox
	Jul 31 10:47:21 ingress-addon-legacy-538476 crio[952]: time="2023-07-31 10:47:21.166098016Z" level=info msg="Stopping pod sandbox: 1a1d1bbcf0b6019d443a166a445fa40d0699e9488ac8d45cc1327ab0a8b0f31f" id=5e38360d-0d6c-416f-b469-ca21d0e317ab name=/runtime.v1alpha2.RuntimeService/StopPodSandbox
	Jul 31 10:47:21 ingress-addon-legacy-538476 crio[952]: time="2023-07-31 10:47:21.168642413Z" level=info msg="Restoring iptables rules: *nat\n:KUBE-HOSTPORTS - [0:0]\n:KUBE-HP-N2UFC6VAY5MW7N5A - [0:0]\n:KUBE-HP-SSF6FJXGKR43BCD6 - [0:0]\n-X KUBE-HP-SSF6FJXGKR43BCD6\n-X KUBE-HP-N2UFC6VAY5MW7N5A\nCOMMIT\n"
	Jul 31 10:47:21 ingress-addon-legacy-538476 crio[952]: time="2023-07-31 10:47:21.169881197Z" level=info msg="Closing host port tcp:80"
	Jul 31 10:47:21 ingress-addon-legacy-538476 crio[952]: time="2023-07-31 10:47:21.169915291Z" level=info msg="Closing host port tcp:443"
	Jul 31 10:47:21 ingress-addon-legacy-538476 crio[952]: time="2023-07-31 10:47:21.171003721Z" level=info msg="Host port tcp:80 does not have an open socket"
	Jul 31 10:47:21 ingress-addon-legacy-538476 crio[952]: time="2023-07-31 10:47:21.171022782Z" level=info msg="Host port tcp:443 does not have an open socket"
	Jul 31 10:47:21 ingress-addon-legacy-538476 crio[952]: time="2023-07-31 10:47:21.171141427Z" level=info msg="Got pod network &{Name:ingress-nginx-controller-7fcf777cb7-dmthl Namespace:ingress-nginx ID:1a1d1bbcf0b6019d443a166a445fa40d0699e9488ac8d45cc1327ab0a8b0f31f UID:18984b9b-e7d5-4027-a1c5-d126db73b13f NetNS:/var/run/netns/469e91d6-7afc-4448-8947-692bfb194b47 Networks:[{Name:kindnet Ifname:eth0}] RuntimeConfig:map[kindnet:{IP: MAC: PortMappings:[] Bandwidth:<nil> IpRanges:[]}] Aliases:map[]}"
	Jul 31 10:47:21 ingress-addon-legacy-538476 crio[952]: time="2023-07-31 10:47:21.171269182Z" level=info msg="Deleting pod ingress-nginx_ingress-nginx-controller-7fcf777cb7-dmthl from CNI network \"kindnet\" (type=ptp)"
	Jul 31 10:47:21 ingress-addon-legacy-538476 crio[952]: time="2023-07-31 10:47:21.203610386Z" level=info msg="Stopped pod sandbox: 1a1d1bbcf0b6019d443a166a445fa40d0699e9488ac8d45cc1327ab0a8b0f31f" id=48687d22-cea5-458c-a74d-e25dfdb10bb8 name=/runtime.v1alpha2.RuntimeService/StopPodSandbox
	Jul 31 10:47:21 ingress-addon-legacy-538476 crio[952]: time="2023-07-31 10:47:21.203732220Z" level=info msg="Stopped pod sandbox (already stopped): 1a1d1bbcf0b6019d443a166a445fa40d0699e9488ac8d45cc1327ab0a8b0f31f" id=5e38360d-0d6c-416f-b469-ca21d0e317ab name=/runtime.v1alpha2.RuntimeService/StopPodSandbox
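	
	The shutdown sequence above is the usual two-phase stop: CRI-O waits 2 seconds for the controller process to exit, logs the timeout, and conmon then reports exit status 137, i.e. 128 + 9 (SIGKILL). A sketch of reproducing the same sequence by hand on the node, using the container ID from this log:
	
	    # ask CRI-O to stop the container, allowing 2 seconds before SIGKILL
	    sudo crictl stop --timeout 2 a480c5299f886
	    # 137 in the exit code confirms the process was SIGKILLed rather than exiting cleanly
	    sudo crictl inspect a480c5299f886 | grep exitCode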
	
	* 
	* ==> container status <==
	* CONTAINER           IMAGE                                                                                                              CREATED             STATE               NAME                      ATTEMPT             POD ID              POD
	a8609dd14dce4       gcr.io/google-samples/hello-app@sha256:845f77fab71033404f4cfceaa1ddb27b70c3551ceb22a5e7f4498cdda6c9daea            23 seconds ago      Running             hello-world-app           0                   9f3bd927bbbbb       hello-world-app-5f5d8b66bb-c9xpv
	337a275982f7f       docker.io/library/nginx@sha256:2d194184b067db3598771b4cf326cfe6ad5051937ba1132b8b7d4b0184e0d0a6                    2 minutes ago       Running             nginx                     0                   d9591249192b5       nginx
	a480c5299f886       registry.k8s.io/ingress-nginx/controller@sha256:35fe394c82164efa8f47f3ed0be981b3f23da77175bbb8268a9ae438851c8324   2 minutes ago       Exited              controller                0                   1a1d1bbcf0b60       ingress-nginx-controller-7fcf777cb7-dmthl
	7efaf3cf81672       docker.io/jettech/kube-webhook-certgen@sha256:784853e84a0223f34ea58fe36766c2dbeb129b125d25f16b8468c903262b77f6     3 minutes ago       Exited              patch                     0                   355e09c48b8f4       ingress-nginx-admission-patch-fhh54
	203d6c0ebed2d       docker.io/jettech/kube-webhook-certgen@sha256:784853e84a0223f34ea58fe36766c2dbeb129b125d25f16b8468c903262b77f6     3 minutes ago       Exited              create                    0                   512bf2dc254d1       ingress-nginx-admission-create-42669
	7826cff4a502a       67da37a9a360e600e74464da48437257b00a754c77c40f60c65e4cb327c34bd5                                                   3 minutes ago       Running             coredns                   0                   87915ef1836ba       coredns-66bff467f8-2ldkm
	4edd9d79e3a3d       6e38f40d628db3002f5617342c8872c935de530d867d0f709a2fbda1a302a562                                                   3 minutes ago       Running             storage-provisioner       0                   ea6e2cae10549       storage-provisioner
	4a392a96cc39c       docker.io/kindest/kindnetd@sha256:6c00e28db008c2afa67d9ee085c86184ec9ae5281d5ae1bd15006746fb9a1974                 3 minutes ago       Running             kindnet-cni               0                   8198ad30fbb80       kindnet-djtg2
	607fef01d189f       27f8b8d51985f755cfb3ffea424fa34865cc0da63e99378d8202f923c3c5a8ba                                                   3 minutes ago       Running             kube-proxy                0                   dc663b3932756       kube-proxy-8p84m
	8da7b13f4d48f       7d8d2960de69688eab5698081441539a1662f47e092488973e455a8334955cb1                                                   3 minutes ago       Running             kube-apiserver            0                   8f226235f970b       kube-apiserver-ingress-addon-legacy-538476
	f569edf9afaa7       303ce5db0e90dab1c5728ec70d21091201a23cdf8aeca70ab54943bbaaf0833f                                                   3 minutes ago       Running             etcd                      0                   16f872bfb5016       etcd-ingress-addon-legacy-538476
	cfd84dd38fffd       e7c545a60706cf009a893afdc7dba900cc2e342b8042b9c421d607ca41e8b290                                                   3 minutes ago       Running             kube-controller-manager   0                   2b2bde80939f2       kube-controller-manager-ingress-addon-legacy-538476
	c3aafe8ff3995       a05a1a79adaad058478b7638d2e73cf408b283305081516fbe02706b0e205346                                                   3 minutes ago       Running             kube-scheduler            0                   1c009fc748826       kube-scheduler-ingress-addon-legacy-538476
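	
	This table is minikube's rendering of the node's CRI state; the Exited controller/patch/create rows line up with the ingress addon being disabled at the end of the test. The same view can be pulled straight from the runtime on the node (a sketch):
	
	    # list all containers, including exited ones, as the table above does
	    sudo crictl ps -a
	    # narrow to the ingress-nginx controller by name
	    sudo crictl ps -a --name controller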
	
	* 
	* ==> coredns [7826cff4a502a2a4a6795eedbe14ee78b0edeb93d78b7d11a9f7f79008dd07eb] <==
	* [INFO] 10.244.0.5:43365 - 9009 "AAAA IN hello-world-app.default.svc.cluster.local.c.k8s-minikube.internal. udp 83 false 512" NXDOMAIN qr,rd,ra 83 0.0055465s
	[INFO] 10.244.0.5:51343 - 9810 "A IN hello-world-app.default.svc.cluster.local.google.internal. udp 75 false 512" NXDOMAIN qr,rd,ra 75 0.005073149s
	[INFO] 10.244.0.5:47706 - 23637 "A IN hello-world-app.default.svc.cluster.local.google.internal. udp 75 false 512" NXDOMAIN qr,rd,ra 75 0.004687899s
	[INFO] 10.244.0.5:39412 - 15907 "A IN hello-world-app.default.svc.cluster.local.google.internal. udp 75 false 512" NXDOMAIN qr,rd,ra 75 0.005064511s
	[INFO] 10.244.0.5:51502 - 55070 "A IN hello-world-app.default.svc.cluster.local.google.internal. udp 75 false 512" NXDOMAIN qr,rd,ra 75 0.005144622s
	[INFO] 10.244.0.5:58481 - 3406 "A IN hello-world-app.default.svc.cluster.local.google.internal. udp 75 false 512" NXDOMAIN qr,rd,ra 75 0.004771234s
	[INFO] 10.244.0.5:43365 - 36007 "A IN hello-world-app.default.svc.cluster.local.google.internal. udp 75 false 512" NXDOMAIN qr,rd,ra 75 0.004848082s
	[INFO] 10.244.0.5:52222 - 39349 "A IN hello-world-app.default.svc.cluster.local.google.internal. udp 75 false 512" NXDOMAIN qr,rd,ra 75 0.005203278s
	[INFO] 10.244.0.5:57855 - 19448 "A IN hello-world-app.default.svc.cluster.local.google.internal. udp 75 false 512" NXDOMAIN qr,rd,ra 75 0.005250043s
	[INFO] 10.244.0.5:39412 - 31668 "AAAA IN hello-world-app.default.svc.cluster.local.google.internal. udp 75 false 512" NXDOMAIN qr,rd,ra 75 0.005911378s
	[INFO] 10.244.0.5:47706 - 49712 "AAAA IN hello-world-app.default.svc.cluster.local.google.internal. udp 75 false 512" NXDOMAIN qr,rd,ra 75 0.006050203s
	[INFO] 10.244.0.5:52222 - 10685 "AAAA IN hello-world-app.default.svc.cluster.local.google.internal. udp 75 false 512" NXDOMAIN qr,rd,ra 75 0.005910702s
	[INFO] 10.244.0.5:51343 - 5574 "AAAA IN hello-world-app.default.svc.cluster.local.google.internal. udp 75 false 512" NXDOMAIN qr,rd,ra 75 0.006189129s
	[INFO] 10.244.0.5:57855 - 23475 "AAAA IN hello-world-app.default.svc.cluster.local.google.internal. udp 75 false 512" NXDOMAIN qr,rd,ra 75 0.005781179s
	[INFO] 10.244.0.5:43365 - 51604 "AAAA IN hello-world-app.default.svc.cluster.local.google.internal. udp 75 false 512" NXDOMAIN qr,rd,ra 75 0.005992486s
	[INFO] 10.244.0.5:51502 - 43500 "AAAA IN hello-world-app.default.svc.cluster.local.google.internal. udp 75 false 512" NXDOMAIN qr,rd,ra 75 0.006020824s
	[INFO] 10.244.0.5:47706 - 21437 "A IN hello-world-app.default.svc.cluster.local. udp 59 false 512" NOERROR qr,aa,rd 116 0.00012833s
	[INFO] 10.244.0.5:52222 - 43326 "A IN hello-world-app.default.svc.cluster.local. udp 59 false 512" NOERROR qr,aa,rd 116 0.00005996s
	[INFO] 10.244.0.5:58481 - 22460 "AAAA IN hello-world-app.default.svc.cluster.local.google.internal. udp 75 false 512" NXDOMAIN qr,rd,ra 75 0.005885879s
	[INFO] 10.244.0.5:57855 - 53500 "A IN hello-world-app.default.svc.cluster.local. udp 59 false 512" NOERROR qr,aa,rd 116 0.000146476s
	[INFO] 10.244.0.5:51502 - 59029 "A IN hello-world-app.default.svc.cluster.local. udp 59 false 512" NOERROR qr,aa,rd 116 0.000168348s
	[INFO] 10.244.0.5:51343 - 34895 "A IN hello-world-app.default.svc.cluster.local. udp 59 false 512" NOERROR qr,aa,rd 116 0.000202662s
	[INFO] 10.244.0.5:39412 - 23556 "A IN hello-world-app.default.svc.cluster.local. udp 59 false 512" NOERROR qr,aa,rd 116 0.000447842s
	[INFO] 10.244.0.5:43365 - 14752 "A IN hello-world-app.default.svc.cluster.local. udp 59 false 512" NOERROR qr,aa,rd 116 0.000273926s
	[INFO] 10.244.0.5:58481 - 7823 "A IN hello-world-app.default.svc.cluster.local. udp 59 false 512" NOERROR qr,aa,rd 116 0.000104775s
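	
	The NXDOMAIN/NOERROR pattern above is plain ndots-driven search-list expansion, not a DNS fault: hello-world-app.default.svc.cluster.local has four dots, fewer than ndots, so the resolver first appends each search suffix (including the GCE-inherited c.k8s-minikube.internal and google.internal domains) and only then tries the name as-is, which answers NOERROR in well under a millisecond. A sketch of a pod resolv.conf that would produce exactly this query sequence (the nameserver IP is the conventional kube-dns ClusterIP, assumed here):
	
	    nameserver 10.96.0.10
	    search default.svc.cluster.local svc.cluster.local cluster.local c.k8s-minikube.internal google.internal
	    options ndots:5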
	
	* 
	* ==> describe nodes <==
	* Name:               ingress-addon-legacy-538476
	Roles:              master
	Labels:             beta.kubernetes.io/arch=amd64
	                    beta.kubernetes.io/os=linux
	                    kubernetes.io/arch=amd64
	                    kubernetes.io/hostname=ingress-addon-legacy-538476
	                    kubernetes.io/os=linux
	                    minikube.k8s.io/commit=a7848ba25aaaad8ebb50e721c0d343e471188fc7
	                    minikube.k8s.io/name=ingress-addon-legacy-538476
	                    minikube.k8s.io/primary=true
	                    minikube.k8s.io/updated_at=2023_07_31T10_43_40_0700
	                    minikube.k8s.io/version=v1.31.1
	                    node-role.kubernetes.io/master=
	Annotations:        kubeadm.alpha.kubernetes.io/cri-socket: /var/run/crio/crio.sock
	                    node.alpha.kubernetes.io/ttl: 0
	                    volumes.kubernetes.io/controller-managed-attach-detach: true
	CreationTimestamp:  Mon, 31 Jul 2023 10:43:36 +0000
	Taints:             <none>
	Unschedulable:      false
	Lease:
	  HolderIdentity:  ingress-addon-legacy-538476
	  AcquireTime:     <unset>
	  RenewTime:       Mon, 31 Jul 2023 10:47:19 +0000
	Conditions:
	  Type             Status  LastHeartbeatTime                 LastTransitionTime                Reason                       Message
	  ----             ------  -----------------                 ------------------                ------                       -------
	  MemoryPressure   False   Mon, 31 Jul 2023 10:47:09 +0000   Mon, 31 Jul 2023 10:43:33 +0000   KubeletHasSufficientMemory   kubelet has sufficient memory available
	  DiskPressure     False   Mon, 31 Jul 2023 10:47:09 +0000   Mon, 31 Jul 2023 10:43:33 +0000   KubeletHasNoDiskPressure     kubelet has no disk pressure
	  PIDPressure      False   Mon, 31 Jul 2023 10:47:09 +0000   Mon, 31 Jul 2023 10:43:33 +0000   KubeletHasSufficientPID      kubelet has sufficient PID available
	  Ready            True    Mon, 31 Jul 2023 10:47:09 +0000   Mon, 31 Jul 2023 10:44:09 +0000   KubeletReady                 kubelet is posting ready status
	Addresses:
	  InternalIP:  192.168.49.2
	  Hostname:    ingress-addon-legacy-538476
	Capacity:
	  cpu:                8
	  ephemeral-storage:  304681132Ki
	  hugepages-1Gi:      0
	  hugepages-2Mi:      0
	  memory:             32859436Ki
	  pods:               110
	Allocatable:
	  cpu:                8
	  ephemeral-storage:  304681132Ki
	  hugepages-1Gi:      0
	  hugepages-2Mi:      0
	  memory:             32859436Ki
	  pods:               110
	System Info:
	  Machine ID:                 9e4a168d02c24b59bbc934291d87bae5
	  System UUID:                3a0655a8-51e4-4421-bedc-c0b2534afe25
	  Boot ID:                    29fc075f-138b-4be6-bf1b-3db3f063b35c
	  Kernel Version:             5.15.0-1038-gcp
	  OS Image:                   Ubuntu 22.04.2 LTS
	  Operating System:           linux
	  Architecture:               amd64
	  Container Runtime Version:  cri-o://1.24.6
	  Kubelet Version:            v1.18.20
	  Kube-Proxy Version:         v1.18.20
	PodCIDR:                      10.244.0.0/24
	PodCIDRs:                     10.244.0.0/24
	Non-terminated Pods:          (10 in total)
	  Namespace                   Name                                                   CPU Requests  CPU Limits  Memory Requests  Memory Limits  AGE
	  ---------                   ----                                                   ------------  ----------  ---------------  -------------  ---
	  default                     hello-world-app-5f5d8b66bb-c9xpv                       0 (0%)        0 (0%)      0 (0%)           0 (0%)         25s
	  default                     nginx                                                  0 (0%)        0 (0%)      0 (0%)           0 (0%)         2m45s
	  kube-system                 coredns-66bff467f8-2ldkm                               100m (1%)     0 (0%)      70Mi (0%)        170Mi (0%)     3m30s
	  kube-system                 etcd-ingress-addon-legacy-538476                       0 (0%)        0 (0%)      0 (0%)           0 (0%)         3m46s
	  kube-system                 kindnet-djtg2                                          100m (1%)     100m (1%)   50Mi (0%)        50Mi (0%)      3m30s
	  kube-system                 kube-apiserver-ingress-addon-legacy-538476             250m (3%)     0 (0%)      0 (0%)           0 (0%)         3m47s
	  kube-system                 kube-controller-manager-ingress-addon-legacy-538476    200m (2%)     0 (0%)      0 (0%)           0 (0%)         3m47s
	  kube-system                 kube-proxy-8p84m                                       0 (0%)        0 (0%)      0 (0%)           0 (0%)         3m30s
	  kube-system                 kube-scheduler-ingress-addon-legacy-538476             100m (1%)     0 (0%)      0 (0%)           0 (0%)         3m46s
	  kube-system                 storage-provisioner                                    0 (0%)        0 (0%)      0 (0%)           0 (0%)         3m29s
	Allocated resources:
	  (Total limits may be over 100 percent, i.e., overcommitted.)
	  Resource           Requests    Limits
	  --------           --------    ------
	  cpu                750m (9%)   100m (1%)
	  memory             120Mi (0%)  220Mi (0%)
	  ephemeral-storage  0 (0%)      0 (0%)
	  hugepages-1Gi      0 (0%)      0 (0%)
	  hugepages-2Mi      0 (0%)      0 (0%)
	Events:
	  Type    Reason                   Age    From        Message
	  ----    ------                   ----   ----        -------
	  Normal  Starting                 3m47s  kubelet     Starting kubelet.
	  Normal  NodeHasSufficientMemory  3m47s  kubelet     Node ingress-addon-legacy-538476 status is now: NodeHasSufficientMemory
	  Normal  NodeHasNoDiskPressure    3m47s  kubelet     Node ingress-addon-legacy-538476 status is now: NodeHasNoDiskPressure
	  Normal  NodeHasSufficientPID     3m47s  kubelet     Node ingress-addon-legacy-538476 status is now: NodeHasSufficientPID
	  Normal  Starting                 3m29s  kube-proxy  Starting kube-proxy.
	  Normal  NodeReady                3m17s  kubelet     Node ingress-addon-legacy-538476 status is now: NodeReady
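	
	As a sanity check on the percentages above: the node advertises 8 CPUs (8000m), so the 750m of CPU requests is 750/8000 ≈ 9% and the 100m of limits ≈ 1%; memory requests of 120Mi against 32859436Ki (~31.3Gi) round to 0%. The node is nowhere near saturation, so none of the pending-pod symptoms in this test trace back to resource pressure.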
	
	* 
	* ==> dmesg <==
	* [  +0.004911] FS-Cache: N-cookie c=0000000f [p=00000003 fl=2 nc=0 na=1]
	[  +0.006641] FS-Cache: N-cookie d=000000003af63fe5{9p.inode} n=00000000d1059337
	[  +0.008699] FS-Cache: N-key=[8] '88a00f0200000000'
	[  +0.248824] FS-Cache: Duplicate cookie detected
	[  +0.004664] FS-Cache: O-cookie c=00000009 [p=00000003 fl=226 nc=0 na=1]
	[  +0.006749] FS-Cache: O-cookie d=000000003af63fe5{9p.inode} n=0000000050399130
	[  +0.007352] FS-Cache: O-key=[8] '92a00f0200000000'
	[  +0.004961] FS-Cache: N-cookie c=00000010 [p=00000003 fl=2 nc=0 na=1]
	[  +0.006590] FS-Cache: N-cookie d=000000003af63fe5{9p.inode} n=000000007a051ada
	[  +0.008734] FS-Cache: N-key=[8] '92a00f0200000000'
	[ +20.004980] kmem.limit_in_bytes is deprecated and will be removed. Please report your usecase to linux-mm@kvack.org if you depend on this functionality.
	[Jul31 10:44] IPv4: martian source 10.244.0.5 from 127.0.0.1, on dev eth0
	[  +0.000006] ll header: 00000000: 92 cf 61 ac 24 44 52 90 48 90 1a 74 08 00
	[  +1.020117] IPv4: martian source 10.244.0.5 from 127.0.0.1, on dev eth0
	[  +0.000005] ll header: 00000000: 92 cf 61 ac 24 44 52 90 48 90 1a 74 08 00
	[  +2.015807] IPv4: martian source 10.244.0.5 from 127.0.0.1, on dev eth0
	[  +0.000006] ll header: 00000000: 92 cf 61 ac 24 44 52 90 48 90 1a 74 08 00
	[  +4.127602] IPv4: martian source 10.244.0.5 from 127.0.0.1, on dev eth0
	[  +0.000008] ll header: 00000000: 92 cf 61 ac 24 44 52 90 48 90 1a 74 08 00
	[Jul31 10:45] IPv4: martian source 10.244.0.5 from 127.0.0.1, on dev eth0
	[  +0.000006] ll header: 00000000: 92 cf 61 ac 24 44 52 90 48 90 1a 74 08 00
	[ +16.126451] IPv4: martian source 10.244.0.5 from 127.0.0.1, on dev eth0
	[  +0.000006] ll header: 00000000: 92 cf 61 ac 24 44 52 90 48 90 1a 74 08 00
	[ +33.276809] IPv4: martian source 10.244.0.5 from 127.0.0.1, on dev eth0
	[  +0.000008] ll header: 00000000: 92 cf 61 ac 24 44 52 90 48 90 1a 74 08 00
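	
	The repeating "martian source" lines mean the kernel saw packets on eth0 addressed to the pod IP 10.244.0.5 but claiming the loopback source 127.0.0.1, a pairing that can never legitimately arrive on that interface; with the hairpin-style traffic this ingress test generates, that is typically cosmetic noise rather than a routing fault. Whether it is logged at all is a per-interface sysctl; a sketch of checking and silencing it (doing so is an assumption, not part of the run):
	
	    # 1 = log martian packets (the state that produced the lines above)
	    sysctl net.ipv4.conf.eth0.log_martians
	    sudo sysctl -w net.ipv4.conf.eth0.log_martians=0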
	
	* 
	* ==> etcd [f569edf9afaa772fba4a9785f6e1e893c68ccbbba0510216d6d035e1d71200bf] <==
	* 2023-07-31 10:43:33.030252 I | etcdserver: starting server... [version: 3.4.3, cluster version: to_be_decided]
	2023-07-31 10:43:33.030337 I | etcdserver: aec36adc501070cc as single-node; fast-forwarding 9 ticks (election ticks 10)
	raft2023/07/31 10:43:33 INFO: aec36adc501070cc switched to configuration voters=(12593026477526642892)
	2023-07-31 10:43:33.031045 I | etcdserver/membership: added member aec36adc501070cc [https://192.168.49.2:2380] to cluster fa54960ea34d58be
	2023-07-31 10:43:33.032211 I | embed: ClientTLS: cert = /var/lib/minikube/certs/etcd/server.crt, key = /var/lib/minikube/certs/etcd/server.key, trusted-ca = /var/lib/minikube/certs/etcd/ca.crt, client-cert-auth = true, crl-file = 
	2023-07-31 10:43:33.032326 I | embed: listening for peers on 192.168.49.2:2380
	2023-07-31 10:43:33.032345 I | embed: listening for metrics on http://127.0.0.1:2381
	raft2023/07/31 10:43:33 INFO: aec36adc501070cc is starting a new election at term 1
	raft2023/07/31 10:43:33 INFO: aec36adc501070cc became candidate at term 2
	raft2023/07/31 10:43:33 INFO: aec36adc501070cc received MsgVoteResp from aec36adc501070cc at term 2
	raft2023/07/31 10:43:33 INFO: aec36adc501070cc became leader at term 2
	raft2023/07/31 10:43:33 INFO: raft.node: aec36adc501070cc elected leader aec36adc501070cc at term 2
	2023-07-31 10:43:33.224505 I | etcdserver: published {Name:ingress-addon-legacy-538476 ClientURLs:[https://192.168.49.2:2379]} to cluster fa54960ea34d58be
	2023-07-31 10:43:33.224530 I | embed: ready to serve client requests
	2023-07-31 10:43:33.224539 I | embed: ready to serve client requests
	2023-07-31 10:43:33.224571 I | etcdserver: setting up the initial cluster version to 3.4
	2023-07-31 10:43:33.225124 N | etcdserver/membership: set the initial cluster version to 3.4
	2023-07-31 10:43:33.225265 I | etcdserver/api: enabled capabilities for version 3.4
	2023-07-31 10:43:33.227006 I | embed: serving client requests on 127.0.0.1:2379
	2023-07-31 10:43:33.227131 I | embed: serving client requests on 192.168.49.2:2379
	2023-07-31 10:43:59.927769 W | etcdserver: read-only range request "key:\"/registry/pods/kube-system/kube-apiserver-ingress-addon-legacy-538476\" " with result "range_response_count:1 size:6680" took too long (107.650777ms) to execute
	2023-07-31 10:44:00.114091 W | etcdserver: read-only range request "key:\"/registry/pods/kube-system/kube-controller-manager-ingress-addon-legacy-538476\" " with result "range_response_count:1 size:6682" took too long (178.182873ms) to execute
	2023-07-31 10:44:00.114214 W | etcdserver: read-only range request "key:\"/registry/minions/ingress-addon-legacy-538476\" " with result "range_response_count:1 size:6604" took too long (158.38254ms) to execute
	2023-07-31 10:44:00.364995 W | etcdserver: read-only range request "key:\"/registry/pods/kube-system/kube-scheduler-ingress-addon-legacy-538476\" " with result "range_response_count:1 size:3909" took too long (210.98535ms) to execute
	2023-07-31 10:44:00.630031 W | etcdserver: read-only range request "key:\"/registry/minions/ingress-addon-legacy-538476\" " with result "range_response_count:1 size:6604" took too long (174.257004ms) to execute
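	
	The handful of "took too long" warnings are etcd's slow-request log: any read exceeding the 100ms expensive-request threshold is reported with its duration and response size. Here they sit in the 107-211ms range, cluster in the 10:44 startup burst, and never recur, so they point at transient apiserver load rather than sustained disk latency. The endpoint can be probed directly with etcdctl, reusing the certificate paths shown earlier in this log (a sketch; a dedicated client certificate may be required since client-cert-auth is enabled):
	
	    ETCDCTL_API=3 etcdctl --endpoints=https://192.168.49.2:2379 \
	      --cacert=/var/lib/minikube/certs/etcd/ca.crt \
	      --cert=/var/lib/minikube/certs/etcd/server.crt \
	      --key=/var/lib/minikube/certs/etcd/server.key \
	      endpoint status -w table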
	
	* 
	* ==> kernel <==
	*  10:47:26 up 29 min,  0 users,  load average: 0.24, 0.73, 0.58
	Linux ingress-addon-legacy-538476 5.15.0-1038-gcp #46~20.04.1-Ubuntu SMP Fri Jul 14 09:48:19 UTC 2023 x86_64 x86_64 x86_64 GNU/Linux
	PRETTY_NAME="Ubuntu 22.04.2 LTS"
	
	* 
	* ==> kindnet [4a392a96cc39c3901a8b144cc40aecad6f57c865f3674a8755cd60f3ee7d2ce7] <==
	* I0731 10:45:21.876940       1 main.go:227] handling current node
	I0731 10:45:31.888905       1 main.go:223] Handling node with IPs: map[192.168.49.2:{}]
	I0731 10:45:31.888927       1 main.go:227] handling current node
	I0731 10:45:41.892322       1 main.go:223] Handling node with IPs: map[192.168.49.2:{}]
	I0731 10:45:41.892351       1 main.go:227] handling current node
	I0731 10:45:51.898501       1 main.go:223] Handling node with IPs: map[192.168.49.2:{}]
	I0731 10:45:51.898531       1 main.go:227] handling current node
	I0731 10:46:01.901997       1 main.go:223] Handling node with IPs: map[192.168.49.2:{}]
	I0731 10:46:01.902024       1 main.go:227] handling current node
	I0731 10:46:11.914305       1 main.go:223] Handling node with IPs: map[192.168.49.2:{}]
	I0731 10:46:11.914328       1 main.go:227] handling current node
	I0731 10:46:21.918335       1 main.go:223] Handling node with IPs: map[192.168.49.2:{}]
	I0731 10:46:21.918357       1 main.go:227] handling current node
	I0731 10:46:31.930389       1 main.go:223] Handling node with IPs: map[192.168.49.2:{}]
	I0731 10:46:31.930416       1 main.go:227] handling current node
	I0731 10:46:41.942257       1 main.go:223] Handling node with IPs: map[192.168.49.2:{}]
	I0731 10:46:41.942280       1 main.go:227] handling current node
	I0731 10:46:51.945141       1 main.go:223] Handling node with IPs: map[192.168.49.2:{}]
	I0731 10:46:51.945164       1 main.go:227] handling current node
	I0731 10:47:01.948197       1 main.go:223] Handling node with IPs: map[192.168.49.2:{}]
	I0731 10:47:01.948220       1 main.go:227] handling current node
	I0731 10:47:11.956998       1 main.go:223] Handling node with IPs: map[192.168.49.2:{}]
	I0731 10:47:11.957024       1 main.go:227] handling current node
	I0731 10:47:21.960869       1 main.go:223] Handling node with IPs: map[192.168.49.2:{}]
	I0731 10:47:21.960896       1 main.go:227] handling current node
	
	* 
	* ==> kube-apiserver [8da7b13f4d48f36b6fc76d4339360d50aff76b6aa3a1ec957d6da05119bc8753] <==
	* I0731 10:43:36.728632       1 dynamic_cafile_content.go:167] Starting client-ca-bundle::/var/lib/minikube/certs/ca.crt
	I0731 10:43:36.729490       1 dynamic_cafile_content.go:167] Starting request-header::/var/lib/minikube/certs/front-proxy-ca.crt
	I0731 10:43:36.814366       1 shared_informer.go:230] Caches are synced for crd-autoregister 
	I0731 10:43:36.826072       1 shared_informer.go:230] Caches are synced for cluster_authentication_trust_controller 
	I0731 10:43:36.904465       1 cache.go:39] Caches are synced for APIServiceRegistrationController controller
	I0731 10:43:36.907372       1 cache.go:39] Caches are synced for AvailableConditionController controller
	I0731 10:43:36.907783       1 cache.go:39] Caches are synced for autoregister controller
	I0731 10:43:37.703105       1 controller.go:130] OpenAPI AggregationController: action for item : Nothing (removed from the queue).
	I0731 10:43:37.703139       1 controller.go:130] OpenAPI AggregationController: action for item k8s_internal_local_delegation_chain_0000000000: Nothing (removed from the queue).
	I0731 10:43:37.707708       1 storage_scheduling.go:134] created PriorityClass system-node-critical with value 2000001000
	I0731 10:43:37.710416       1 storage_scheduling.go:134] created PriorityClass system-cluster-critical with value 2000000000
	I0731 10:43:37.710439       1 storage_scheduling.go:143] all system priority classes are created successfully or already exist.
	I0731 10:43:37.970327       1 controller.go:609] quota admission added evaluator for: roles.rbac.authorization.k8s.io
	I0731 10:43:37.995077       1 controller.go:609] quota admission added evaluator for: rolebindings.rbac.authorization.k8s.io
	W0731 10:43:38.039258       1 lease.go:224] Resetting endpoints for master service "kubernetes" to [192.168.49.2]
	I0731 10:43:38.039955       1 controller.go:609] quota admission added evaluator for: endpoints
	I0731 10:43:38.042606       1 controller.go:609] quota admission added evaluator for: endpointslices.discovery.k8s.io
	I0731 10:43:39.072185       1 controller.go:609] quota admission added evaluator for: serviceaccounts
	I0731 10:43:39.421613       1 controller.go:609] quota admission added evaluator for: deployments.apps
	I0731 10:43:39.611237       1 controller.go:609] quota admission added evaluator for: daemonsets.apps
	I0731 10:43:39.791179       1 controller.go:609] quota admission added evaluator for: leases.coordination.k8s.io
	I0731 10:43:56.473370       1 controller.go:609] quota admission added evaluator for: replicasets.apps
	I0731 10:43:56.611834       1 controller.go:609] quota admission added evaluator for: controllerrevisions.apps
	I0731 10:44:22.331956       1 controller.go:609] quota admission added evaluator for: jobs.batch
	I0731 10:44:41.595762       1 controller.go:609] quota admission added evaluator for: ingresses.networking.k8s.io
	
	* 
	* ==> kube-controller-manager [cfd84dd38fffda777919b3db8ab42ca3128fbbc790dbb0d0c6aee3f3b10d3c9d] <==
	* I0731 10:43:56.504712       1 taint_manager.go:187] Starting NoExecuteTaintManager
	I0731 10:43:56.504767       1 node_lifecycle_controller.go:1433] Initializing eviction metric for zone: 
	W0731 10:43:56.504897       1 node_lifecycle_controller.go:1048] Missing timestamp for Node ingress-addon-legacy-538476. Assuming now as a timestamp.
	I0731 10:43:56.504943       1 node_lifecycle_controller.go:1199] Controller detected that all Nodes are not-Ready. Entering master disruption mode.
	I0731 10:43:56.505370       1 event.go:278] Event(v1.ObjectReference{Kind:"Node", Namespace:"", Name:"ingress-addon-legacy-538476", UID:"a681f1ac-44d5-4fca-beeb-748986d7de1f", APIVersion:"v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'RegisteredNode' Node ingress-addon-legacy-538476 event: Registered Node ingress-addon-legacy-538476 in Controller
	I0731 10:43:56.509120       1 event.go:278] Event(v1.ObjectReference{Kind:"ReplicaSet", Namespace:"kube-system", Name:"coredns-66bff467f8", UID:"07af491d-8c4e-473c-8a41-1d3687b6b26d", APIVersion:"apps/v1", ResourceVersion:"325", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: coredns-66bff467f8-2ldkm
	I0731 10:43:56.526042       1 shared_informer.go:230] Caches are synced for resource quota 
	I0731 10:43:56.604456       1 shared_informer.go:230] Caches are synced for endpoint_slice 
	I0731 10:43:56.604456       1 shared_informer.go:230] Caches are synced for daemon sets 
	I0731 10:43:56.604489       1 shared_informer.go:230] Caches are synced for resource quota 
	I0731 10:43:56.604486       1 shared_informer.go:230] Caches are synced for garbage collector 
	I0731 10:43:56.604516       1 garbagecollector.go:142] Garbage collector: all resource monitors have synced. Proceeding to collect garbage
	I0731 10:43:56.619483       1 event.go:278] Event(v1.ObjectReference{Kind:"DaemonSet", Namespace:"kube-system", Name:"kube-proxy", UID:"913f9ab7-8bea-4841-a60f-b094c95964a6", APIVersion:"apps/v1", ResourceVersion:"206", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: kube-proxy-8p84m
	I0731 10:43:56.623918       1 shared_informer.go:230] Caches are synced for garbage collector 
	I0731 10:43:56.712040       1 event.go:278] Event(v1.ObjectReference{Kind:"DaemonSet", Namespace:"kube-system", Name:"kindnet", UID:"7e272b19-9ce2-4f65-bc98-6bb5f75350c2", APIVersion:"apps/v1", ResourceVersion:"225", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: kindnet-djtg2
	I0731 10:44:11.505745       1 node_lifecycle_controller.go:1226] Controller detected that some Nodes are Ready. Exiting master disruption mode.
	I0731 10:44:22.323554       1 event.go:278] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"ingress-nginx", Name:"ingress-nginx-controller", UID:"75f99c5f-030f-4922-9d5c-bc472b8e1b0f", APIVersion:"apps/v1", ResourceVersion:"459", FieldPath:""}): type: 'Normal' reason: 'ScalingReplicaSet' Scaled up replica set ingress-nginx-controller-7fcf777cb7 to 1
	I0731 10:44:22.329093       1 event.go:278] Event(v1.ObjectReference{Kind:"ReplicaSet", Namespace:"ingress-nginx", Name:"ingress-nginx-controller-7fcf777cb7", UID:"6cbc0aa2-9d39-4cf7-9134-03495b9405c4", APIVersion:"apps/v1", ResourceVersion:"460", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: ingress-nginx-controller-7fcf777cb7-dmthl
	I0731 10:44:22.340618       1 event.go:278] Event(v1.ObjectReference{Kind:"Job", Namespace:"ingress-nginx", Name:"ingress-nginx-admission-create", UID:"e4e4013b-239f-45a4-9e62-5108dcaa9554", APIVersion:"batch/v1", ResourceVersion:"464", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: ingress-nginx-admission-create-42669
	I0731 10:44:22.413867       1 event.go:278] Event(v1.ObjectReference{Kind:"Job", Namespace:"ingress-nginx", Name:"ingress-nginx-admission-patch", UID:"b1a640b7-0247-4452-bd62-d77ab27bff56", APIVersion:"batch/v1", ResourceVersion:"473", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: ingress-nginx-admission-patch-fhh54
	I0731 10:44:25.959501       1 event.go:278] Event(v1.ObjectReference{Kind:"Job", Namespace:"ingress-nginx", Name:"ingress-nginx-admission-create", UID:"e4e4013b-239f-45a4-9e62-5108dcaa9554", APIVersion:"batch/v1", ResourceVersion:"477", FieldPath:""}): type: 'Normal' reason: 'Completed' Job completed
	I0731 10:44:25.965630       1 event.go:278] Event(v1.ObjectReference{Kind:"Job", Namespace:"ingress-nginx", Name:"ingress-nginx-admission-patch", UID:"b1a640b7-0247-4452-bd62-d77ab27bff56", APIVersion:"batch/v1", ResourceVersion:"483", FieldPath:""}): type: 'Normal' reason: 'Completed' Job completed
	I0731 10:47:01.484179       1 event.go:278] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"default", Name:"hello-world-app", UID:"c902c4b5-7293-45bb-a87c-e35d9a9e2141", APIVersion:"apps/v1", ResourceVersion:"694", FieldPath:""}): type: 'Normal' reason: 'ScalingReplicaSet' Scaled up replica set hello-world-app-5f5d8b66bb to 1
	I0731 10:47:01.492994       1 event.go:278] Event(v1.ObjectReference{Kind:"ReplicaSet", Namespace:"default", Name:"hello-world-app-5f5d8b66bb", UID:"d6a1ae4f-4955-4c0a-b313-b47ae122e53d", APIVersion:"apps/v1", ResourceVersion:"695", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: hello-world-app-5f5d8b66bb-c9xpv
	E0731 10:47:23.804515       1 tokens_controller.go:261] error synchronizing serviceaccount ingress-nginx/default: secrets "default-token-gz7jj" is forbidden: unable to create new content in namespace ingress-nginx because it is being terminated
	
	* 
	* ==> kube-proxy [607fef01d189f2f747a0272a4d346d73e1c943cb3c29551018e801b99d6a8afc] <==
	* W0731 10:43:57.367290       1 server_others.go:559] Unknown proxy mode "", assuming iptables proxy
	I0731 10:43:57.373474       1 node.go:136] Successfully retrieved node IP: 192.168.49.2
	I0731 10:43:57.373502       1 server_others.go:186] Using iptables Proxier.
	I0731 10:43:57.373770       1 server.go:583] Version: v1.18.20
	I0731 10:43:57.374300       1 config.go:133] Starting endpoints config controller
	I0731 10:43:57.374320       1 shared_informer.go:223] Waiting for caches to sync for endpoints config
	I0731 10:43:57.374367       1 config.go:315] Starting service config controller
	I0731 10:43:57.374382       1 shared_informer.go:223] Waiting for caches to sync for service config
	I0731 10:43:57.474511       1 shared_informer.go:230] Caches are synced for endpoints config 
	I0731 10:43:57.474561       1 shared_informer.go:230] Caches are synced for service config 
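	
	Since no proxy mode was set, kube-proxy fell back to iptables mode and programmed the node's NAT table with KUBE-* chains; the two "Caches are synced" lines confirm it had a complete service and endpoint view about 100ms after startup. The resulting rules can be inspected on the node (a sketch):
	
	    # service VIP dispatch rules installed by kube-proxy in iptables mode
	    sudo iptables -t nat -L KUBE-SERVICES -n | head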
	
	* 
	* ==> kube-scheduler [c3aafe8ff3995b8b77ac5cf97fa4092de2cb4f57a6553a9735ba353ab9eaac77] <==
	* I0731 10:43:33.411256       1 serving.go:313] Generated self-signed cert in-memory
	W0731 10:43:36.822136       1 authentication.go:349] Unable to get configmap/extension-apiserver-authentication in kube-system.  Usually fixed by 'kubectl create rolebinding -n kube-system ROLEBINDING_NAME --role=extension-apiserver-authentication-reader --serviceaccount=YOUR_NS:YOUR_SA'
	W0731 10:43:36.822222       1 authentication.go:297] Error looking up in-cluster authentication configuration: configmaps "extension-apiserver-authentication" is forbidden: User "system:kube-scheduler" cannot get resource "configmaps" in API group "" in the namespace "kube-system"
	W0731 10:43:36.822236       1 authentication.go:298] Continuing without authentication configuration. This may treat all requests as anonymous.
	W0731 10:43:36.822244       1 authentication.go:299] To require authentication configuration lookup to succeed, set --authentication-tolerate-lookup-failure=false
	I0731 10:43:36.907453       1 registry.go:150] Registering EvenPodsSpread predicate and priority function
	I0731 10:43:36.907479       1 registry.go:150] Registering EvenPodsSpread predicate and priority function
	I0731 10:43:36.909856       1 secure_serving.go:178] Serving securely on 127.0.0.1:10259
	I0731 10:43:36.910001       1 configmap_cafile_content.go:202] Starting client-ca::kube-system::extension-apiserver-authentication::client-ca-file
	I0731 10:43:36.910019       1 shared_informer.go:223] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file
	I0731 10:43:36.910042       1 tlsconfig.go:240] Starting DynamicServingCertificateController
	E0731 10:43:36.911857       1 reflector.go:178] k8s.io/kubernetes/cmd/kube-scheduler/app/server.go:233: Failed to list *v1.Pod: pods is forbidden: User "system:kube-scheduler" cannot list resource "pods" in API group "" at the cluster scope
	E0731 10:43:36.914791       1 reflector.go:178] k8s.io/apiserver/pkg/server/dynamiccertificates/configmap_cafile_content.go:206: Failed to list *v1.ConfigMap: configmaps "extension-apiserver-authentication" is forbidden: User "system:kube-scheduler" cannot list resource "configmaps" in API group "" in the namespace "kube-system"
	E0731 10:43:36.914972       1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.CSINode: csinodes.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csinodes" in API group "storage.k8s.io" at the cluster scope
	E0731 10:43:36.915234       1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.StatefulSet: statefulsets.apps is forbidden: User "system:kube-scheduler" cannot list resource "statefulsets" in API group "apps" at the cluster scope
	E0731 10:43:36.915038       1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.Node: nodes is forbidden: User "system:kube-scheduler" cannot list resource "nodes" in API group "" at the cluster scope
	E0731 10:43:36.915052       1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.Service: services is forbidden: User "system:kube-scheduler" cannot list resource "services" in API group "" at the cluster scope
	E0731 10:43:36.915129       1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1beta1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User "system:kube-scheduler" cannot list resource "poddisruptionbudgets" in API group "policy" at the cluster scope
	E0731 10:43:36.915129       1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumeclaims" in API group "" at the cluster scope
	E0731 10:43:36.915200       1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.ReplicaSet: replicasets.apps is forbidden: User "system:kube-scheduler" cannot list resource "replicasets" in API group "apps" at the cluster scope
	E0731 10:43:36.915337       1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumes" in API group "" at the cluster scope
	E0731 10:43:36.915526       1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope
	E0731 10:43:36.915988       1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.ReplicationController: replicationcontrollers is forbidden: User "system:kube-scheduler" cannot list resource "replicationcontrollers" in API group "" at the cluster scope
	I0731 10:43:38.410151       1 shared_informer.go:230] Caches are synced for client-ca::kube-system::extension-apiserver-authentication::client-ca-file 
	E0731 10:43:57.326964       1 factory.go:503] pod: kube-system/storage-provisioner is already present in unschedulable queue
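	
	The burst of "forbidden" list errors is the familiar scheduler start-up race: its informers begin listing before the system:kube-scheduler RBAC bindings are served, and the successful cache sync at 10:43:38 shows it recovered on the first retry. After startup the same permission can be checked explicitly (a sketch; impersonating a user requires suitable rights on the caller's side):
	
	    kubectl auth can-i list pods --as=system:kube-scheduler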
	
	* 
	* ==> kubelet <==
	* Jul 31 10:46:46 ingress-addon-legacy-538476 kubelet[1887]: E0731 10:46:46.818556    1887 kuberuntime_manager.go:818] container start failed: ImageInspectError: Failed to inspect image "cryptexlabs/minikube-ingress-dns:0.3.0@sha256:e252d2a4c704027342b303cc563e95d2e71d2a0f1404f55d676390e28d5093ab": rpc error: code = Unknown desc = short-name "cryptexlabs/minikube-ingress-dns:0.3.0@sha256:e252d2a4c704027342b303cc563e95d2e71d2a0f1404f55d676390e28d5093ab" did not resolve to an alias and no unqualified-search registries are defined in "/etc/containers/registries.conf"
	Jul 31 10:46:46 ingress-addon-legacy-538476 kubelet[1887]: E0731 10:46:46.818587    1887 pod_workers.go:191] Error syncing pod 2e4a8c1f-1e25-497b-984e-967b9aa9a028 ("kube-ingress-dns-minikube_kube-system(2e4a8c1f-1e25-497b-984e-967b9aa9a028)"), skipping: failed to "StartContainer" for "minikube-ingress-dns" with ImageInspectError: "Failed to inspect image \"cryptexlabs/minikube-ingress-dns:0.3.0@sha256:e252d2a4c704027342b303cc563e95d2e71d2a0f1404f55d676390e28d5093ab\": rpc error: code = Unknown desc = short-name \"cryptexlabs/minikube-ingress-dns:0.3.0@sha256:e252d2a4c704027342b303cc563e95d2e71d2a0f1404f55d676390e28d5093ab\" did not resolve to an alias and no unqualified-search registries are defined in \"/etc/containers/registries.conf\""
	Jul 31 10:47:00 ingress-addon-legacy-538476 kubelet[1887]: E0731 10:47:00.818471    1887 remote_image.go:87] ImageStatus "cryptexlabs/minikube-ingress-dns:0.3.0@sha256:e252d2a4c704027342b303cc563e95d2e71d2a0f1404f55d676390e28d5093ab" from image service failed: rpc error: code = Unknown desc = short-name "cryptexlabs/minikube-ingress-dns:0.3.0@sha256:e252d2a4c704027342b303cc563e95d2e71d2a0f1404f55d676390e28d5093ab" did not resolve to an alias and no unqualified-search registries are defined in "/etc/containers/registries.conf"
	Jul 31 10:47:00 ingress-addon-legacy-538476 kubelet[1887]: E0731 10:47:00.818510    1887 kuberuntime_image.go:85] ImageStatus for image {"cryptexlabs/minikube-ingress-dns:0.3.0@sha256:e252d2a4c704027342b303cc563e95d2e71d2a0f1404f55d676390e28d5093ab"} failed: rpc error: code = Unknown desc = short-name "cryptexlabs/minikube-ingress-dns:0.3.0@sha256:e252d2a4c704027342b303cc563e95d2e71d2a0f1404f55d676390e28d5093ab" did not resolve to an alias and no unqualified-search registries are defined in "/etc/containers/registries.conf"
	Jul 31 10:47:00 ingress-addon-legacy-538476 kubelet[1887]: E0731 10:47:00.818549    1887 kuberuntime_manager.go:818] container start failed: ImageInspectError: Failed to inspect image "cryptexlabs/minikube-ingress-dns:0.3.0@sha256:e252d2a4c704027342b303cc563e95d2e71d2a0f1404f55d676390e28d5093ab": rpc error: code = Unknown desc = short-name "cryptexlabs/minikube-ingress-dns:0.3.0@sha256:e252d2a4c704027342b303cc563e95d2e71d2a0f1404f55d676390e28d5093ab" did not resolve to an alias and no unqualified-search registries are defined in "/etc/containers/registries.conf"
	Jul 31 10:47:00 ingress-addon-legacy-538476 kubelet[1887]: E0731 10:47:00.818573    1887 pod_workers.go:191] Error syncing pod 2e4a8c1f-1e25-497b-984e-967b9aa9a028 ("kube-ingress-dns-minikube_kube-system(2e4a8c1f-1e25-497b-984e-967b9aa9a028)"), skipping: failed to "StartContainer" for "minikube-ingress-dns" with ImageInspectError: "Failed to inspect image \"cryptexlabs/minikube-ingress-dns:0.3.0@sha256:e252d2a4c704027342b303cc563e95d2e71d2a0f1404f55d676390e28d5093ab\": rpc error: code = Unknown desc = short-name \"cryptexlabs/minikube-ingress-dns:0.3.0@sha256:e252d2a4c704027342b303cc563e95d2e71d2a0f1404f55d676390e28d5093ab\" did not resolve to an alias and no unqualified-search registries are defined in \"/etc/containers/registries.conf\""
	Jul 31 10:47:01 ingress-addon-legacy-538476 kubelet[1887]: I0731 10:47:01.496988    1887 topology_manager.go:235] [topologymanager] Topology Admit Handler
	Jul 31 10:47:01 ingress-addon-legacy-538476 kubelet[1887]: I0731 10:47:01.604561    1887 reconciler.go:224] operationExecutor.VerifyControllerAttachedVolume started for volume "default-token-wmdqn" (UniqueName: "kubernetes.io/secret/88d32f1b-9e94-4720-8f8b-ff57ac5898f7-default-token-wmdqn") pod "hello-world-app-5f5d8b66bb-c9xpv" (UID: "88d32f1b-9e94-4720-8f8b-ff57ac5898f7")
	Jul 31 10:47:01 ingress-addon-legacy-538476 kubelet[1887]: W0731 10:47:01.847026    1887 manager.go:1131] Failed to process watch event {EventType:0 Name:/docker/ddc807f676f3c79cf790cac2393305c7fd24a541731a2e987d238a98e9094e19/crio-9f3bd927bbbbb189662ac2fe53be5964605361dcc1f166b2b25673a492ee2d99 WatchSource:0}: Error finding container 9f3bd927bbbbb189662ac2fe53be5964605361dcc1f166b2b25673a492ee2d99: Status 404 returned error &{%!!(MISSING)s(*http.body=&{0xc0014f6100 <nil> <nil> false false {0 0} false false false <nil>}) {%!!(MISSING)s(int32=0) %!!(MISSING)s(uint32=0)} %!!(MISSING)s(bool=false) <nil> %!!(MISSING)s(func(error) error=0x750800) %!!(MISSING)s(func() error=0x750790)}
	Jul 31 10:47:12 ingress-addon-legacy-538476 kubelet[1887]: E0731 10:47:12.818435    1887 remote_image.go:87] ImageStatus "cryptexlabs/minikube-ingress-dns:0.3.0@sha256:e252d2a4c704027342b303cc563e95d2e71d2a0f1404f55d676390e28d5093ab" from image service failed: rpc error: code = Unknown desc = short-name "cryptexlabs/minikube-ingress-dns:0.3.0@sha256:e252d2a4c704027342b303cc563e95d2e71d2a0f1404f55d676390e28d5093ab" did not resolve to an alias and no unqualified-search registries are defined in "/etc/containers/registries.conf"
	Jul 31 10:47:12 ingress-addon-legacy-538476 kubelet[1887]: E0731 10:47:12.818486    1887 kuberuntime_image.go:85] ImageStatus for image {"cryptexlabs/minikube-ingress-dns:0.3.0@sha256:e252d2a4c704027342b303cc563e95d2e71d2a0f1404f55d676390e28d5093ab"} failed: rpc error: code = Unknown desc = short-name "cryptexlabs/minikube-ingress-dns:0.3.0@sha256:e252d2a4c704027342b303cc563e95d2e71d2a0f1404f55d676390e28d5093ab" did not resolve to an alias and no unqualified-search registries are defined in "/etc/containers/registries.conf"
	Jul 31 10:47:12 ingress-addon-legacy-538476 kubelet[1887]: E0731 10:47:12.818538    1887 kuberuntime_manager.go:818] container start failed: ImageInspectError: Failed to inspect image "cryptexlabs/minikube-ingress-dns:0.3.0@sha256:e252d2a4c704027342b303cc563e95d2e71d2a0f1404f55d676390e28d5093ab": rpc error: code = Unknown desc = short-name "cryptexlabs/minikube-ingress-dns:0.3.0@sha256:e252d2a4c704027342b303cc563e95d2e71d2a0f1404f55d676390e28d5093ab" did not resolve to an alias and no unqualified-search registries are defined in "/etc/containers/registries.conf"
	Jul 31 10:47:12 ingress-addon-legacy-538476 kubelet[1887]: E0731 10:47:12.818574    1887 pod_workers.go:191] Error syncing pod 2e4a8c1f-1e25-497b-984e-967b9aa9a028 ("kube-ingress-dns-minikube_kube-system(2e4a8c1f-1e25-497b-984e-967b9aa9a028)"), skipping: failed to "StartContainer" for "minikube-ingress-dns" with ImageInspectError: "Failed to inspect image \"cryptexlabs/minikube-ingress-dns:0.3.0@sha256:e252d2a4c704027342b303cc563e95d2e71d2a0f1404f55d676390e28d5093ab\": rpc error: code = Unknown desc = short-name \"cryptexlabs/minikube-ingress-dns:0.3.0@sha256:e252d2a4c704027342b303cc563e95d2e71d2a0f1404f55d676390e28d5093ab\" did not resolve to an alias and no unqualified-search registries are defined in \"/etc/containers/registries.conf\""
	Jul 31 10:47:17 ingress-addon-legacy-538476 kubelet[1887]: I0731 10:47:17.238684    1887 reconciler.go:196] operationExecutor.UnmountVolume started for volume "minikube-ingress-dns-token-8l8l5" (UniqueName: "kubernetes.io/secret/2e4a8c1f-1e25-497b-984e-967b9aa9a028-minikube-ingress-dns-token-8l8l5") pod "2e4a8c1f-1e25-497b-984e-967b9aa9a028" (UID: "2e4a8c1f-1e25-497b-984e-967b9aa9a028")
	Jul 31 10:47:17 ingress-addon-legacy-538476 kubelet[1887]: I0731 10:47:17.240452    1887 operation_generator.go:782] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2e4a8c1f-1e25-497b-984e-967b9aa9a028-minikube-ingress-dns-token-8l8l5" (OuterVolumeSpecName: "minikube-ingress-dns-token-8l8l5") pod "2e4a8c1f-1e25-497b-984e-967b9aa9a028" (UID: "2e4a8c1f-1e25-497b-984e-967b9aa9a028"). InnerVolumeSpecName "minikube-ingress-dns-token-8l8l5". PluginName "kubernetes.io/secret", VolumeGidValue ""
	Jul 31 10:47:17 ingress-addon-legacy-538476 kubelet[1887]: I0731 10:47:17.338954    1887 reconciler.go:319] Volume detached for volume "minikube-ingress-dns-token-8l8l5" (UniqueName: "kubernetes.io/secret/2e4a8c1f-1e25-497b-984e-967b9aa9a028-minikube-ingress-dns-token-8l8l5") on node "ingress-addon-legacy-538476" DevicePath ""
	Jul 31 10:47:18 ingress-addon-legacy-538476 kubelet[1887]: E0731 10:47:18.995304    1887 event.go:260] Server rejected event '&v1.Event{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ObjectMeta:v1.ObjectMeta{Name:"ingress-nginx-controller-7fcf777cb7-dmthl.1776ee0a804e3d83", GenerateName:"", Namespace:"ingress-nginx", SelfLink:"", UID:"", ResourceVersion:"", Generation:0, CreationTimestamp:v1.Time{Time:time.Time{wall:0x0, ext:0, loc:(*time.Location)(nil)}}, DeletionTimestamp:(*v1.Time)(nil), DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string(nil), Annotations:map[string]string(nil), OwnerReferences:[]v1.OwnerReference(nil), Finalizers:[]string(nil), ClusterName:"", ManagedFields:[]v1.ManagedFieldsEntry(nil)}, InvolvedObject:v1.ObjectReference{Kind:"Pod", Namespace:"ingress-nginx", Name:"ingress-nginx-controller-7fcf777cb7-dmthl", UID:"18984b9b-e7d5-4027-a1c5-d126db73b13f", APIVersion:"v1", ResourceVersion:"466", FieldPath:"spec.containers{controller}"}, Reason:"Killing", Message:"Stopping container controller", Source:v1.EventSource{Component:"kubelet", Host:"ingress-addon-legacy-538476"}, FirstTimestamp:v1.Time{Time:time.Time{wall:0xc12a022dbb3da183, ext:219605926439, loc:(*time.Location)(0x701e5a0)}}, LastTimestamp:v1.Time{Time:time.Time{wall:0xc12a022dbb3da183, ext:219605926439, loc:(*time.Location)(0x701e5a0)}}, Count:1, Type:"Normal", EventTime:v1.MicroTime{Time:time.Time{wall:0x0, ext:0, loc:(*time.Location)(nil)}}, Series:(*v1.EventSeries)(nil), Action:"", Related:(*v1.ObjectReference)(nil), ReportingController:"", ReportingInstance:""}': 'events "ingress-nginx-controller-7fcf777cb7-dmthl.1776ee0a804e3d83" is forbidden: unable to create new content in namespace ingress-nginx because it is being terminated' (will not retry!)
	Jul 31 10:47:18 ingress-addon-legacy-538476 kubelet[1887]: E0731 10:47:18.999820    1887 event.go:260] Server rejected event '&v1.Event{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ObjectMeta:v1.ObjectMeta{Name:"ingress-nginx-controller-7fcf777cb7-dmthl.1776ee0a804e3d83", GenerateName:"", Namespace:"ingress-nginx", SelfLink:"", UID:"", ResourceVersion:"", Generation:0, CreationTimestamp:v1.Time{Time:time.Time{wall:0x0, ext:0, loc:(*time.Location)(nil)}}, DeletionTimestamp:(*v1.Time)(nil), DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string(nil), Annotations:map[string]string(nil), OwnerReferences:[]v1.OwnerReference(nil), Finalizers:[]string(nil), ClusterName:"", ManagedFields:[]v1.ManagedFieldsEntry(nil)}, InvolvedObject:v1.ObjectReference{Kind:"Pod", Namespace:"ingress-nginx", Name:"ingress-nginx-controller-7fcf777cb7-dmthl", UID:"18984b9b-e7d5-4027-a1c5-d126db73b13f", APIVersion:"v1", ResourceVersion:"466", FieldPath:"spec.containers{controller}"}, Reason:"Killing", Message:"Stopping container controller", Source:v1.EventSource{Component:"kubelet", Host:"ingress-addon-legacy-538476"}, FirstTimestamp:v1.Time{Time:time.Time{wall:0xc12a022dbb3da183, ext:219605926439, loc:(*time.Location)(0x701e5a0)}}, LastTimestamp:v1.Time{Time:time.Time{wall:0xc12a022dbb690bec, ext:219608771726, loc:(*time.Location)(0x701e5a0)}}, Count:2, Type:"Normal", EventTime:v1.MicroTime{Time:time.Time{wall:0x0, ext:0, loc:(*time.Location)(nil)}}, Series:(*v1.EventSeries)(nil), Action:"", Related:(*v1.ObjectReference)(nil), ReportingController:"", ReportingInstance:""}': 'events "ingress-nginx-controller-7fcf777cb7-dmthl.1776ee0a804e3d83" is forbidden: unable to create new content in namespace ingress-nginx because it is being terminated' (will not retry!)
	Jul 31 10:47:21 ingress-addon-legacy-538476 kubelet[1887]: I0731 10:47:21.247334    1887 reconciler.go:196] operationExecutor.UnmountVolume started for volume "webhook-cert" (UniqueName: "kubernetes.io/secret/18984b9b-e7d5-4027-a1c5-d126db73b13f-webhook-cert") pod "18984b9b-e7d5-4027-a1c5-d126db73b13f" (UID: "18984b9b-e7d5-4027-a1c5-d126db73b13f")
	Jul 31 10:47:21 ingress-addon-legacy-538476 kubelet[1887]: I0731 10:47:21.247392    1887 reconciler.go:196] operationExecutor.UnmountVolume started for volume "ingress-nginx-token-6djkg" (UniqueName: "kubernetes.io/secret/18984b9b-e7d5-4027-a1c5-d126db73b13f-ingress-nginx-token-6djkg") pod "18984b9b-e7d5-4027-a1c5-d126db73b13f" (UID: "18984b9b-e7d5-4027-a1c5-d126db73b13f")
	Jul 31 10:47:21 ingress-addon-legacy-538476 kubelet[1887]: I0731 10:47:21.249195    1887 operation_generator.go:782] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/18984b9b-e7d5-4027-a1c5-d126db73b13f-webhook-cert" (OuterVolumeSpecName: "webhook-cert") pod "18984b9b-e7d5-4027-a1c5-d126db73b13f" (UID: "18984b9b-e7d5-4027-a1c5-d126db73b13f"). InnerVolumeSpecName "webhook-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
	Jul 31 10:47:21 ingress-addon-legacy-538476 kubelet[1887]: I0731 10:47:21.249462    1887 operation_generator.go:782] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/18984b9b-e7d5-4027-a1c5-d126db73b13f-ingress-nginx-token-6djkg" (OuterVolumeSpecName: "ingress-nginx-token-6djkg") pod "18984b9b-e7d5-4027-a1c5-d126db73b13f" (UID: "18984b9b-e7d5-4027-a1c5-d126db73b13f"). InnerVolumeSpecName "ingress-nginx-token-6djkg". PluginName "kubernetes.io/secret", VolumeGidValue ""
	Jul 31 10:47:21 ingress-addon-legacy-538476 kubelet[1887]: W0731 10:47:21.256059    1887 pod_container_deletor.go:77] Container "1a1d1bbcf0b6019d443a166a445fa40d0699e9488ac8d45cc1327ab0a8b0f31f" not found in pod's containers
	Jul 31 10:47:21 ingress-addon-legacy-538476 kubelet[1887]: I0731 10:47:21.347669    1887 reconciler.go:319] Volume detached for volume "webhook-cert" (UniqueName: "kubernetes.io/secret/18984b9b-e7d5-4027-a1c5-d126db73b13f-webhook-cert") on node "ingress-addon-legacy-538476" DevicePath ""
	Jul 31 10:47:21 ingress-addon-legacy-538476 kubelet[1887]: I0731 10:47:21.347707    1887 reconciler.go:319] Volume detached for volume "ingress-nginx-token-6djkg" (UniqueName: "kubernetes.io/secret/18984b9b-e7d5-4027-a1c5-d126db73b13f-ingress-nginx-token-6djkg") on node "ingress-addon-legacy-538476" DevicePath ""
	
	* 
	* ==> storage-provisioner [4edd9d79e3a3d67a06a9d217ca68ffb2a5e8debb0d9e2005840a2f5713ad41c8] <==
	* I0731 10:44:12.954944       1 storage_provisioner.go:116] Initializing the minikube storage provisioner...
	I0731 10:44:12.962312       1 storage_provisioner.go:141] Storage provisioner initialized, now starting service!
	I0731 10:44:12.962381       1 leaderelection.go:243] attempting to acquire leader lease kube-system/k8s.io-minikube-hostpath...
	I0731 10:44:12.967143       1 leaderelection.go:253] successfully acquired lease kube-system/k8s.io-minikube-hostpath
	I0731 10:44:12.967244       1 controller.go:835] Starting provisioner controller k8s.io/minikube-hostpath_ingress-addon-legacy-538476_9c36b9a6-1730-4400-a150-0548f2eef095!
	I0731 10:44:12.967479       1 event.go:282] Event(v1.ObjectReference{Kind:"Endpoints", Namespace:"kube-system", Name:"k8s.io-minikube-hostpath", UID:"25c8b43a-d279-4740-8cc9-cc99b4e348b2", APIVersion:"v1", ResourceVersion:"403", FieldPath:""}): type: 'Normal' reason: 'LeaderElection' ingress-addon-legacy-538476_9c36b9a6-1730-4400-a150-0548f2eef095 became leader
	I0731 10:44:13.067648       1 controller.go:884] Started provisioner controller k8s.io/minikube-hostpath_ingress-addon-legacy-538476_9c36b9a6-1730-4400-a150-0548f2eef095!
	

                                                
                                                
-- /stdout --
helpers_test.go:254: (dbg) Run:  out/minikube-linux-amd64 status --format={{.APIServer}} -p ingress-addon-legacy-538476 -n ingress-addon-legacy-538476
helpers_test.go:261: (dbg) Run:  kubectl --context ingress-addon-legacy-538476 get po -o=jsonpath={.items[*].metadata.name} -A --field-selector=status.phase!=Running
helpers_test.go:285: <<< TestIngressAddonLegacy/serial/ValidateIngressAddons FAILED: end of post-mortem logs <<<
helpers_test.go:286: ---------------------/post-mortem---------------------------------
--- FAIL: TestIngressAddonLegacy/serial/ValidateIngressAddons (174.38s)
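Note: the repeated kubelet "Server rejected event ... unable to create new content in namespace ingress-nginx because it is being terminated" errors in the log above are expected teardown noise: the kubelet keeps posting container-kill events while the addon namespace is already being deleted. To confirm the namespace state at that point, a command sketch (not part of the recorded run):

	kubectl --context ingress-addon-legacy-538476 get namespace ingress-nginx -o jsonpath='{.status.phase}'
	# prints "Terminating" while deletion is still in progress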

                                                
                                    
TestMultiNode/serial/PingHostFrom2Pods (3.23s)

                                                
                                                
=== RUN   TestMultiNode/serial/PingHostFrom2Pods
multinode_test.go:552: (dbg) Run:  out/minikube-linux-amd64 kubectl -p multinode-776386 -- get pods -o jsonpath='{.items[*].metadata.name}'
multinode_test.go:560: (dbg) Run:  out/minikube-linux-amd64 kubectl -p multinode-776386 -- exec busybox-67b7f59bb-trlh5 -- sh -c "nslookup host.minikube.internal | awk 'NR==5' | cut -d' ' -f3"
multinode_test.go:571: (dbg) Run:  out/minikube-linux-amd64 kubectl -p multinode-776386 -- exec busybox-67b7f59bb-trlh5 -- sh -c "ping -c 1 192.168.58.1"
multinode_test.go:571: (dbg) Non-zero exit: out/minikube-linux-amd64 kubectl -p multinode-776386 -- exec busybox-67b7f59bb-trlh5 -- sh -c "ping -c 1 192.168.58.1": exit status 1 (167.19624ms)

                                                
                                                
-- stdout --
	PING 192.168.58.1 (192.168.58.1): 56 data bytes

                                                
                                                
-- /stdout --
** stderr ** 
	ping: permission denied (are you root?)
	command terminated with exit code 1

                                                
                                                
** /stderr **
multinode_test.go:572: Failed to ping host (192.168.58.1) from pod (busybox-67b7f59bb-trlh5): exit status 1
multinode_test.go:560: (dbg) Run:  out/minikube-linux-amd64 kubectl -p multinode-776386 -- exec busybox-67b7f59bb-tvf5p -- sh -c "nslookup host.minikube.internal | awk 'NR==5' | cut -d' ' -f3"
multinode_test.go:571: (dbg) Run:  out/minikube-linux-amd64 kubectl -p multinode-776386 -- exec busybox-67b7f59bb-tvf5p -- sh -c "ping -c 1 192.168.58.1"
multinode_test.go:571: (dbg) Non-zero exit: out/minikube-linux-amd64 kubectl -p multinode-776386 -- exec busybox-67b7f59bb-tvf5p -- sh -c "ping -c 1 192.168.58.1": exit status 1 (149.415497ms)

                                                
                                                
-- stdout --
	PING 192.168.58.1 (192.168.58.1): 56 data bytes

                                                
                                                
-- /stdout --
** stderr ** 
	ping: permission denied (are you root?)
	command terminated with exit code 1

                                                
                                                
** /stderr **
multinode_test.go:572: Failed to ping host (192.168.58.1) from pod (busybox-67b7f59bb-tvf5p): exit status 1
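Note: "ping: permission denied (are you root?)" from busybox ping is the classic symptom of a container that lacks the CAP_NET_RAW capability needed to open a raw ICMP socket; CRI-O, unlike the dockerd runtime, does not grant NET_RAW to containers by default. A minimal sketch of a workaround that grants the capability explicitly (the pod name ping-capraw and the overrides payload are illustrative, not taken from this run):

	# one-off pod with CAP_NET_RAW added so busybox ping can open a raw ICMP socket
	kubectl run ping-capraw --image=busybox --restart=Never \
	  --overrides='{"spec":{"containers":[{"name":"ping-capraw","image":"busybox","command":["ping","-c","1","192.168.58.1"],"securityContext":{"capabilities":{"add":["NET_RAW"]}}}]}}'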
helpers_test.go:222: -----------------------post-mortem--------------------------------
helpers_test.go:230: ======>  post-mortem[TestMultiNode/serial/PingHostFrom2Pods]: docker inspect <======
helpers_test.go:231: (dbg) Run:  docker inspect multinode-776386
helpers_test.go:235: (dbg) docker inspect multinode-776386:

                                                
                                                
-- stdout --
	[
	    {
	        "Id": "656908c1a0b09695a6320c09f205c4cc49aa9f1eeb5bc93be8b593553d25e518",
	        "Created": "2023-07-31T10:52:21.868486545Z",
	        "Path": "/usr/local/bin/entrypoint",
	        "Args": [
	            "/sbin/init"
	        ],
	        "State": {
	            "Status": "running",
	            "Running": true,
	            "Paused": false,
	            "Restarting": false,
	            "OOMKilled": false,
	            "Dead": false,
	            "Pid": 97475,
	            "ExitCode": 0,
	            "Error": "",
	            "StartedAt": "2023-07-31T10:52:22.135349598Z",
	            "FinishedAt": "0001-01-01T00:00:00Z"
	        },
	        "Image": "sha256:c6cc01e6091959400f260dc442708e7c71630b58dab1f7c344cb00926bd84950",
	        "ResolvConfPath": "/var/lib/docker/containers/656908c1a0b09695a6320c09f205c4cc49aa9f1eeb5bc93be8b593553d25e518/resolv.conf",
	        "HostnamePath": "/var/lib/docker/containers/656908c1a0b09695a6320c09f205c4cc49aa9f1eeb5bc93be8b593553d25e518/hostname",
	        "HostsPath": "/var/lib/docker/containers/656908c1a0b09695a6320c09f205c4cc49aa9f1eeb5bc93be8b593553d25e518/hosts",
	        "LogPath": "/var/lib/docker/containers/656908c1a0b09695a6320c09f205c4cc49aa9f1eeb5bc93be8b593553d25e518/656908c1a0b09695a6320c09f205c4cc49aa9f1eeb5bc93be8b593553d25e518-json.log",
	        "Name": "/multinode-776386",
	        "RestartCount": 0,
	        "Driver": "overlay2",
	        "Platform": "linux",
	        "MountLabel": "",
	        "ProcessLabel": "",
	        "AppArmorProfile": "unconfined",
	        "ExecIDs": null,
	        "HostConfig": {
	            "Binds": [
	                "/lib/modules:/lib/modules:ro",
	                "multinode-776386:/var"
	            ],
	            "ContainerIDFile": "",
	            "LogConfig": {
	                "Type": "json-file",
	                "Config": {
	                    "max-size": "100m"
	                }
	            },
	            "NetworkMode": "multinode-776386",
	            "PortBindings": {
	                "22/tcp": [
	                    {
	                        "HostIp": "127.0.0.1",
	                        "HostPort": ""
	                    }
	                ],
	                "2376/tcp": [
	                    {
	                        "HostIp": "127.0.0.1",
	                        "HostPort": ""
	                    }
	                ],
	                "32443/tcp": [
	                    {
	                        "HostIp": "127.0.0.1",
	                        "HostPort": ""
	                    }
	                ],
	                "5000/tcp": [
	                    {
	                        "HostIp": "127.0.0.1",
	                        "HostPort": ""
	                    }
	                ],
	                "8443/tcp": [
	                    {
	                        "HostIp": "127.0.0.1",
	                        "HostPort": ""
	                    }
	                ]
	            },
	            "RestartPolicy": {
	                "Name": "no",
	                "MaximumRetryCount": 0
	            },
	            "AutoRemove": false,
	            "VolumeDriver": "",
	            "VolumesFrom": null,
	            "ConsoleSize": [
	                0,
	                0
	            ],
	            "CapAdd": null,
	            "CapDrop": null,
	            "CgroupnsMode": "host",
	            "Dns": [],
	            "DnsOptions": [],
	            "DnsSearch": [],
	            "ExtraHosts": null,
	            "GroupAdd": null,
	            "IpcMode": "private",
	            "Cgroup": "",
	            "Links": null,
	            "OomScoreAdj": 0,
	            "PidMode": "",
	            "Privileged": true,
	            "PublishAllPorts": false,
	            "ReadonlyRootfs": false,
	            "SecurityOpt": [
	                "seccomp=unconfined",
	                "apparmor=unconfined",
	                "label=disable"
	            ],
	            "Tmpfs": {
	                "/run": "",
	                "/tmp": ""
	            },
	            "UTSMode": "",
	            "UsernsMode": "",
	            "ShmSize": 67108864,
	            "Runtime": "runc",
	            "Isolation": "",
	            "CpuShares": 0,
	            "Memory": 2306867200,
	            "NanoCpus": 2000000000,
	            "CgroupParent": "",
	            "BlkioWeight": 0,
	            "BlkioWeightDevice": [],
	            "BlkioDeviceReadBps": [],
	            "BlkioDeviceWriteBps": [],
	            "BlkioDeviceReadIOps": [],
	            "BlkioDeviceWriteIOps": [],
	            "CpuPeriod": 0,
	            "CpuQuota": 0,
	            "CpuRealtimePeriod": 0,
	            "CpuRealtimeRuntime": 0,
	            "CpusetCpus": "",
	            "CpusetMems": "",
	            "Devices": [],
	            "DeviceCgroupRules": null,
	            "DeviceRequests": null,
	            "MemoryReservation": 0,
	            "MemorySwap": 4613734400,
	            "MemorySwappiness": null,
	            "OomKillDisable": false,
	            "PidsLimit": null,
	            "Ulimits": null,
	            "CpuCount": 0,
	            "CpuPercent": 0,
	            "IOMaximumIOps": 0,
	            "IOMaximumBandwidth": 0,
	            "MaskedPaths": null,
	            "ReadonlyPaths": null
	        },
	        "GraphDriver": {
	            "Data": {
	                "LowerDir": "/var/lib/docker/overlay2/eaca5c695447378d21b2719b1f2a327f44f60036f19e2623e101570f5dbcd587-init/diff:/var/lib/docker/overlay2/738d78659811af5605d784380774f3996551e9a95d42d3d998a185d72e7e9dcf/diff",
	                "MergedDir": "/var/lib/docker/overlay2/eaca5c695447378d21b2719b1f2a327f44f60036f19e2623e101570f5dbcd587/merged",
	                "UpperDir": "/var/lib/docker/overlay2/eaca5c695447378d21b2719b1f2a327f44f60036f19e2623e101570f5dbcd587/diff",
	                "WorkDir": "/var/lib/docker/overlay2/eaca5c695447378d21b2719b1f2a327f44f60036f19e2623e101570f5dbcd587/work"
	            },
	            "Name": "overlay2"
	        },
	        "Mounts": [
	            {
	                "Type": "bind",
	                "Source": "/lib/modules",
	                "Destination": "/lib/modules",
	                "Mode": "ro",
	                "RW": false,
	                "Propagation": "rprivate"
	            },
	            {
	                "Type": "volume",
	                "Name": "multinode-776386",
	                "Source": "/var/lib/docker/volumes/multinode-776386/_data",
	                "Destination": "/var",
	                "Driver": "local",
	                "Mode": "z",
	                "RW": true,
	                "Propagation": ""
	            }
	        ],
	        "Config": {
	            "Hostname": "multinode-776386",
	            "Domainname": "",
	            "User": "",
	            "AttachStdin": false,
	            "AttachStdout": false,
	            "AttachStderr": false,
	            "ExposedPorts": {
	                "22/tcp": {},
	                "2376/tcp": {},
	                "32443/tcp": {},
	                "5000/tcp": {},
	                "8443/tcp": {}
	            },
	            "Tty": true,
	            "OpenStdin": false,
	            "StdinOnce": false,
	            "Env": [
	                "container=docker",
	                "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
	            ],
	            "Cmd": null,
	            "Image": "gcr.io/k8s-minikube/kicbase:v0.0.40@sha256:8cadf23777709e43eca447c47a45f5a4635615129267ce025193040ec92a1631",
	            "Volumes": null,
	            "WorkingDir": "/",
	            "Entrypoint": [
	                "/usr/local/bin/entrypoint",
	                "/sbin/init"
	            ],
	            "OnBuild": null,
	            "Labels": {
	                "created_by.minikube.sigs.k8s.io": "true",
	                "mode.minikube.sigs.k8s.io": "multinode-776386",
	                "name.minikube.sigs.k8s.io": "multinode-776386",
	                "role.minikube.sigs.k8s.io": ""
	            },
	            "StopSignal": "SIGRTMIN+3"
	        },
	        "NetworkSettings": {
	            "Bridge": "",
	            "SandboxID": "37bf8c57e487b7f0837a7d52d6ec9ba662d0aed6cbd680473a0fffdbcd1a73bc",
	            "HairpinMode": false,
	            "LinkLocalIPv6Address": "",
	            "LinkLocalIPv6PrefixLen": 0,
	            "Ports": {
	                "22/tcp": [
	                    {
	                        "HostIp": "127.0.0.1",
	                        "HostPort": "32847"
	                    }
	                ],
	                "2376/tcp": [
	                    {
	                        "HostIp": "127.0.0.1",
	                        "HostPort": "32846"
	                    }
	                ],
	                "32443/tcp": [
	                    {
	                        "HostIp": "127.0.0.1",
	                        "HostPort": "32843"
	                    }
	                ],
	                "5000/tcp": [
	                    {
	                        "HostIp": "127.0.0.1",
	                        "HostPort": "32845"
	                    }
	                ],
	                "8443/tcp": [
	                    {
	                        "HostIp": "127.0.0.1",
	                        "HostPort": "32844"
	                    }
	                ]
	            },
	            "SandboxKey": "/var/run/docker/netns/37bf8c57e487",
	            "SecondaryIPAddresses": null,
	            "SecondaryIPv6Addresses": null,
	            "EndpointID": "",
	            "Gateway": "",
	            "GlobalIPv6Address": "",
	            "GlobalIPv6PrefixLen": 0,
	            "IPAddress": "",
	            "IPPrefixLen": 0,
	            "IPv6Gateway": "",
	            "MacAddress": "",
	            "Networks": {
	                "multinode-776386": {
	                    "IPAMConfig": {
	                        "IPv4Address": "192.168.58.2"
	                    },
	                    "Links": null,
	                    "Aliases": [
	                        "656908c1a0b0",
	                        "multinode-776386"
	                    ],
	                    "NetworkID": "5fdea51da9aab8807253d13dce058f31b6451a37117796c25e8cdf54d57714c8",
	                    "EndpointID": "70091a8d2445ec02d259c0ad893389f137147743e1377aa58768b9470b5a4d81",
	                    "Gateway": "192.168.58.1",
	                    "IPAddress": "192.168.58.2",
	                    "IPPrefixLen": 24,
	                    "IPv6Gateway": "",
	                    "GlobalIPv6Address": "",
	                    "GlobalIPv6PrefixLen": 0,
	                    "MacAddress": "02:42:c0:a8:3a:02",
	                    "DriverOpts": null
	                }
	            }
	        }
	    }
	]

                                                
                                                
-- /stdout --
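Note: the ping target 192.168.58.1 in this test is the gateway of the multinode-776386 network shown in the inspect output above, i.e. the host side of the cluster's docker bridge, so the failing check is pod-to-host connectivity. The gateway can be read back directly (a command sketch):

	docker network inspect multinode-776386 --format '{{(index .IPAM.Config 0).Gateway}}'
	# 192.168.58.1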
helpers_test.go:239: (dbg) Run:  out/minikube-linux-amd64 status --format={{.Host}} -p multinode-776386 -n multinode-776386
helpers_test.go:244: <<< TestMultiNode/serial/PingHostFrom2Pods FAILED: start of post-mortem logs <<<
helpers_test.go:245: ======>  post-mortem[TestMultiNode/serial/PingHostFrom2Pods]: minikube logs <======
helpers_test.go:247: (dbg) Run:  out/minikube-linux-amd64 -p multinode-776386 logs -n 25
helpers_test.go:247: (dbg) Done: out/minikube-linux-amd64 -p multinode-776386 logs -n 25: (1.283631608s)
helpers_test.go:252: TestMultiNode/serial/PingHostFrom2Pods logs: 
-- stdout --
	* 
	* ==> Audit <==
	* |---------|---------------------------------------------------|----------------------|---------|---------|---------------------|---------------------|
	| Command |                       Args                        |       Profile        |  User   | Version |     Start Time      |      End Time       |
	|---------|---------------------------------------------------|----------------------|---------|---------|---------------------|---------------------|
	| start   | -p mount-start-2-532973                           | mount-start-2-532973 | jenkins | v1.31.1 | 31 Jul 23 10:51 UTC | 31 Jul 23 10:52 UTC |
	|         | --memory=2048 --mount                             |                      |         |         |                     |                     |
	|         | --mount-gid 0 --mount-msize                       |                      |         |         |                     |                     |
	|         | 6543 --mount-port 46465                           |                      |         |         |                     |                     |
	|         | --mount-uid 0 --no-kubernetes                     |                      |         |         |                     |                     |
	|         | --driver=docker                                   |                      |         |         |                     |                     |
	|         | --container-runtime=crio                          |                      |         |         |                     |                     |
	| ssh     | mount-start-2-532973 ssh -- ls                    | mount-start-2-532973 | jenkins | v1.31.1 | 31 Jul 23 10:52 UTC | 31 Jul 23 10:52 UTC |
	|         | /minikube-host                                    |                      |         |         |                     |                     |
	| delete  | -p mount-start-1-515905                           | mount-start-1-515905 | jenkins | v1.31.1 | 31 Jul 23 10:52 UTC | 31 Jul 23 10:52 UTC |
	|         | --alsologtostderr -v=5                            |                      |         |         |                     |                     |
	| ssh     | mount-start-2-532973 ssh -- ls                    | mount-start-2-532973 | jenkins | v1.31.1 | 31 Jul 23 10:52 UTC | 31 Jul 23 10:52 UTC |
	|         | /minikube-host                                    |                      |         |         |                     |                     |
	| stop    | -p mount-start-2-532973                           | mount-start-2-532973 | jenkins | v1.31.1 | 31 Jul 23 10:52 UTC | 31 Jul 23 10:52 UTC |
	| start   | -p mount-start-2-532973                           | mount-start-2-532973 | jenkins | v1.31.1 | 31 Jul 23 10:52 UTC | 31 Jul 23 10:52 UTC |
	| ssh     | mount-start-2-532973 ssh -- ls                    | mount-start-2-532973 | jenkins | v1.31.1 | 31 Jul 23 10:52 UTC | 31 Jul 23 10:52 UTC |
	|         | /minikube-host                                    |                      |         |         |                     |                     |
	| delete  | -p mount-start-2-532973                           | mount-start-2-532973 | jenkins | v1.31.1 | 31 Jul 23 10:52 UTC | 31 Jul 23 10:52 UTC |
	| delete  | -p mount-start-1-515905                           | mount-start-1-515905 | jenkins | v1.31.1 | 31 Jul 23 10:52 UTC | 31 Jul 23 10:52 UTC |
	| start   | -p multinode-776386                               | multinode-776386     | jenkins | v1.31.1 | 31 Jul 23 10:52 UTC | 31 Jul 23 10:54 UTC |
	|         | --wait=true --memory=2200                         |                      |         |         |                     |                     |
	|         | --nodes=2 -v=8                                    |                      |         |         |                     |                     |
	|         | --alsologtostderr                                 |                      |         |         |                     |                     |
	|         | --driver=docker                                   |                      |         |         |                     |                     |
	|         | --container-runtime=crio                          |                      |         |         |                     |                     |
	| kubectl | -p multinode-776386 -- apply -f                   | multinode-776386     | jenkins | v1.31.1 | 31 Jul 23 10:54 UTC | 31 Jul 23 10:54 UTC |
	|         | ./testdata/multinodes/multinode-pod-dns-test.yaml |                      |         |         |                     |                     |
	| kubectl | -p multinode-776386 -- rollout                    | multinode-776386     | jenkins | v1.31.1 | 31 Jul 23 10:54 UTC | 31 Jul 23 10:54 UTC |
	|         | status deployment/busybox                         |                      |         |         |                     |                     |
	| kubectl | -p multinode-776386 -- get pods -o                | multinode-776386     | jenkins | v1.31.1 | 31 Jul 23 10:54 UTC | 31 Jul 23 10:54 UTC |
	|         | jsonpath='{.items[*].status.podIP}'               |                      |         |         |                     |                     |
	| kubectl | -p multinode-776386 -- get pods -o                | multinode-776386     | jenkins | v1.31.1 | 31 Jul 23 10:54 UTC | 31 Jul 23 10:54 UTC |
	|         | jsonpath='{.items[*].metadata.name}'              |                      |         |         |                     |                     |
	| kubectl | -p multinode-776386 -- exec                       | multinode-776386     | jenkins | v1.31.1 | 31 Jul 23 10:54 UTC | 31 Jul 23 10:54 UTC |
	|         | busybox-67b7f59bb-trlh5 --                        |                      |         |         |                     |                     |
	|         | nslookup kubernetes.io                            |                      |         |         |                     |                     |
	| kubectl | -p multinode-776386 -- exec                       | multinode-776386     | jenkins | v1.31.1 | 31 Jul 23 10:54 UTC | 31 Jul 23 10:54 UTC |
	|         | busybox-67b7f59bb-tvf5p --                        |                      |         |         |                     |                     |
	|         | nslookup kubernetes.io                            |                      |         |         |                     |                     |
	| kubectl | -p multinode-776386 -- exec                       | multinode-776386     | jenkins | v1.31.1 | 31 Jul 23 10:54 UTC | 31 Jul 23 10:54 UTC |
	|         | busybox-67b7f59bb-trlh5 --                        |                      |         |         |                     |                     |
	|         | nslookup kubernetes.default                       |                      |         |         |                     |                     |
	| kubectl | -p multinode-776386 -- exec                       | multinode-776386     | jenkins | v1.31.1 | 31 Jul 23 10:54 UTC | 31 Jul 23 10:54 UTC |
	|         | busybox-67b7f59bb-tvf5p --                        |                      |         |         |                     |                     |
	|         | nslookup kubernetes.default                       |                      |         |         |                     |                     |
	| kubectl | -p multinode-776386 -- exec                       | multinode-776386     | jenkins | v1.31.1 | 31 Jul 23 10:54 UTC | 31 Jul 23 10:54 UTC |
	|         | busybox-67b7f59bb-trlh5 -- nslookup               |                      |         |         |                     |                     |
	|         | kubernetes.default.svc.cluster.local              |                      |         |         |                     |                     |
	| kubectl | -p multinode-776386 -- exec                       | multinode-776386     | jenkins | v1.31.1 | 31 Jul 23 10:54 UTC | 31 Jul 23 10:54 UTC |
	|         | busybox-67b7f59bb-tvf5p -- nslookup               |                      |         |         |                     |                     |
	|         | kubernetes.default.svc.cluster.local              |                      |         |         |                     |                     |
	| kubectl | -p multinode-776386 -- get pods -o                | multinode-776386     | jenkins | v1.31.1 | 31 Jul 23 10:54 UTC | 31 Jul 23 10:54 UTC |
	|         | jsonpath='{.items[*].metadata.name}'              |                      |         |         |                     |                     |
	| kubectl | -p multinode-776386 -- exec                       | multinode-776386     | jenkins | v1.31.1 | 31 Jul 23 10:54 UTC | 31 Jul 23 10:54 UTC |
	|         | busybox-67b7f59bb-trlh5                           |                      |         |         |                     |                     |
	|         | -- sh -c nslookup                                 |                      |         |         |                     |                     |
	|         | host.minikube.internal | awk                      |                      |         |         |                     |                     |
	|         | 'NR==5' | cut -d' ' -f3                           |                      |         |         |                     |                     |
	| kubectl | -p multinode-776386 -- exec                       | multinode-776386     | jenkins | v1.31.1 | 31 Jul 23 10:54 UTC |                     |
	|         | busybox-67b7f59bb-trlh5 -- sh                     |                      |         |         |                     |                     |
	|         | -c ping -c 1 192.168.58.1                         |                      |         |         |                     |                     |
	| kubectl | -p multinode-776386 -- exec                       | multinode-776386     | jenkins | v1.31.1 | 31 Jul 23 10:54 UTC | 31 Jul 23 10:54 UTC |
	|         | busybox-67b7f59bb-tvf5p                           |                      |         |         |                     |                     |
	|         | -- sh -c nslookup                                 |                      |         |         |                     |                     |
	|         | host.minikube.internal | awk                      |                      |         |         |                     |                     |
	|         | 'NR==5' | cut -d' ' -f3                           |                      |         |         |                     |                     |
	| kubectl | -p multinode-776386 -- exec                       | multinode-776386     | jenkins | v1.31.1 | 31 Jul 23 10:54 UTC |                     |
	|         | busybox-67b7f59bb-tvf5p -- sh                     |                      |         |         |                     |                     |
	|         | -c ping -c 1 192.168.58.1                         |                      |         |         |                     |                     |
	|---------|---------------------------------------------------|----------------------|---------|---------|---------------------|---------------------|
	
	* 
	* ==> Last Start <==
	* Log file created at: 2023/07/31 10:52:16
	Running on machine: ubuntu-20-agent-15
	Binary: Built with gc go1.20.6 for linux/amd64
	Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg
	I0731 10:52:16.260362   96871 out.go:296] Setting OutFile to fd 1 ...
	I0731 10:52:16.260511   96871 out.go:343] TERM=,COLORTERM=, which probably does not support color
	I0731 10:52:16.260522   96871 out.go:309] Setting ErrFile to fd 2...
	I0731 10:52:16.260527   96871 out.go:343] TERM=,COLORTERM=, which probably does not support color
	I0731 10:52:16.260725   96871 root.go:338] Updating PATH: /home/jenkins/minikube-integration/16969-5799/.minikube/bin
	I0731 10:52:16.261323   96871 out.go:303] Setting JSON to false
	I0731 10:52:16.262555   96871 start.go:128] hostinfo: {"hostname":"ubuntu-20-agent-15","uptime":2088,"bootTime":1690798648,"procs":615,"os":"linux","platform":"ubuntu","platformFamily":"debian","platformVersion":"20.04","kernelVersion":"5.15.0-1038-gcp","kernelArch":"x86_64","virtualizationSystem":"kvm","virtualizationRole":"guest","hostId":"591c9f12-2938-3743-e2bf-c56a050d43d1"}
	I0731 10:52:16.262613   96871 start.go:138] virtualization: kvm guest
	I0731 10:52:16.265015   96871 out.go:177] * [multinode-776386] minikube v1.31.1 on Ubuntu 20.04 (kvm/amd64)
	I0731 10:52:16.266517   96871 out.go:177]   - MINIKUBE_LOCATION=16969
	I0731 10:52:16.266539   96871 notify.go:220] Checking for updates...
	I0731 10:52:16.268069   96871 out.go:177]   - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
	I0731 10:52:16.269583   96871 out.go:177]   - KUBECONFIG=/home/jenkins/minikube-integration/16969-5799/kubeconfig
	I0731 10:52:16.271074   96871 out.go:177]   - MINIKUBE_HOME=/home/jenkins/minikube-integration/16969-5799/.minikube
	I0731 10:52:16.273342   96871 out.go:177]   - MINIKUBE_BIN=out/minikube-linux-amd64
	I0731 10:52:16.274657   96871 out.go:177]   - MINIKUBE_FORCE_SYSTEMD=
	I0731 10:52:16.276009   96871 driver.go:373] Setting default libvirt URI to qemu:///system
	I0731 10:52:16.296282   96871 docker.go:121] docker version: linux-24.0.5:Docker Engine - Community
	I0731 10:52:16.296370   96871 cli_runner.go:164] Run: docker system info --format "{{json .}}"
	I0731 10:52:16.346327   96871 info.go:266] docker info: {ID:TS6T:UINC:MIYS:RZPA:KS6T:4JQK:7JHN:D6RA:LDP2:MHAE:G32M:C5NQ Containers:0 ContainersRunning:0 ContainersPaused:0 ContainersStopped:0 Images:3 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local logentries splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:true BridgeNfIP6Tables:true Debug:false NFd:26 OomKillDisable:true NGoroutines:36 SystemTime:2023-07-31 10:52:16.338418732 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1038-gcp OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Architecture:x86_64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:8 MemTotal:33648062464 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ubuntu-20-agent-15 Labels:[] ExperimentalBuild:false ServerVersion:24.0.5 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:3dce8eb055cbb6872793272b4f20ed16117344f8 Expected:3dce8eb055cbb6872793272b4f20ed16117344f8} RuncCommit:{ID:v1.1.7-0-g860f061 Expected:v1.1.7-0-g860f061} InitCommit:{ID:de40ad0 Expected:de40ad0} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.11.2] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.20.2] map[Name:scan Path:/usr/libexec/docker/cli-plugins/docker-scan SchemaVersion:0.1.0 ShortDescription:Docker Scan Vendor:Docker Inc. Version:v0.23.0]] Warnings:<nil>}}
	I0731 10:52:16.346417   96871 docker.go:294] overlay module found
	I0731 10:52:16.349351   96871 out.go:177] * Using the docker driver based on user configuration
	I0731 10:52:16.350822   96871 start.go:298] selected driver: docker
	I0731 10:52:16.350839   96871 start.go:898] validating driver "docker" against <nil>
	I0731 10:52:16.350849   96871 start.go:909] status for docker: {Installed:true Healthy:true Running:false NeedsImprovement:false Error:<nil> Reason: Fix: Doc: Version:}
	I0731 10:52:16.351627   96871 cli_runner.go:164] Run: docker system info --format "{{json .}}"
	I0731 10:52:16.400813   96871 info.go:266] docker info: {ID:TS6T:UINC:MIYS:RZPA:KS6T:4JQK:7JHN:D6RA:LDP2:MHAE:G32M:C5NQ Containers:0 ContainersRunning:0 ContainersPaused:0 ContainersStopped:0 Images:3 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local logentries splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:true BridgeNfIP6Tables:true Debug:false NFd:26 OomKillDisable:true NGoroutines:36 SystemTime:2023-07-31 10:52:16.392865533 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1038-gcp OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Architecture:x86_64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:8 MemTotal:33648062464 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ubuntu-20-agent-15 Labels:[] ExperimentalBuild:false ServerVersion:24.0.5 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:3dce8eb055cbb6872793272b4f20ed16117344f8 Expected:3dce8eb055cbb6872793272b4f20ed16117344f8} RuncCommit:{ID:v1.1.7-0-g860f061 Expected:v1.1.7-0-g860f061} InitCommit:{ID:de40ad0 Expected:de40ad0} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.11.2] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.20.2] map[Name:scan Path:/usr/libexec/docker/cli-plugins/docker-scan SchemaVersion:0.1.0 ShortDescription:Docker Scan Vendor:Docker Inc. Version:v0.23.0]] Warnings:<nil>}}
	I0731 10:52:16.400961   96871 start_flags.go:305] no existing cluster config was found, will generate one from the flags 
	I0731 10:52:16.401167   96871 start_flags.go:919] Waiting for all components: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
	I0731 10:52:16.402874   96871 out.go:177] * Using Docker driver with root privileges
	I0731 10:52:16.404288   96871 cni.go:84] Creating CNI manager for ""
	I0731 10:52:16.404301   96871 cni.go:136] 0 nodes found, recommending kindnet
	I0731 10:52:16.404308   96871 start_flags.go:314] Found "CNI" CNI - setting NetworkPlugin=cni
	I0731 10:52:16.404319   96871 start_flags.go:319] config:
	{Name:multinode-776386 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase:v0.0.40@sha256:8cadf23777709e43eca447c47a45f5a4635615129267ce025193040ec92a1631 Memory:2200 CPUs:2 DiskSize:20000 VMDriver: Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:0 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.27.3 ClusterName:multinode-776386 Namespace:default APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:crio CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI: NodeIP: NodePort:8443 NodeName:} Nodes:[] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:true ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0}
	I0731 10:52:16.405876   96871 out.go:177] * Starting control plane node multinode-776386 in cluster multinode-776386
	I0731 10:52:16.407092   96871 cache.go:122] Beginning downloading kic base image for docker with crio
	I0731 10:52:16.408421   96871 out.go:177] * Pulling base image ...
	I0731 10:52:16.409732   96871 preload.go:132] Checking if preload exists for k8s version v1.27.3 and runtime crio
	I0731 10:52:16.409766   96871 preload.go:148] Found local preload: /home/jenkins/minikube-integration/16969-5799/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.27.3-cri-o-overlay-amd64.tar.lz4
	I0731 10:52:16.409780   96871 cache.go:57] Caching tarball of preloaded images
	I0731 10:52:16.409831   96871 image.go:79] Checking for gcr.io/k8s-minikube/kicbase:v0.0.40@sha256:8cadf23777709e43eca447c47a45f5a4635615129267ce025193040ec92a1631 in local docker daemon
	I0731 10:52:16.409870   96871 preload.go:174] Found /home/jenkins/minikube-integration/16969-5799/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.27.3-cri-o-overlay-amd64.tar.lz4 in cache, skipping download
	I0731 10:52:16.409885   96871 cache.go:60] Finished verifying existence of preloaded tar for  v1.27.3 on crio
	I0731 10:52:16.410286   96871 profile.go:148] Saving config to /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/multinode-776386/config.json ...
	I0731 10:52:16.410311   96871 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/multinode-776386/config.json: {Name:mk4be0fbd28b418dc4391083651aae4e8a9cfa43 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
	I0731 10:52:16.425408   96871 image.go:83] Found gcr.io/k8s-minikube/kicbase:v0.0.40@sha256:8cadf23777709e43eca447c47a45f5a4635615129267ce025193040ec92a1631 in local docker daemon, skipping pull
	I0731 10:52:16.425444   96871 cache.go:145] gcr.io/k8s-minikube/kicbase:v0.0.40@sha256:8cadf23777709e43eca447c47a45f5a4635615129267ce025193040ec92a1631 exists in daemon, skipping load
	I0731 10:52:16.425468   96871 cache.go:195] Successfully downloaded all kic artifacts
	I0731 10:52:16.425519   96871 start.go:365] acquiring machines lock for multinode-776386: {Name:mk86680f0c2d4a4b1f27792197bfdd2c8afe5415 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
	I0731 10:52:16.425623   96871 start.go:369] acquired machines lock for "multinode-776386" in 83.609µs
	I0731 10:52:16.425654   96871 start.go:93] Provisioning new machine with config: &{Name:multinode-776386 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase:v0.0.40@sha256:8cadf23777709e43eca447c47a45f5a4635615129267ce025193040ec92a1631 Memory:2200 CPUs:2 DiskSize:20000 VMDriver: Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:0 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.27.3 ClusterName:multinode-776386 Namespace:default APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:crio CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI: NodeIP: NodePort:8443 NodeName:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.27.3 ContainerRuntime:crio ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:true ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0} &{Name: IP: Port:8443 KubernetesVersion:v1.27.3 ContainerRuntime:crio ControlPlane:true Worker:true}
	I0731 10:52:16.425763   96871 start.go:125] createHost starting for "" (driver="docker")
	I0731 10:52:16.427699   96871 out.go:204] * Creating docker container (CPUs=2, Memory=2200MB) ...
	I0731 10:52:16.427913   96871 start.go:159] libmachine.API.Create for "multinode-776386" (driver="docker")
	I0731 10:52:16.427939   96871 client.go:168] LocalClient.Create starting
	I0731 10:52:16.428046   96871 main.go:141] libmachine: Reading certificate data from /home/jenkins/minikube-integration/16969-5799/.minikube/certs/ca.pem
	I0731 10:52:16.428085   96871 main.go:141] libmachine: Decoding PEM data...
	I0731 10:52:16.428099   96871 main.go:141] libmachine: Parsing certificate...
	I0731 10:52:16.428160   96871 main.go:141] libmachine: Reading certificate data from /home/jenkins/minikube-integration/16969-5799/.minikube/certs/cert.pem
	I0731 10:52:16.428179   96871 main.go:141] libmachine: Decoding PEM data...
	I0731 10:52:16.428192   96871 main.go:141] libmachine: Parsing certificate...
	I0731 10:52:16.428489   96871 cli_runner.go:164] Run: docker network inspect multinode-776386 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
	W0731 10:52:16.443580   96871 cli_runner.go:211] docker network inspect multinode-776386 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}" returned with exit code 1
	I0731 10:52:16.443629   96871 network_create.go:281] running [docker network inspect multinode-776386] to gather additional debugging logs...
	I0731 10:52:16.443644   96871 cli_runner.go:164] Run: docker network inspect multinode-776386
	W0731 10:52:16.458732   96871 cli_runner.go:211] docker network inspect multinode-776386 returned with exit code 1
	I0731 10:52:16.458754   96871 network_create.go:284] error running [docker network inspect multinode-776386]: docker network inspect multinode-776386: exit status 1
	stdout:
	[]
	
	stderr:
	Error response from daemon: network multinode-776386 not found
	I0731 10:52:16.458764   96871 network_create.go:286] output of [docker network inspect multinode-776386]: -- stdout --
	[]
	
	-- /stdout --
	** stderr ** 
	Error response from daemon: network multinode-776386 not found
	
	** /stderr **
	I0731 10:52:16.458812   96871 cli_runner.go:164] Run: docker network inspect bridge --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
	I0731 10:52:16.474089   96871 network.go:214] skipping subnet 192.168.49.0/24 that is taken: &{IP:192.168.49.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.49.0/24 Gateway:192.168.49.1 ClientMin:192.168.49.2 ClientMax:192.168.49.254 Broadcast:192.168.49.255 IsPrivate:true Interface:{IfaceName:br-d48e902c04f3 IfaceIPv4:192.168.49.1 IfaceMTU:1500 IfaceMAC:02:42:ab:a9:43:76} reservation:<nil>}
	I0731 10:52:16.474576   96871 network.go:209] using free private subnet 192.168.58.0/24: &{IP:192.168.58.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.58.0/24 Gateway:192.168.58.1 ClientMin:192.168.58.2 ClientMax:192.168.58.254 Broadcast:192.168.58.255 IsPrivate:true Interface:{IfaceName: IfaceIPv4: IfaceMTU:0 IfaceMAC:} reservation:0xc0014ba0d0}
	I0731 10:52:16.474596   96871 network_create.go:123] attempt to create docker network multinode-776386 192.168.58.0/24 with gateway 192.168.58.1 and MTU of 1500 ...
	I0731 10:52:16.474634   96871 cli_runner.go:164] Run: docker network create --driver=bridge --subnet=192.168.58.0/24 --gateway=192.168.58.1 -o --ip-masq -o --icc -o com.docker.network.driver.mtu=1500 --label=created_by.minikube.sigs.k8s.io=true --label=name.minikube.sigs.k8s.io=multinode-776386 multinode-776386
	I0731 10:52:16.523398   96871 network_create.go:107] docker network multinode-776386 192.168.58.0/24 created
	I0731 10:52:16.523424   96871 kic.go:117] calculated static IP "192.168.58.2" for the "multinode-776386" container
	I0731 10:52:16.523503   96871 cli_runner.go:164] Run: docker ps -a --format {{.Names}}
	I0731 10:52:16.538028   96871 cli_runner.go:164] Run: docker volume create multinode-776386 --label name.minikube.sigs.k8s.io=multinode-776386 --label created_by.minikube.sigs.k8s.io=true
	I0731 10:52:16.554565   96871 oci.go:103] Successfully created a docker volume multinode-776386
	I0731 10:52:16.554685   96871 cli_runner.go:164] Run: docker run --rm --name multinode-776386-preload-sidecar --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=multinode-776386 --entrypoint /usr/bin/test -v multinode-776386:/var gcr.io/k8s-minikube/kicbase:v0.0.40@sha256:8cadf23777709e43eca447c47a45f5a4635615129267ce025193040ec92a1631 -d /var/lib
	I0731 10:52:17.043582   96871 oci.go:107] Successfully prepared a docker volume multinode-776386
	I0731 10:52:17.043633   96871 preload.go:132] Checking if preload exists for k8s version v1.27.3 and runtime crio
	I0731 10:52:17.043656   96871 kic.go:190] Starting extracting preloaded images to volume ...
	I0731 10:52:17.043745   96871 cli_runner.go:164] Run: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/16969-5799/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.27.3-cri-o-overlay-amd64.tar.lz4:/preloaded.tar:ro -v multinode-776386:/extractDir gcr.io/k8s-minikube/kicbase:v0.0.40@sha256:8cadf23777709e43eca447c47a45f5a4635615129267ce025193040ec92a1631 -I lz4 -xf /preloaded.tar -C /extractDir
	I0731 10:52:21.804274   96871 cli_runner.go:217] Completed: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/16969-5799/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.27.3-cri-o-overlay-amd64.tar.lz4:/preloaded.tar:ro -v multinode-776386:/extractDir gcr.io/k8s-minikube/kicbase:v0.0.40@sha256:8cadf23777709e43eca447c47a45f5a4635615129267ce025193040ec92a1631 -I lz4 -xf /preloaded.tar -C /extractDir: (4.760445613s)
	I0731 10:52:21.804306   96871 kic.go:199] duration metric: took 4.760648 seconds to extract preloaded images to volume
	W0731 10:52:21.804443   96871 cgroups_linux.go:77] Your kernel does not support swap limit capabilities or the cgroup is not mounted.
	I0731 10:52:21.804565   96871 cli_runner.go:164] Run: docker info --format "'{{json .SecurityOptions}}'"
	I0731 10:52:21.854917   96871 cli_runner.go:164] Run: docker run -d -t --privileged --security-opt seccomp=unconfined --tmpfs /tmp --tmpfs /run -v /lib/modules:/lib/modules:ro --hostname multinode-776386 --name multinode-776386 --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=multinode-776386 --label role.minikube.sigs.k8s.io= --label mode.minikube.sigs.k8s.io=multinode-776386 --network multinode-776386 --ip 192.168.58.2 --volume multinode-776386:/var --security-opt apparmor=unconfined --memory=2200mb --cpus=2 -e container=docker --expose 8443 --publish=127.0.0.1::8443 --publish=127.0.0.1::22 --publish=127.0.0.1::2376 --publish=127.0.0.1::5000 --publish=127.0.0.1::32443 gcr.io/k8s-minikube/kicbase:v0.0.40@sha256:8cadf23777709e43eca447c47a45f5a4635615129267ce025193040ec92a1631
	I0731 10:52:22.142859   96871 cli_runner.go:164] Run: docker container inspect multinode-776386 --format={{.State.Running}}
	I0731 10:52:22.159926   96871 cli_runner.go:164] Run: docker container inspect multinode-776386 --format={{.State.Status}}
	I0731 10:52:22.177397   96871 cli_runner.go:164] Run: docker exec multinode-776386 stat /var/lib/dpkg/alternatives/iptables
	I0731 10:52:22.225443   96871 oci.go:144] the created container "multinode-776386" has a running status.
	I0731 10:52:22.225480   96871 kic.go:221] Creating ssh key for kic: /home/jenkins/minikube-integration/16969-5799/.minikube/machines/multinode-776386/id_rsa...
	I0731 10:52:22.436365   96871 vm_assets.go:163] NewFileAsset: /home/jenkins/minikube-integration/16969-5799/.minikube/machines/multinode-776386/id_rsa.pub -> /home/docker/.ssh/authorized_keys
	I0731 10:52:22.436408   96871 kic_runner.go:191] docker (temp): /home/jenkins/minikube-integration/16969-5799/.minikube/machines/multinode-776386/id_rsa.pub --> /home/docker/.ssh/authorized_keys (381 bytes)
	I0731 10:52:22.458065   96871 cli_runner.go:164] Run: docker container inspect multinode-776386 --format={{.State.Status}}
	I0731 10:52:22.480913   96871 kic_runner.go:93] Run: chown docker:docker /home/docker/.ssh/authorized_keys
	I0731 10:52:22.480933   96871 kic_runner.go:114] Args: [docker exec --privileged multinode-776386 chown docker:docker /home/docker/.ssh/authorized_keys]
	I0731 10:52:22.546626   96871 cli_runner.go:164] Run: docker container inspect multinode-776386 --format={{.State.Status}}
	I0731 10:52:22.571247   96871 machine.go:88] provisioning docker machine ...
	I0731 10:52:22.571277   96871 ubuntu.go:169] provisioning hostname "multinode-776386"
	I0731 10:52:22.571327   96871 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" multinode-776386
	I0731 10:52:22.586424   96871 main.go:141] libmachine: Using SSH client type: native
	I0731 10:52:22.586849   96871 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x80eb00] 0x811ba0 <nil>  [] 0s} 127.0.0.1 32847 <nil> <nil>}
	I0731 10:52:22.586866   96871 main.go:141] libmachine: About to run SSH command:
	sudo hostname multinode-776386 && echo "multinode-776386" | sudo tee /etc/hostname
	I0731 10:52:22.768151   96871 main.go:141] libmachine: SSH cmd err, output: <nil>: multinode-776386
	
	I0731 10:52:22.768239   96871 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" multinode-776386
	I0731 10:52:22.784742   96871 main.go:141] libmachine: Using SSH client type: native
	I0731 10:52:22.785183   96871 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x80eb00] 0x811ba0 <nil>  [] 0s} 127.0.0.1 32847 <nil> <nil>}
	I0731 10:52:22.785203   96871 main.go:141] libmachine: About to run SSH command:
	
			if ! grep -xq '.*\smultinode-776386' /etc/hosts; then
				if grep -xq '127.0.1.1\s.*' /etc/hosts; then
					sudo sed -i 's/^127.0.1.1\s.*/127.0.1.1 multinode-776386/g' /etc/hosts;
				else 
					echo '127.0.1.1 multinode-776386' | sudo tee -a /etc/hosts; 
				fi
			fi
	I0731 10:52:22.909941   96871 main.go:141] libmachine: SSH cmd err, output: <nil>: 
	I0731 10:52:22.909969   96871 ubuntu.go:175] set auth options {CertDir:/home/jenkins/minikube-integration/16969-5799/.minikube CaCertPath:/home/jenkins/minikube-integration/16969-5799/.minikube/certs/ca.pem CaPrivateKeyPath:/home/jenkins/minikube-integration/16969-5799/.minikube/certs/ca-key.pem CaCertRemotePath:/etc/docker/ca.pem ServerCertPath:/home/jenkins/minikube-integration/16969-5799/.minikube/machines/server.pem ServerKeyPath:/home/jenkins/minikube-integration/16969-5799/.minikube/machines/server-key.pem ClientKeyPath:/home/jenkins/minikube-integration/16969-5799/.minikube/certs/key.pem ServerCertRemotePath:/etc/docker/server.pem ServerKeyRemotePath:/etc/docker/server-key.pem ClientCertPath:/home/jenkins/minikube-integration/16969-5799/.minikube/certs/cert.pem ServerCertSANs:[] StorePath:/home/jenkins/minikube-integration/16969-5799/.minikube}
	I0731 10:52:22.909998   96871 ubuntu.go:177] setting up certificates
	I0731 10:52:22.910008   96871 provision.go:83] configureAuth start
	I0731 10:52:22.910051   96871 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" multinode-776386
	I0731 10:52:22.925236   96871 provision.go:138] copyHostCerts
	I0731 10:52:22.925266   96871 vm_assets.go:163] NewFileAsset: /home/jenkins/minikube-integration/16969-5799/.minikube/certs/ca.pem -> /home/jenkins/minikube-integration/16969-5799/.minikube/ca.pem
	I0731 10:52:22.925294   96871 exec_runner.go:144] found /home/jenkins/minikube-integration/16969-5799/.minikube/ca.pem, removing ...
	I0731 10:52:22.925301   96871 exec_runner.go:203] rm: /home/jenkins/minikube-integration/16969-5799/.minikube/ca.pem
	I0731 10:52:22.925370   96871 exec_runner.go:151] cp: /home/jenkins/minikube-integration/16969-5799/.minikube/certs/ca.pem --> /home/jenkins/minikube-integration/16969-5799/.minikube/ca.pem (1082 bytes)
	I0731 10:52:22.925431   96871 vm_assets.go:163] NewFileAsset: /home/jenkins/minikube-integration/16969-5799/.minikube/certs/cert.pem -> /home/jenkins/minikube-integration/16969-5799/.minikube/cert.pem
	I0731 10:52:22.925449   96871 exec_runner.go:144] found /home/jenkins/minikube-integration/16969-5799/.minikube/cert.pem, removing ...
	I0731 10:52:22.925455   96871 exec_runner.go:203] rm: /home/jenkins/minikube-integration/16969-5799/.minikube/cert.pem
	I0731 10:52:22.925478   96871 exec_runner.go:151] cp: /home/jenkins/minikube-integration/16969-5799/.minikube/certs/cert.pem --> /home/jenkins/minikube-integration/16969-5799/.minikube/cert.pem (1123 bytes)
	I0731 10:52:22.925518   96871 vm_assets.go:163] NewFileAsset: /home/jenkins/minikube-integration/16969-5799/.minikube/certs/key.pem -> /home/jenkins/minikube-integration/16969-5799/.minikube/key.pem
	I0731 10:52:22.925534   96871 exec_runner.go:144] found /home/jenkins/minikube-integration/16969-5799/.minikube/key.pem, removing ...
	I0731 10:52:22.925540   96871 exec_runner.go:203] rm: /home/jenkins/minikube-integration/16969-5799/.minikube/key.pem
	I0731 10:52:22.925559   96871 exec_runner.go:151] cp: /home/jenkins/minikube-integration/16969-5799/.minikube/certs/key.pem --> /home/jenkins/minikube-integration/16969-5799/.minikube/key.pem (1675 bytes)
	I0731 10:52:22.925601   96871 provision.go:112] generating server cert: /home/jenkins/minikube-integration/16969-5799/.minikube/machines/server.pem ca-key=/home/jenkins/minikube-integration/16969-5799/.minikube/certs/ca.pem private-key=/home/jenkins/minikube-integration/16969-5799/.minikube/certs/ca-key.pem org=jenkins.multinode-776386 san=[192.168.58.2 127.0.0.1 localhost 127.0.0.1 minikube multinode-776386]
	I0731 10:52:23.119039   96871 provision.go:172] copyRemoteCerts
	I0731 10:52:23.119097   96871 ssh_runner.go:195] Run: sudo mkdir -p /etc/docker /etc/docker /etc/docker
	I0731 10:52:23.119129   96871 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" multinode-776386
	I0731 10:52:23.135073   96871 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32847 SSHKeyPath:/home/jenkins/minikube-integration/16969-5799/.minikube/machines/multinode-776386/id_rsa Username:docker}
	I0731 10:52:23.226441   96871 vm_assets.go:163] NewFileAsset: /home/jenkins/minikube-integration/16969-5799/.minikube/machines/server.pem -> /etc/docker/server.pem
	I0731 10:52:23.226509   96871 ssh_runner.go:362] scp /home/jenkins/minikube-integration/16969-5799/.minikube/machines/server.pem --> /etc/docker/server.pem (1224 bytes)
	I0731 10:52:23.246934   96871 vm_assets.go:163] NewFileAsset: /home/jenkins/minikube-integration/16969-5799/.minikube/machines/server-key.pem -> /etc/docker/server-key.pem
	I0731 10:52:23.246987   96871 ssh_runner.go:362] scp /home/jenkins/minikube-integration/16969-5799/.minikube/machines/server-key.pem --> /etc/docker/server-key.pem (1679 bytes)
	I0731 10:52:23.266384   96871 vm_assets.go:163] NewFileAsset: /home/jenkins/minikube-integration/16969-5799/.minikube/certs/ca.pem -> /etc/docker/ca.pem
	I0731 10:52:23.266437   96871 ssh_runner.go:362] scp /home/jenkins/minikube-integration/16969-5799/.minikube/certs/ca.pem --> /etc/docker/ca.pem (1082 bytes)
	I0731 10:52:23.285891   96871 provision.go:86] duration metric: configureAuth took 375.873154ms
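
configureAuth signs a per-machine server certificate against the minikube CA using the SANs listed in the log above. A minimal openssl sketch of an equivalent signing step (file names are illustrative; minikube performs this signing in Go, not via openssl):

    # Sign a server cert against an existing CA (ca.pem / ca-key.pem), SANs as logged
    openssl req -new -newkey rsa:2048 -nodes \
      -keyout server-key.pem -out server.csr \
      -subj "/O=jenkins.multinode-776386"
    openssl x509 -req -in server.csr -CA ca.pem -CAkey ca-key.pem -CAcreateserial \
      -out server.pem -days 365 \
      -extfile <(printf "subjectAltName=IP:192.168.58.2,IP:127.0.0.1,DNS:localhost,DNS:minikube,DNS:multinode-776386")
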
	I0731 10:52:23.285910   96871 ubuntu.go:193] setting minikube options for container-runtime
	I0731 10:52:23.286051   96871 config.go:182] Loaded profile config "multinode-776386": Driver=docker, ContainerRuntime=crio, KubernetesVersion=v1.27.3
	I0731 10:52:23.286145   96871 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" multinode-776386
	I0731 10:52:23.301753   96871 main.go:141] libmachine: Using SSH client type: native
	I0731 10:52:23.302414   96871 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x80eb00] 0x811ba0 <nil>  [] 0s} 127.0.0.1 32847 <nil> <nil>}
	I0731 10:52:23.302450   96871 main.go:141] libmachine: About to run SSH command:
	sudo mkdir -p /etc/sysconfig && printf %s "
	CRIO_MINIKUBE_OPTIONS='--insecure-registry 10.96.0.0/12 '
	" | sudo tee /etc/sysconfig/crio.minikube && sudo systemctl restart crio
	I0731 10:52:23.505096   96871 main.go:141] libmachine: SSH cmd err, output: <nil>: 
	CRIO_MINIKUBE_OPTIONS='--insecure-registry 10.96.0.0/12 '
	
	I0731 10:52:23.505124   96871 machine.go:91] provisioned docker machine in 933.857872ms
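
The container-runtime options written above land in /etc/sysconfig/crio.minikube inside the node. A quick check that the write took effect and that CRI-O survived the restart (container name as in this run):

    docker exec multinode-776386 cat /etc/sysconfig/crio.minikube
    docker exec multinode-776386 systemctl is-active crio
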
	I0731 10:52:23.505135   96871 client.go:171] LocalClient.Create took 7.077186621s
	I0731 10:52:23.505157   96871 start.go:167] duration metric: libmachine.API.Create for "multinode-776386" took 7.077243209s
	I0731 10:52:23.505165   96871 start.go:300] post-start starting for "multinode-776386" (driver="docker")
	I0731 10:52:23.505185   96871 start.go:329] creating required directories: [/etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs]
	I0731 10:52:23.505254   96871 ssh_runner.go:195] Run: sudo mkdir -p /etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs
	I0731 10:52:23.505302   96871 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" multinode-776386
	I0731 10:52:23.522793   96871 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32847 SSHKeyPath:/home/jenkins/minikube-integration/16969-5799/.minikube/machines/multinode-776386/id_rsa Username:docker}
	I0731 10:52:23.614071   96871 ssh_runner.go:195] Run: cat /etc/os-release
	I0731 10:52:23.616732   96871 command_runner.go:130] > PRETTY_NAME="Ubuntu 22.04.2 LTS"
	I0731 10:52:23.616753   96871 command_runner.go:130] > NAME="Ubuntu"
	I0731 10:52:23.616765   96871 command_runner.go:130] > VERSION_ID="22.04"
	I0731 10:52:23.616773   96871 command_runner.go:130] > VERSION="22.04.2 LTS (Jammy Jellyfish)"
	I0731 10:52:23.616784   96871 command_runner.go:130] > VERSION_CODENAME=jammy
	I0731 10:52:23.616794   96871 command_runner.go:130] > ID=ubuntu
	I0731 10:52:23.616801   96871 command_runner.go:130] > ID_LIKE=debian
	I0731 10:52:23.616812   96871 command_runner.go:130] > HOME_URL="https://www.ubuntu.com/"
	I0731 10:52:23.616823   96871 command_runner.go:130] > SUPPORT_URL="https://help.ubuntu.com/"
	I0731 10:52:23.616834   96871 command_runner.go:130] > BUG_REPORT_URL="https://bugs.launchpad.net/ubuntu/"
	I0731 10:52:23.616850   96871 command_runner.go:130] > PRIVACY_POLICY_URL="https://www.ubuntu.com/legal/terms-and-policies/privacy-policy"
	I0731 10:52:23.616861   96871 command_runner.go:130] > UBUNTU_CODENAME=jammy
	I0731 10:52:23.616921   96871 main.go:141] libmachine: Couldn't set key VERSION_CODENAME, no corresponding struct field found
	I0731 10:52:23.616955   96871 main.go:141] libmachine: Couldn't set key PRIVACY_POLICY_URL, no corresponding struct field found
	I0731 10:52:23.616973   96871 main.go:141] libmachine: Couldn't set key UBUNTU_CODENAME, no corresponding struct field found
	I0731 10:52:23.616985   96871 info.go:137] Remote host: Ubuntu 22.04.2 LTS
	I0731 10:52:23.616998   96871 filesync.go:126] Scanning /home/jenkins/minikube-integration/16969-5799/.minikube/addons for local assets ...
	I0731 10:52:23.617047   96871 filesync.go:126] Scanning /home/jenkins/minikube-integration/16969-5799/.minikube/files for local assets ...
	I0731 10:52:23.617134   96871 filesync.go:149] local asset: /home/jenkins/minikube-integration/16969-5799/.minikube/files/etc/ssl/certs/125372.pem -> 125372.pem in /etc/ssl/certs
	I0731 10:52:23.617145   96871 vm_assets.go:163] NewFileAsset: /home/jenkins/minikube-integration/16969-5799/.minikube/files/etc/ssl/certs/125372.pem -> /etc/ssl/certs/125372.pem
	I0731 10:52:23.617249   96871 ssh_runner.go:195] Run: sudo mkdir -p /etc/ssl/certs
	I0731 10:52:23.624150   96871 ssh_runner.go:362] scp /home/jenkins/minikube-integration/16969-5799/.minikube/files/etc/ssl/certs/125372.pem --> /etc/ssl/certs/125372.pem (1708 bytes)
	I0731 10:52:23.643544   96871 start.go:303] post-start completed in 138.364928ms
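
The post-start file sync copies the host's local asset 125372.pem into /etc/ssl/certs on the node. A spot check that it arrived and parses as a certificate (assumes openssl is present in the node image, which it is for the Ubuntu-based kicbase):

    docker exec multinode-776386 ls -l /etc/ssl/certs/125372.pem
    docker exec multinode-776386 openssl x509 -in /etc/ssl/certs/125372.pem -noout -subject -dates
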
	I0731 10:52:23.643863   96871 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" multinode-776386
	I0731 10:52:23.659464   96871 profile.go:148] Saving config to /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/multinode-776386/config.json ...
	I0731 10:52:23.659672   96871 ssh_runner.go:195] Run: sh -c "df -h /var | awk 'NR==2{print $5}'"
	I0731 10:52:23.659706   96871 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" multinode-776386
	I0731 10:52:23.674960   96871 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32847 SSHKeyPath:/home/jenkins/minikube-integration/16969-5799/.minikube/machines/multinode-776386/id_rsa Username:docker}
	I0731 10:52:23.762426   96871 command_runner.go:130] > 17%
	I0731 10:52:23.762625   96871 ssh_runner.go:195] Run: sh -c "df -BG /var | awk 'NR==2{print $4}'"
	I0731 10:52:23.766211   96871 command_runner.go:130] > 242G
	I0731 10:52:23.766440   96871 start.go:128] duration metric: createHost completed in 7.340666253s
	I0731 10:52:23.766463   96871 start.go:83] releasing machines lock for "multinode-776386", held for 7.340823191s
	I0731 10:52:23.766532   96871 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" multinode-776386
	I0731 10:52:23.783004   96871 ssh_runner.go:195] Run: cat /version.json
	I0731 10:52:23.783041   96871 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" multinode-776386
	I0731 10:52:23.783100   96871 ssh_runner.go:195] Run: curl -sS -m 2 https://registry.k8s.io/
	I0731 10:52:23.783158   96871 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" multinode-776386
	I0731 10:52:23.799107   96871 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32847 SSHKeyPath:/home/jenkins/minikube-integration/16969-5799/.minikube/machines/multinode-776386/id_rsa Username:docker}
	I0731 10:52:23.799506   96871 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32847 SSHKeyPath:/home/jenkins/minikube-integration/16969-5799/.minikube/machines/multinode-776386/id_rsa Username:docker}
	I0731 10:52:23.885107   96871 command_runner.go:130] > {"iso_version": "v1.30.1-1689243309-16875", "kicbase_version": "v0.0.40", "minikube_version": "v1.31.0", "commit": "085433cd1b734742870dea5be8f9ee2ce4c54148"}
	I0731 10:52:23.885250   96871 ssh_runner.go:195] Run: systemctl --version
	I0731 10:52:23.888819   96871 command_runner.go:130] > systemd 249 (249.11-0ubuntu3.9)
	I0731 10:52:23.888883   96871 command_runner.go:130] > +PAM +AUDIT +SELINUX +APPARMOR +IMA +SMACK +SECCOMP +GCRYPT +GNUTLS +OPENSSL +ACL +BLKID +CURL +ELFUTILS +FIDO2 +IDN2 -IDN +IPTC +KMOD +LIBCRYPTSETUP +LIBFDISK +PCRE2 -PWQUALITY -P11KIT -QRENCODE +BZIP2 +LZ4 +XZ +ZLIB +ZSTD -XKBCOMMON +UTMP +SYSVINIT default-hierarchy=unified
	I0731 10:52:23.888953   96871 ssh_runner.go:195] Run: sudo sh -c "podman version >/dev/null"
	I0731 10:52:23.971383   96871 command_runner.go:130] > <a href="https://github.com/kubernetes/registry.k8s.io">Temporary Redirect</a>.
	I0731 10:52:24.023855   96871 ssh_runner.go:195] Run: sh -c "stat /etc/cni/net.d/*loopback.conf*"
	I0731 10:52:24.027632   96871 command_runner.go:130] >   File: /etc/cni/net.d/200-loopback.conf
	I0731 10:52:24.027658   96871 command_runner.go:130] >   Size: 54        	Blocks: 8          IO Block: 4096   regular file
	I0731 10:52:24.027667   96871 command_runner.go:130] > Device: 37h/55d	Inode: 800494      Links: 1
	I0731 10:52:24.027677   96871 command_runner.go:130] > Access: (0644/-rw-r--r--)  Uid: (    0/    root)   Gid: (    0/    root)
	I0731 10:52:24.027683   96871 command_runner.go:130] > Access: 2023-06-14 14:44:50.000000000 +0000
	I0731 10:52:24.027690   96871 command_runner.go:130] > Modify: 2023-06-14 14:44:50.000000000 +0000
	I0731 10:52:24.027695   96871 command_runner.go:130] > Change: 2023-07-31 10:33:55.863843355 +0000
	I0731 10:52:24.027700   96871 command_runner.go:130] >  Birth: 2023-07-31 10:33:55.863843355 +0000
	I0731 10:52:24.027858   96871 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f -name *loopback.conf* -not -name *.mk_disabled -exec sh -c "sudo mv {} {}.mk_disabled" ;
	I0731 10:52:24.044411   96871 cni.go:221] loopback cni configuration disabled: "/etc/cni/net.d/*loopback.conf*" found
	I0731 10:52:24.044490   96871 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f ( ( -name *bridge* -or -name *podman* ) -and -not -name *.mk_disabled ) -printf "%p, " -exec sh -c "sudo mv {} {}.mk_disabled" ;
	I0731 10:52:24.069568   96871 command_runner.go:139] > /etc/cni/net.d/87-podman-bridge.conflist, /etc/cni/net.d/100-crio-bridge.conf, 
	I0731 10:52:24.069596   96871 cni.go:262] disabled [/etc/cni/net.d/87-podman-bridge.conflist, /etc/cni/net.d/100-crio-bridge.conf] bridge cni config(s)
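
The bridge and podman CNI configs are disabled by renaming them with a .mk_disabled suffix, not by deleting them, so the step is reversible. A sketch of inspecting and undoing it from a shell on the node:

    # List everything minikube parked out of the way
    find /etc/cni/net.d -maxdepth 1 -name '*.mk_disabled'

    # Restore a config by stripping the suffix (illustrative)
    for f in /etc/cni/net.d/*.mk_disabled; do sudo mv "$f" "${f%.mk_disabled}"; done
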
	I0731 10:52:24.069604   96871 start.go:466] detecting cgroup driver to use...
	I0731 10:52:24.069637   96871 detect.go:196] detected "cgroupfs" cgroup driver on host os
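
A rough shell approximation of the same cgroupfs-vs-systemd decision (not minikube's exact detection logic):

    # cgroup v2 mounts cgroup2fs at the unified hierarchy; v1 shows tmpfs
    stat -fc %T /sys/fs/cgroup

    # If systemd is PID 1, the systemd cgroup driver is usually available
    ps -p 1 -o comm=
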
	I0731 10:52:24.069685   96871 ssh_runner.go:195] Run: sudo systemctl stop -f containerd
	I0731 10:52:24.082428   96871 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service containerd
	I0731 10:52:24.091659   96871 docker.go:196] disabling cri-docker service (if available) ...
	I0731 10:52:24.091716   96871 ssh_runner.go:195] Run: sudo systemctl stop -f cri-docker.socket
	I0731 10:52:24.103205   96871 ssh_runner.go:195] Run: sudo systemctl stop -f cri-docker.service
	I0731 10:52:24.114405   96871 ssh_runner.go:195] Run: sudo systemctl disable cri-docker.socket
	I0731 10:52:24.188610   96871 ssh_runner.go:195] Run: sudo systemctl mask cri-docker.service
	I0731 10:52:24.259176   96871 command_runner.go:130] ! Created symlink /etc/systemd/system/cri-docker.service → /dev/null.
	I0731 10:52:24.259214   96871 docker.go:212] disabling docker service ...
	I0731 10:52:24.259261   96871 ssh_runner.go:195] Run: sudo systemctl stop -f docker.socket
	I0731 10:52:24.275176   96871 ssh_runner.go:195] Run: sudo systemctl stop -f docker.service
	I0731 10:52:24.284549   96871 ssh_runner.go:195] Run: sudo systemctl disable docker.socket
	I0731 10:52:24.354390   96871 command_runner.go:130] ! Removed /etc/systemd/system/sockets.target.wants/docker.socket.
	I0731 10:52:24.354440   96871 ssh_runner.go:195] Run: sudo systemctl mask docker.service
	I0731 10:52:24.364123   96871 command_runner.go:130] ! Created symlink /etc/systemd/system/docker.service → /dev/null.
	I0731 10:52:24.436557   96871 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service docker
	I0731 10:52:24.445949   96871 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///var/run/crio/crio.sock
	" | sudo tee /etc/crictl.yaml"
	I0731 10:52:24.459352   96871 command_runner.go:130] > runtime-endpoint: unix:///var/run/crio/crio.sock
	I0731 10:52:24.459389   96871 crio.go:59] configure cri-o to use "registry.k8s.io/pause:3.9" pause image...
	I0731 10:52:24.459435   96871 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|^.*pause_image = .*$|pause_image = "registry.k8s.io/pause:3.9"|' /etc/crio/crio.conf.d/02-crio.conf"
	I0731 10:52:24.467617   96871 crio.go:70] configuring cri-o to use "cgroupfs" as cgroup driver...
	I0731 10:52:24.467675   96871 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|^.*cgroup_manager = .*$|cgroup_manager = "cgroupfs"|' /etc/crio/crio.conf.d/02-crio.conf"
	I0731 10:52:24.475890   96871 ssh_runner.go:195] Run: sh -c "sudo sed -i '/conmon_cgroup = .*/d' /etc/crio/crio.conf.d/02-crio.conf"
	I0731 10:52:24.483877   96871 ssh_runner.go:195] Run: sh -c "sudo sed -i '/cgroup_manager = .*/a conmon_cgroup = "pod"' /etc/crio/crio.conf.d/02-crio.conf"
	I0731 10:52:24.491688   96871 ssh_runner.go:195] Run: sh -c "sudo rm -rf /etc/cni/net.mk"
	I0731 10:52:24.499162   96871 ssh_runner.go:195] Run: sudo sysctl net.bridge.bridge-nf-call-iptables
	I0731 10:52:24.505313   96871 command_runner.go:130] > net.bridge.bridge-nf-call-iptables = 1
	I0731 10:52:24.505891   96871 ssh_runner.go:195] Run: sudo sh -c "echo 1 > /proc/sys/net/ipv4/ip_forward"
	I0731 10:52:24.512609   96871 ssh_runner.go:195] Run: sudo systemctl daemon-reload
	I0731 10:52:24.580704   96871 ssh_runner.go:195] Run: sudo systemctl restart crio
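
Consolidated, the CRI-O reconfiguration performed in the preceding steps amounts to the following sequence, mirroring the sed edits logged above:

    sudo sed -i 's|^.*pause_image = .*$|pause_image = "registry.k8s.io/pause:3.9"|' /etc/crio/crio.conf.d/02-crio.conf
    sudo sed -i 's|^.*cgroup_manager = .*$|cgroup_manager = "cgroupfs"|' /etc/crio/crio.conf.d/02-crio.conf
    sudo sed -i '/conmon_cgroup = .*/d' /etc/crio/crio.conf.d/02-crio.conf
    sudo sed -i '/cgroup_manager = .*/a conmon_cgroup = "pod"' /etc/crio/crio.conf.d/02-crio.conf
    sudo systemctl daemon-reload && sudo systemctl restart crio
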
	I0731 10:52:24.677585   96871 start.go:513] Will wait 60s for socket path /var/run/crio/crio.sock
	I0731 10:52:24.677640   96871 ssh_runner.go:195] Run: stat /var/run/crio/crio.sock
	I0731 10:52:24.680782   96871 command_runner.go:130] >   File: /var/run/crio/crio.sock
	I0731 10:52:24.680796   96871 command_runner.go:130] >   Size: 0         	Blocks: 0          IO Block: 4096   socket
	I0731 10:52:24.680802   96871 command_runner.go:130] > Device: 41h/65d	Inode: 186         Links: 1
	I0731 10:52:24.680808   96871 command_runner.go:130] > Access: (0660/srw-rw----)  Uid: (    0/    root)   Gid: (    0/    root)
	I0731 10:52:24.680814   96871 command_runner.go:130] > Access: 2023-07-31 10:52:24.662232575 +0000
	I0731 10:52:24.680833   96871 command_runner.go:130] > Modify: 2023-07-31 10:52:24.662232575 +0000
	I0731 10:52:24.680842   96871 command_runner.go:130] > Change: 2023-07-31 10:52:24.662232575 +0000
	I0731 10:52:24.680848   96871 command_runner.go:130] >  Birth: -
	I0731 10:52:24.680884   96871 start.go:534] Will wait 60s for crictl version
	I0731 10:52:24.680930   96871 ssh_runner.go:195] Run: which crictl
	I0731 10:52:24.683840   96871 command_runner.go:130] > /usr/bin/crictl
	I0731 10:52:24.683900   96871 ssh_runner.go:195] Run: sudo /usr/bin/crictl version
	I0731 10:52:24.711560   96871 command_runner.go:130] > Version:  0.1.0
	I0731 10:52:24.711581   96871 command_runner.go:130] > RuntimeName:  cri-o
	I0731 10:52:24.711588   96871 command_runner.go:130] > RuntimeVersion:  1.24.6
	I0731 10:52:24.711598   96871 command_runner.go:130] > RuntimeApiVersion:  v1
	I0731 10:52:24.713480   96871 start.go:550] Version:  0.1.0
	RuntimeName:  cri-o
	RuntimeVersion:  1.24.6
	RuntimeApiVersion:  v1
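
Once /etc/crictl.yaml points at the CRI-O socket, runtime readiness can be polled directly. A minimal sketch using the endpoint written earlier:

    # Poll the CRI socket until the runtime answers, then print version info
    until sudo crictl --runtime-endpoint unix:///var/run/crio/crio.sock version >/dev/null 2>&1; do sleep 1; done
    sudo crictl --runtime-endpoint unix:///var/run/crio/crio.sock version
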
	I0731 10:52:24.713539   96871 ssh_runner.go:195] Run: crio --version
	I0731 10:52:24.745316   96871 command_runner.go:130] > crio version 1.24.6
	I0731 10:52:24.745336   96871 command_runner.go:130] > Version:          1.24.6
	I0731 10:52:24.745344   96871 command_runner.go:130] > GitCommit:        4bfe15a9feb74ffc95e66a21c04b15fa7bbc2b90
	I0731 10:52:24.745348   96871 command_runner.go:130] > GitTreeState:     clean
	I0731 10:52:24.745354   96871 command_runner.go:130] > BuildDate:        2023-06-14T14:44:50Z
	I0731 10:52:24.745358   96871 command_runner.go:130] > GoVersion:        go1.18.2
	I0731 10:52:24.745362   96871 command_runner.go:130] > Compiler:         gc
	I0731 10:52:24.745366   96871 command_runner.go:130] > Platform:         linux/amd64
	I0731 10:52:24.745372   96871 command_runner.go:130] > Linkmode:         dynamic
	I0731 10:52:24.745378   96871 command_runner.go:130] > BuildTags:        apparmor, exclude_graphdriver_devicemapper, containers_image_ostree_stub, seccomp
	I0731 10:52:24.745383   96871 command_runner.go:130] > SeccompEnabled:   true
	I0731 10:52:24.745388   96871 command_runner.go:130] > AppArmorEnabled:  false
	I0731 10:52:24.745443   96871 ssh_runner.go:195] Run: crio --version
	I0731 10:52:24.774836   96871 command_runner.go:130] > crio version 1.24.6
	I0731 10:52:24.774858   96871 command_runner.go:130] > Version:          1.24.6
	I0731 10:52:24.774870   96871 command_runner.go:130] > GitCommit:        4bfe15a9feb74ffc95e66a21c04b15fa7bbc2b90
	I0731 10:52:24.774877   96871 command_runner.go:130] > GitTreeState:     clean
	I0731 10:52:24.774885   96871 command_runner.go:130] > BuildDate:        2023-06-14T14:44:50Z
	I0731 10:52:24.774892   96871 command_runner.go:130] > GoVersion:        go1.18.2
	I0731 10:52:24.774898   96871 command_runner.go:130] > Compiler:         gc
	I0731 10:52:24.774910   96871 command_runner.go:130] > Platform:         linux/amd64
	I0731 10:52:24.774921   96871 command_runner.go:130] > Linkmode:         dynamic
	I0731 10:52:24.774937   96871 command_runner.go:130] > BuildTags:        apparmor, exclude_graphdriver_devicemapper, containers_image_ostree_stub, seccomp
	I0731 10:52:24.774949   96871 command_runner.go:130] > SeccompEnabled:   true
	I0731 10:52:24.774963   96871 command_runner.go:130] > AppArmorEnabled:  false
	I0731 10:52:24.779207   96871 out.go:177] * Preparing Kubernetes v1.27.3 on CRI-O 1.24.6 ...
	I0731 10:52:24.780718   96871 cli_runner.go:164] Run: docker network inspect multinode-776386 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
	I0731 10:52:24.795744   96871 ssh_runner.go:195] Run: grep 192.168.58.1	host.minikube.internal$ /etc/hosts
	I0731 10:52:24.798914   96871 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\thost.minikube.internal$' "/etc/hosts"; echo "192.168.58.1	host.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
	I0731 10:52:24.808351   96871 preload.go:132] Checking if preload exists for k8s version v1.27.3 and runtime crio
	I0731 10:52:24.808397   96871 ssh_runner.go:195] Run: sudo crictl images --output json
	I0731 10:52:24.854365   96871 command_runner.go:130] > {
	I0731 10:52:24.854383   96871 command_runner.go:130] >   "images": [
	I0731 10:52:24.854387   96871 command_runner.go:130] >     {
	I0731 10:52:24.854394   96871 command_runner.go:130] >       "id": "b0b1fa0f58c6e932b7f20bf208b2841317a1e8c88cc51b18358310bbd8ec95da",
	I0731 10:52:24.854400   96871 command_runner.go:130] >       "repoTags": [
	I0731 10:52:24.854408   96871 command_runner.go:130] >         "docker.io/kindest/kindnetd:v20230511-dc714da8"
	I0731 10:52:24.854412   96871 command_runner.go:130] >       ],
	I0731 10:52:24.854416   96871 command_runner.go:130] >       "repoDigests": [
	I0731 10:52:24.854424   96871 command_runner.go:130] >         "docker.io/kindest/kindnetd@sha256:6c00e28db008c2afa67d9ee085c86184ec9ae5281d5ae1bd15006746fb9a1974",
	I0731 10:52:24.854431   96871 command_runner.go:130] >         "docker.io/kindest/kindnetd@sha256:7c15172bd152f05b102cea9c8f82ef5abeb56797ec85630923fb98d20fd519e9"
	I0731 10:52:24.854439   96871 command_runner.go:130] >       ],
	I0731 10:52:24.854444   96871 command_runner.go:130] >       "size": "65249302",
	I0731 10:52:24.854447   96871 command_runner.go:130] >       "uid": null,
	I0731 10:52:24.854451   96871 command_runner.go:130] >       "username": "",
	I0731 10:52:24.854459   96871 command_runner.go:130] >       "spec": null,
	I0731 10:52:24.854468   96871 command_runner.go:130] >       "pinned": false
	I0731 10:52:24.854477   96871 command_runner.go:130] >     },
	I0731 10:52:24.854482   96871 command_runner.go:130] >     {
	I0731 10:52:24.854495   96871 command_runner.go:130] >       "id": "6e38f40d628db3002f5617342c8872c935de530d867d0f709a2fbda1a302a562",
	I0731 10:52:24.854502   96871 command_runner.go:130] >       "repoTags": [
	I0731 10:52:24.854508   96871 command_runner.go:130] >         "gcr.io/k8s-minikube/storage-provisioner:v5"
	I0731 10:52:24.854514   96871 command_runner.go:130] >       ],
	I0731 10:52:24.854518   96871 command_runner.go:130] >       "repoDigests": [
	I0731 10:52:24.854525   96871 command_runner.go:130] >         "gcr.io/k8s-minikube/storage-provisioner@sha256:18eb69d1418e854ad5a19e399310e52808a8321e4c441c1dddad8977a0d7a944",
	I0731 10:52:24.854534   96871 command_runner.go:130] >         "gcr.io/k8s-minikube/storage-provisioner@sha256:c4c05d6ad6c0f24d87b39e596d4dddf64bec3e0d84f5b36e4511d4ebf583f38f"
	I0731 10:52:24.854538   96871 command_runner.go:130] >       ],
	I0731 10:52:24.854571   96871 command_runner.go:130] >       "size": "31470524",
	I0731 10:52:24.854581   96871 command_runner.go:130] >       "uid": null,
	I0731 10:52:24.854585   96871 command_runner.go:130] >       "username": "",
	I0731 10:52:24.854589   96871 command_runner.go:130] >       "spec": null,
	I0731 10:52:24.854593   96871 command_runner.go:130] >       "pinned": false
	I0731 10:52:24.854599   96871 command_runner.go:130] >     },
	I0731 10:52:24.854603   96871 command_runner.go:130] >     {
	I0731 10:52:24.854611   96871 command_runner.go:130] >       "id": "ead0a4a53df89fd173874b46093b6e62d8c72967bbf606d672c9e8c9b601a4fc",
	I0731 10:52:24.854617   96871 command_runner.go:130] >       "repoTags": [
	I0731 10:52:24.854622   96871 command_runner.go:130] >         "registry.k8s.io/coredns/coredns:v1.10.1"
	I0731 10:52:24.854628   96871 command_runner.go:130] >       ],
	I0731 10:52:24.854632   96871 command_runner.go:130] >       "repoDigests": [
	I0731 10:52:24.854639   96871 command_runner.go:130] >         "registry.k8s.io/coredns/coredns@sha256:a0ead06651cf580044aeb0a0feba63591858fb2e43ade8c9dea45a6a89ae7e5e",
	I0731 10:52:24.854648   96871 command_runner.go:130] >         "registry.k8s.io/coredns/coredns@sha256:be7652ce0b43b1339f3d14d9b14af9f588578011092c1f7893bd55432d83a378"
	I0731 10:52:24.854652   96871 command_runner.go:130] >       ],
	I0731 10:52:24.854656   96871 command_runner.go:130] >       "size": "53621675",
	I0731 10:52:24.854662   96871 command_runner.go:130] >       "uid": null,
	I0731 10:52:24.854667   96871 command_runner.go:130] >       "username": "",
	I0731 10:52:24.854674   96871 command_runner.go:130] >       "spec": null,
	I0731 10:52:24.854678   96871 command_runner.go:130] >       "pinned": false
	I0731 10:52:24.854683   96871 command_runner.go:130] >     },
	I0731 10:52:24.854690   96871 command_runner.go:130] >     {
	I0731 10:52:24.854696   96871 command_runner.go:130] >       "id": "86b6af7dd652c1b38118be1c338e9354b33469e69a218f7e290a0ca5304ad681",
	I0731 10:52:24.854702   96871 command_runner.go:130] >       "repoTags": [
	I0731 10:52:24.854707   96871 command_runner.go:130] >         "registry.k8s.io/etcd:3.5.7-0"
	I0731 10:52:24.854713   96871 command_runner.go:130] >       ],
	I0731 10:52:24.854717   96871 command_runner.go:130] >       "repoDigests": [
	I0731 10:52:24.854723   96871 command_runner.go:130] >         "registry.k8s.io/etcd@sha256:51eae8381dcb1078289fa7b4f3df2630cdc18d09fb56f8e56b41c40e191d6c83",
	I0731 10:52:24.854732   96871 command_runner.go:130] >         "registry.k8s.io/etcd@sha256:8ae03c7bbd43d5c301eea33a39ac5eda2964f826050cb2ccf3486f18917590c9"
	I0731 10:52:24.854739   96871 command_runner.go:130] >       ],
	I0731 10:52:24.854745   96871 command_runner.go:130] >       "size": "297083935",
	I0731 10:52:24.854749   96871 command_runner.go:130] >       "uid": {
	I0731 10:52:24.854756   96871 command_runner.go:130] >         "value": "0"
	I0731 10:52:24.854759   96871 command_runner.go:130] >       },
	I0731 10:52:24.854764   96871 command_runner.go:130] >       "username": "",
	I0731 10:52:24.854770   96871 command_runner.go:130] >       "spec": null,
	I0731 10:52:24.854775   96871 command_runner.go:130] >       "pinned": false
	I0731 10:52:24.854781   96871 command_runner.go:130] >     },
	I0731 10:52:24.854784   96871 command_runner.go:130] >     {
	I0731 10:52:24.854790   96871 command_runner.go:130] >       "id": "08a0c939e61b7340db53ebf07b4d0e908a35ad8d94e2cb7d0f958210e567079a",
	I0731 10:52:24.854797   96871 command_runner.go:130] >       "repoTags": [
	I0731 10:52:24.854802   96871 command_runner.go:130] >         "registry.k8s.io/kube-apiserver:v1.27.3"
	I0731 10:52:24.854807   96871 command_runner.go:130] >       ],
	I0731 10:52:24.854812   96871 command_runner.go:130] >       "repoDigests": [
	I0731 10:52:24.854818   96871 command_runner.go:130] >         "registry.k8s.io/kube-apiserver@sha256:e4d78564d3ce7ab34940eacc61c90d035cb8a6335552c9380eaff474e791ccbb",
	I0731 10:52:24.854827   96871 command_runner.go:130] >         "registry.k8s.io/kube-apiserver@sha256:fd03335dd2e7163e5e36e933a0c735d7fec6f42b33ddafad0bc54f333e4a23c0"
	I0731 10:52:24.854831   96871 command_runner.go:130] >       ],
	I0731 10:52:24.854838   96871 command_runner.go:130] >       "size": "122065872",
	I0731 10:52:24.854842   96871 command_runner.go:130] >       "uid": {
	I0731 10:52:24.854845   96871 command_runner.go:130] >         "value": "0"
	I0731 10:52:24.854853   96871 command_runner.go:130] >       },
	I0731 10:52:24.854862   96871 command_runner.go:130] >       "username": "",
	I0731 10:52:24.854868   96871 command_runner.go:130] >       "spec": null,
	I0731 10:52:24.854872   96871 command_runner.go:130] >       "pinned": false
	I0731 10:52:24.854878   96871 command_runner.go:130] >     },
	I0731 10:52:24.854881   96871 command_runner.go:130] >     {
	I0731 10:52:24.854888   96871 command_runner.go:130] >       "id": "7cffc01dba0e151e525544f87958d12c0fa62a9f173bbc930200ce815f2aaf3f",
	I0731 10:52:24.854895   96871 command_runner.go:130] >       "repoTags": [
	I0731 10:52:24.854900   96871 command_runner.go:130] >         "registry.k8s.io/kube-controller-manager:v1.27.3"
	I0731 10:52:24.854906   96871 command_runner.go:130] >       ],
	I0731 10:52:24.854910   96871 command_runner.go:130] >       "repoDigests": [
	I0731 10:52:24.854917   96871 command_runner.go:130] >         "registry.k8s.io/kube-controller-manager@sha256:1ad8df2b525e7270cbad6fd613c4f668e336edb4436f440e49b34c4cec4fac9e",
	I0731 10:52:24.854927   96871 command_runner.go:130] >         "registry.k8s.io/kube-controller-manager@sha256:d3bdc20876edfaa4894cf8464dc98592385a43cbc033b37846dccc2460c7bc06"
	I0731 10:52:24.854931   96871 command_runner.go:130] >       ],
	I0731 10:52:24.854935   96871 command_runner.go:130] >       "size": "113919286",
	I0731 10:52:24.854939   96871 command_runner.go:130] >       "uid": {
	I0731 10:52:24.854942   96871 command_runner.go:130] >         "value": "0"
	I0731 10:52:24.854946   96871 command_runner.go:130] >       },
	I0731 10:52:24.854950   96871 command_runner.go:130] >       "username": "",
	I0731 10:52:24.854956   96871 command_runner.go:130] >       "spec": null,
	I0731 10:52:24.854960   96871 command_runner.go:130] >       "pinned": false
	I0731 10:52:24.854965   96871 command_runner.go:130] >     },
	I0731 10:52:24.854968   96871 command_runner.go:130] >     {
	I0731 10:52:24.854975   96871 command_runner.go:130] >       "id": "5780543258cf06f98595c003c0c6d22768d1fc8e9852e2839018a4bb3bfe163c",
	I0731 10:52:24.854982   96871 command_runner.go:130] >       "repoTags": [
	I0731 10:52:24.854987   96871 command_runner.go:130] >         "registry.k8s.io/kube-proxy:v1.27.3"
	I0731 10:52:24.854992   96871 command_runner.go:130] >       ],
	I0731 10:52:24.854996   96871 command_runner.go:130] >       "repoDigests": [
	I0731 10:52:24.855005   96871 command_runner.go:130] >         "registry.k8s.io/kube-proxy@sha256:091c9fe8428334e2451a0e5d214d40c415f2e0d0861794ee941f48003726570f",
	I0731 10:52:24.855011   96871 command_runner.go:130] >         "registry.k8s.io/kube-proxy@sha256:fb2bd59aae959e9649cb34101b66bb3c65f61eee9f3f81e40ed1e2325c92e699"
	I0731 10:52:24.855017   96871 command_runner.go:130] >       ],
	I0731 10:52:24.855021   96871 command_runner.go:130] >       "size": "72713623",
	I0731 10:52:24.855025   96871 command_runner.go:130] >       "uid": null,
	I0731 10:52:24.855031   96871 command_runner.go:130] >       "username": "",
	I0731 10:52:24.855035   96871 command_runner.go:130] >       "spec": null,
	I0731 10:52:24.855039   96871 command_runner.go:130] >       "pinned": false
	I0731 10:52:24.855044   96871 command_runner.go:130] >     },
	I0731 10:52:24.855048   96871 command_runner.go:130] >     {
	I0731 10:52:24.855056   96871 command_runner.go:130] >       "id": "41697ceeb70b3f49e54ed46f2cf27ac5b3a201a7d9668ca327588b23fafdf36a",
	I0731 10:52:24.855060   96871 command_runner.go:130] >       "repoTags": [
	I0731 10:52:24.855067   96871 command_runner.go:130] >         "registry.k8s.io/kube-scheduler:v1.27.3"
	I0731 10:52:24.855070   96871 command_runner.go:130] >       ],
	I0731 10:52:24.855075   96871 command_runner.go:130] >       "repoDigests": [
	I0731 10:52:24.855117   96871 command_runner.go:130] >         "registry.k8s.io/kube-scheduler@sha256:2b43d8f86e9fdc96a38743ab2b6efffd8b63d189f2c41e5de0f8deb8a8d0e082",
	I0731 10:52:24.855132   96871 command_runner.go:130] >         "registry.k8s.io/kube-scheduler@sha256:77b8db7564e395328905beb74a0b9a5db3218a4b16ec19af174957e518df40c8"
	I0731 10:52:24.855143   96871 command_runner.go:130] >       ],
	I0731 10:52:24.855150   96871 command_runner.go:130] >       "size": "59811126",
	I0731 10:52:24.855157   96871 command_runner.go:130] >       "uid": {
	I0731 10:52:24.855161   96871 command_runner.go:130] >         "value": "0"
	I0731 10:52:24.855166   96871 command_runner.go:130] >       },
	I0731 10:52:24.855170   96871 command_runner.go:130] >       "username": "",
	I0731 10:52:24.855177   96871 command_runner.go:130] >       "spec": null,
	I0731 10:52:24.855181   96871 command_runner.go:130] >       "pinned": false
	I0731 10:52:24.855185   96871 command_runner.go:130] >     },
	I0731 10:52:24.855188   96871 command_runner.go:130] >     {
	I0731 10:52:24.855197   96871 command_runner.go:130] >       "id": "e6f1816883972d4be47bd48879a08919b96afcd344132622e4d444987919323c",
	I0731 10:52:24.855203   96871 command_runner.go:130] >       "repoTags": [
	I0731 10:52:24.855212   96871 command_runner.go:130] >         "registry.k8s.io/pause:3.9"
	I0731 10:52:24.855218   96871 command_runner.go:130] >       ],
	I0731 10:52:24.855229   96871 command_runner.go:130] >       "repoDigests": [
	I0731 10:52:24.855241   96871 command_runner.go:130] >         "registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097",
	I0731 10:52:24.855252   96871 command_runner.go:130] >         "registry.k8s.io/pause@sha256:8d4106c88ec0bd28001e34c975d65175d994072d65341f62a8ab0754b0fafe10"
	I0731 10:52:24.855260   96871 command_runner.go:130] >       ],
	I0731 10:52:24.855264   96871 command_runner.go:130] >       "size": "750414",
	I0731 10:52:24.855271   96871 command_runner.go:130] >       "uid": {
	I0731 10:52:24.855275   96871 command_runner.go:130] >         "value": "65535"
	I0731 10:52:24.855278   96871 command_runner.go:130] >       },
	I0731 10:52:24.855285   96871 command_runner.go:130] >       "username": "",
	I0731 10:52:24.855289   96871 command_runner.go:130] >       "spec": null,
	I0731 10:52:24.855295   96871 command_runner.go:130] >       "pinned": false
	I0731 10:52:24.855299   96871 command_runner.go:130] >     }
	I0731 10:52:24.855304   96871 command_runner.go:130] >   ]
	I0731 10:52:24.855308   96871 command_runner.go:130] > }
	I0731 10:52:24.856468   96871 crio.go:496] all images are preloaded for cri-o runtime.
	I0731 10:52:24.856483   96871 crio.go:415] Images already preloaded, skipping extraction
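
The preload check reduces to comparing the runtime's image list against the expected set for this Kubernetes version. A manual spot check of what the runtime reports (assumes jq is available on the host running crictl):

    sudo crictl images --output json | jq -r '.images[].repoTags[]' | sort
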
	I0731 10:52:24.856519   96871 ssh_runner.go:195] Run: sudo crictl images --output json
	I0731 10:52:24.884846   96871 command_runner.go:130] > {
	I0731 10:52:24.884863   96871 command_runner.go:130] >   "images": [
	I0731 10:52:24.884866   96871 command_runner.go:130] >     {
	I0731 10:52:24.884875   96871 command_runner.go:130] >       "id": "b0b1fa0f58c6e932b7f20bf208b2841317a1e8c88cc51b18358310bbd8ec95da",
	I0731 10:52:24.884882   96871 command_runner.go:130] >       "repoTags": [
	I0731 10:52:24.884891   96871 command_runner.go:130] >         "docker.io/kindest/kindnetd:v20230511-dc714da8"
	I0731 10:52:24.884896   96871 command_runner.go:130] >       ],
	I0731 10:52:24.884902   96871 command_runner.go:130] >       "repoDigests": [
	I0731 10:52:24.884920   96871 command_runner.go:130] >         "docker.io/kindest/kindnetd@sha256:6c00e28db008c2afa67d9ee085c86184ec9ae5281d5ae1bd15006746fb9a1974",
	I0731 10:52:24.884935   96871 command_runner.go:130] >         "docker.io/kindest/kindnetd@sha256:7c15172bd152f05b102cea9c8f82ef5abeb56797ec85630923fb98d20fd519e9"
	I0731 10:52:24.884942   96871 command_runner.go:130] >       ],
	I0731 10:52:24.884947   96871 command_runner.go:130] >       "size": "65249302",
	I0731 10:52:24.884953   96871 command_runner.go:130] >       "uid": null,
	I0731 10:52:24.884957   96871 command_runner.go:130] >       "username": "",
	I0731 10:52:24.884970   96871 command_runner.go:130] >       "spec": null,
	I0731 10:52:24.884980   96871 command_runner.go:130] >       "pinned": false
	I0731 10:52:24.884986   96871 command_runner.go:130] >     },
	I0731 10:52:24.884989   96871 command_runner.go:130] >     {
	I0731 10:52:24.884996   96871 command_runner.go:130] >       "id": "6e38f40d628db3002f5617342c8872c935de530d867d0f709a2fbda1a302a562",
	I0731 10:52:24.885002   96871 command_runner.go:130] >       "repoTags": [
	I0731 10:52:24.885008   96871 command_runner.go:130] >         "gcr.io/k8s-minikube/storage-provisioner:v5"
	I0731 10:52:24.885014   96871 command_runner.go:130] >       ],
	I0731 10:52:24.885018   96871 command_runner.go:130] >       "repoDigests": [
	I0731 10:52:24.885025   96871 command_runner.go:130] >         "gcr.io/k8s-minikube/storage-provisioner@sha256:18eb69d1418e854ad5a19e399310e52808a8321e4c441c1dddad8977a0d7a944",
	I0731 10:52:24.885032   96871 command_runner.go:130] >         "gcr.io/k8s-minikube/storage-provisioner@sha256:c4c05d6ad6c0f24d87b39e596d4dddf64bec3e0d84f5b36e4511d4ebf583f38f"
	I0731 10:52:24.885036   96871 command_runner.go:130] >       ],
	I0731 10:52:24.885043   96871 command_runner.go:130] >       "size": "31470524",
	I0731 10:52:24.885046   96871 command_runner.go:130] >       "uid": null,
	I0731 10:52:24.885050   96871 command_runner.go:130] >       "username": "",
	I0731 10:52:24.885054   96871 command_runner.go:130] >       "spec": null,
	I0731 10:52:24.885057   96871 command_runner.go:130] >       "pinned": false
	I0731 10:52:24.885060   96871 command_runner.go:130] >     },
	I0731 10:52:24.885064   96871 command_runner.go:130] >     {
	I0731 10:52:24.885075   96871 command_runner.go:130] >       "id": "ead0a4a53df89fd173874b46093b6e62d8c72967bbf606d672c9e8c9b601a4fc",
	I0731 10:52:24.885081   96871 command_runner.go:130] >       "repoTags": [
	I0731 10:52:24.885086   96871 command_runner.go:130] >         "registry.k8s.io/coredns/coredns:v1.10.1"
	I0731 10:52:24.885089   96871 command_runner.go:130] >       ],
	I0731 10:52:24.885095   96871 command_runner.go:130] >       "repoDigests": [
	I0731 10:52:24.885101   96871 command_runner.go:130] >         "registry.k8s.io/coredns/coredns@sha256:a0ead06651cf580044aeb0a0feba63591858fb2e43ade8c9dea45a6a89ae7e5e",
	I0731 10:52:24.885110   96871 command_runner.go:130] >         "registry.k8s.io/coredns/coredns@sha256:be7652ce0b43b1339f3d14d9b14af9f588578011092c1f7893bd55432d83a378"
	I0731 10:52:24.885114   96871 command_runner.go:130] >       ],
	I0731 10:52:24.885119   96871 command_runner.go:130] >       "size": "53621675",
	I0731 10:52:24.885125   96871 command_runner.go:130] >       "uid": null,
	I0731 10:52:24.885129   96871 command_runner.go:130] >       "username": "",
	I0731 10:52:24.885135   96871 command_runner.go:130] >       "spec": null,
	I0731 10:52:24.885142   96871 command_runner.go:130] >       "pinned": false
	I0731 10:52:24.885147   96871 command_runner.go:130] >     },
	I0731 10:52:24.885151   96871 command_runner.go:130] >     {
	I0731 10:52:24.885159   96871 command_runner.go:130] >       "id": "86b6af7dd652c1b38118be1c338e9354b33469e69a218f7e290a0ca5304ad681",
	I0731 10:52:24.885166   96871 command_runner.go:130] >       "repoTags": [
	I0731 10:52:24.885171   96871 command_runner.go:130] >         "registry.k8s.io/etcd:3.5.7-0"
	I0731 10:52:24.885177   96871 command_runner.go:130] >       ],
	I0731 10:52:24.885184   96871 command_runner.go:130] >       "repoDigests": [
	I0731 10:52:24.885191   96871 command_runner.go:130] >         "registry.k8s.io/etcd@sha256:51eae8381dcb1078289fa7b4f3df2630cdc18d09fb56f8e56b41c40e191d6c83",
	I0731 10:52:24.885200   96871 command_runner.go:130] >         "registry.k8s.io/etcd@sha256:8ae03c7bbd43d5c301eea33a39ac5eda2964f826050cb2ccf3486f18917590c9"
	I0731 10:52:24.885212   96871 command_runner.go:130] >       ],
	I0731 10:52:24.885219   96871 command_runner.go:130] >       "size": "297083935",
	I0731 10:52:24.885223   96871 command_runner.go:130] >       "uid": {
	I0731 10:52:24.885229   96871 command_runner.go:130] >         "value": "0"
	I0731 10:52:24.885232   96871 command_runner.go:130] >       },
	I0731 10:52:24.885239   96871 command_runner.go:130] >       "username": "",
	I0731 10:52:24.885243   96871 command_runner.go:130] >       "spec": null,
	I0731 10:52:24.885249   96871 command_runner.go:130] >       "pinned": false
	I0731 10:52:24.885253   96871 command_runner.go:130] >     },
	I0731 10:52:24.885256   96871 command_runner.go:130] >     {
	I0731 10:52:24.885264   96871 command_runner.go:130] >       "id": "08a0c939e61b7340db53ebf07b4d0e908a35ad8d94e2cb7d0f958210e567079a",
	I0731 10:52:24.885268   96871 command_runner.go:130] >       "repoTags": [
	I0731 10:52:24.885275   96871 command_runner.go:130] >         "registry.k8s.io/kube-apiserver:v1.27.3"
	I0731 10:52:24.885279   96871 command_runner.go:130] >       ],
	I0731 10:52:24.885288   96871 command_runner.go:130] >       "repoDigests": [
	I0731 10:52:24.885294   96871 command_runner.go:130] >         "registry.k8s.io/kube-apiserver@sha256:e4d78564d3ce7ab34940eacc61c90d035cb8a6335552c9380eaff474e791ccbb",
	I0731 10:52:24.885304   96871 command_runner.go:130] >         "registry.k8s.io/kube-apiserver@sha256:fd03335dd2e7163e5e36e933a0c735d7fec6f42b33ddafad0bc54f333e4a23c0"
	I0731 10:52:24.885310   96871 command_runner.go:130] >       ],
	I0731 10:52:24.885314   96871 command_runner.go:130] >       "size": "122065872",
	I0731 10:52:24.885321   96871 command_runner.go:130] >       "uid": {
	I0731 10:52:24.885326   96871 command_runner.go:130] >         "value": "0"
	I0731 10:52:24.885330   96871 command_runner.go:130] >       },
	I0731 10:52:24.885334   96871 command_runner.go:130] >       "username": "",
	I0731 10:52:24.885338   96871 command_runner.go:130] >       "spec": null,
	I0731 10:52:24.885344   96871 command_runner.go:130] >       "pinned": false
	I0731 10:52:24.885348   96871 command_runner.go:130] >     },
	I0731 10:52:24.885352   96871 command_runner.go:130] >     {
	I0731 10:52:24.885358   96871 command_runner.go:130] >       "id": "7cffc01dba0e151e525544f87958d12c0fa62a9f173bbc930200ce815f2aaf3f",
	I0731 10:52:24.885364   96871 command_runner.go:130] >       "repoTags": [
	I0731 10:52:24.885369   96871 command_runner.go:130] >         "registry.k8s.io/kube-controller-manager:v1.27.3"
	I0731 10:52:24.885375   96871 command_runner.go:130] >       ],
	I0731 10:52:24.885379   96871 command_runner.go:130] >       "repoDigests": [
	I0731 10:52:24.885391   96871 command_runner.go:130] >         "registry.k8s.io/kube-controller-manager@sha256:1ad8df2b525e7270cbad6fd613c4f668e336edb4436f440e49b34c4cec4fac9e",
	I0731 10:52:24.885402   96871 command_runner.go:130] >         "registry.k8s.io/kube-controller-manager@sha256:d3bdc20876edfaa4894cf8464dc98592385a43cbc033b37846dccc2460c7bc06"
	I0731 10:52:24.885408   96871 command_runner.go:130] >       ],
	I0731 10:52:24.885412   96871 command_runner.go:130] >       "size": "113919286",
	I0731 10:52:24.885416   96871 command_runner.go:130] >       "uid": {
	I0731 10:52:24.885420   96871 command_runner.go:130] >         "value": "0"
	I0731 10:52:24.885423   96871 command_runner.go:130] >       },
	I0731 10:52:24.885430   96871 command_runner.go:130] >       "username": "",
	I0731 10:52:24.885434   96871 command_runner.go:130] >       "spec": null,
	I0731 10:52:24.885446   96871 command_runner.go:130] >       "pinned": false
	I0731 10:52:24.885452   96871 command_runner.go:130] >     },
	I0731 10:52:24.885455   96871 command_runner.go:130] >     {
	I0731 10:52:24.885463   96871 command_runner.go:130] >       "id": "5780543258cf06f98595c003c0c6d22768d1fc8e9852e2839018a4bb3bfe163c",
	I0731 10:52:24.885468   96871 command_runner.go:130] >       "repoTags": [
	I0731 10:52:24.885473   96871 command_runner.go:130] >         "registry.k8s.io/kube-proxy:v1.27.3"
	I0731 10:52:24.885479   96871 command_runner.go:130] >       ],
	I0731 10:52:24.885483   96871 command_runner.go:130] >       "repoDigests": [
	I0731 10:52:24.885492   96871 command_runner.go:130] >         "registry.k8s.io/kube-proxy@sha256:091c9fe8428334e2451a0e5d214d40c415f2e0d0861794ee941f48003726570f",
	I0731 10:52:24.885504   96871 command_runner.go:130] >         "registry.k8s.io/kube-proxy@sha256:fb2bd59aae959e9649cb34101b66bb3c65f61eee9f3f81e40ed1e2325c92e699"
	I0731 10:52:24.885508   96871 command_runner.go:130] >       ],
	I0731 10:52:24.885513   96871 command_runner.go:130] >       "size": "72713623",
	I0731 10:52:24.885516   96871 command_runner.go:130] >       "uid": null,
	I0731 10:52:24.885523   96871 command_runner.go:130] >       "username": "",
	I0731 10:52:24.885529   96871 command_runner.go:130] >       "spec": null,
	I0731 10:52:24.885533   96871 command_runner.go:130] >       "pinned": false
	I0731 10:52:24.885539   96871 command_runner.go:130] >     },
	I0731 10:52:24.885542   96871 command_runner.go:130] >     {
	I0731 10:52:24.885550   96871 command_runner.go:130] >       "id": "41697ceeb70b3f49e54ed46f2cf27ac5b3a201a7d9668ca327588b23fafdf36a",
	I0731 10:52:24.885554   96871 command_runner.go:130] >       "repoTags": [
	I0731 10:52:24.885562   96871 command_runner.go:130] >         "registry.k8s.io/kube-scheduler:v1.27.3"
	I0731 10:52:24.885565   96871 command_runner.go:130] >       ],
	I0731 10:52:24.885572   96871 command_runner.go:130] >       "repoDigests": [
	I0731 10:52:24.885613   96871 command_runner.go:130] >         "registry.k8s.io/kube-scheduler@sha256:2b43d8f86e9fdc96a38743ab2b6efffd8b63d189f2c41e5de0f8deb8a8d0e082",
	I0731 10:52:24.885625   96871 command_runner.go:130] >         "registry.k8s.io/kube-scheduler@sha256:77b8db7564e395328905beb74a0b9a5db3218a4b16ec19af174957e518df40c8"
	I0731 10:52:24.885628   96871 command_runner.go:130] >       ],
	I0731 10:52:24.885632   96871 command_runner.go:130] >       "size": "59811126",
	I0731 10:52:24.885639   96871 command_runner.go:130] >       "uid": {
	I0731 10:52:24.885646   96871 command_runner.go:130] >         "value": "0"
	I0731 10:52:24.885652   96871 command_runner.go:130] >       },
	I0731 10:52:24.885662   96871 command_runner.go:130] >       "username": "",
	I0731 10:52:24.885668   96871 command_runner.go:130] >       "spec": null,
	I0731 10:52:24.885678   96871 command_runner.go:130] >       "pinned": false
	I0731 10:52:24.885683   96871 command_runner.go:130] >     },
	I0731 10:52:24.885691   96871 command_runner.go:130] >     {
	I0731 10:52:24.885701   96871 command_runner.go:130] >       "id": "e6f1816883972d4be47bd48879a08919b96afcd344132622e4d444987919323c",
	I0731 10:52:24.885710   96871 command_runner.go:130] >       "repoTags": [
	I0731 10:52:24.885715   96871 command_runner.go:130] >         "registry.k8s.io/pause:3.9"
	I0731 10:52:24.885722   96871 command_runner.go:130] >       ],
	I0731 10:52:24.885726   96871 command_runner.go:130] >       "repoDigests": [
	I0731 10:52:24.885735   96871 command_runner.go:130] >         "registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097",
	I0731 10:52:24.885742   96871 command_runner.go:130] >         "registry.k8s.io/pause@sha256:8d4106c88ec0bd28001e34c975d65175d994072d65341f62a8ab0754b0fafe10"
	I0731 10:52:24.885748   96871 command_runner.go:130] >       ],
	I0731 10:52:24.885752   96871 command_runner.go:130] >       "size": "750414",
	I0731 10:52:24.885758   96871 command_runner.go:130] >       "uid": {
	I0731 10:52:24.885765   96871 command_runner.go:130] >         "value": "65535"
	I0731 10:52:24.885771   96871 command_runner.go:130] >       },
	I0731 10:52:24.885775   96871 command_runner.go:130] >       "username": "",
	I0731 10:52:24.885782   96871 command_runner.go:130] >       "spec": null,
	I0731 10:52:24.885786   96871 command_runner.go:130] >       "pinned": false
	I0731 10:52:24.885791   96871 command_runner.go:130] >     }
	I0731 10:52:24.885795   96871 command_runner.go:130] >   ]
	I0731 10:52:24.885801   96871 command_runner.go:130] > }
	I0731 10:52:24.886983   96871 crio.go:496] all images are preloaded for cri-o runtime.
	I0731 10:52:24.886999   96871 cache_images.go:84] Images are preloaded, skipping loading
	I0731 10:52:24.887054   96871 ssh_runner.go:195] Run: crio config
	I0731 10:52:24.922065   96871 command_runner.go:130] > # The CRI-O configuration file specifies all of the available configuration
	I0731 10:52:24.922094   96871 command_runner.go:130] > # options and command-line flags for the crio(8) OCI Kubernetes Container Runtime
	I0731 10:52:24.922105   96871 command_runner.go:130] > # daemon, but in a TOML format that can be more easily modified and versioned.
	I0731 10:52:24.922111   96871 command_runner.go:130] > #
	I0731 10:52:24.922122   96871 command_runner.go:130] > # Please refer to crio.conf(5) for details of all configuration options.
	I0731 10:52:24.922133   96871 command_runner.go:130] > # CRI-O supports partial configuration reload during runtime, which can be
	I0731 10:52:24.922149   96871 command_runner.go:130] > # done by sending SIGHUP to the running process. Currently supported options
	I0731 10:52:24.922176   96871 command_runner.go:130] > # are explicitly mentioned with: 'This option supports live configuration
	I0731 10:52:24.922199   96871 command_runner.go:130] > # reload'.
	I0731 10:52:24.922210   96871 command_runner.go:130] > # CRI-O reads its storage defaults from the containers-storage.conf(5) file
	I0731 10:52:24.922223   96871 command_runner.go:130] > # located at /etc/containers/storage.conf. Modify this storage configuration if
	I0731 10:52:24.922231   96871 command_runner.go:130] > # you want to change the system's defaults. If you want to modify storage just
	I0731 10:52:24.922237   96871 command_runner.go:130] > # for CRI-O, you can change the storage configuration options here.
	I0731 10:52:24.922243   96871 command_runner.go:130] > [crio]
	I0731 10:52:24.922248   96871 command_runner.go:130] > # Path to the "root directory". CRI-O stores all of its data, including
	I0731 10:52:24.922256   96871 command_runner.go:130] > # containers images, in this directory.
	I0731 10:52:24.922266   96871 command_runner.go:130] > # root = "/home/docker/.local/share/containers/storage"
	I0731 10:52:24.922275   96871 command_runner.go:130] > # Path to the "run directory". CRI-O stores all of its state in this directory.
	I0731 10:52:24.922282   96871 command_runner.go:130] > # runroot = "/tmp/containers-user-1000/containers"
	I0731 10:52:24.922288   96871 command_runner.go:130] > # Storage driver used to manage the storage of images and containers. Please
	I0731 10:52:24.922297   96871 command_runner.go:130] > # refer to containers-storage.conf(5) to see all available storage drivers.
	I0731 10:52:24.922303   96871 command_runner.go:130] > # storage_driver = "vfs"
	I0731 10:52:24.922316   96871 command_runner.go:130] > # List to pass options to the storage driver. Please refer to
	I0731 10:52:24.922326   96871 command_runner.go:130] > # containers-storage.conf(5) to see all available storage options.
	I0731 10:52:24.922333   96871 command_runner.go:130] > # storage_option = [
	I0731 10:52:24.922342   96871 command_runner.go:130] > # ]
	I0731 10:52:24.922352   96871 command_runner.go:130] > # The default log directory where all logs will go unless directly specified by
	I0731 10:52:24.922366   96871 command_runner.go:130] > # the kubelet. The log directory specified must be an absolute directory.
	I0731 10:52:24.922374   96871 command_runner.go:130] > # log_dir = "/var/log/crio/pods"
	I0731 10:52:24.922383   96871 command_runner.go:130] > # Location for CRI-O to lay down the temporary version file.
	I0731 10:52:24.922393   96871 command_runner.go:130] > # It is used to check if crio wipe should wipe containers, which should
	I0731 10:52:24.922404   96871 command_runner.go:130] > # always happen on a node reboot
	I0731 10:52:24.922414   96871 command_runner.go:130] > # version_file = "/var/run/crio/version"
	I0731 10:52:24.922428   96871 command_runner.go:130] > # Location for CRI-O to lay down the persistent version file.
	I0731 10:52:24.922441   96871 command_runner.go:130] > # It is used to check if crio wipe should wipe images, which should
	I0731 10:52:24.922459   96871 command_runner.go:130] > # only happen when CRI-O has been upgraded
	I0731 10:52:24.922472   96871 command_runner.go:130] > # version_file_persist = "/var/lib/crio/version"
	I0731 10:52:24.922488   96871 command_runner.go:130] > # InternalWipe is whether CRI-O should wipe containers and images after a reboot when the server starts.
	I0731 10:52:24.922504   96871 command_runner.go:130] > # If set to false, one must use the external command 'crio wipe' to wipe the containers and images in these situations.
	I0731 10:52:24.922513   96871 command_runner.go:130] > # internal_wipe = true
	I0731 10:52:24.922519   96871 command_runner.go:130] > # Location for CRI-O to lay down the clean shutdown file.
	I0731 10:52:24.922531   96871 command_runner.go:130] > # It is used to check whether crio had time to sync before shutting down.
	I0731 10:52:24.922544   96871 command_runner.go:130] > # If not found, crio wipe will clear the storage directory.
	I0731 10:52:24.922554   96871 command_runner.go:130] > # clean_shutdown_file = "/var/lib/crio/clean.shutdown"
	I0731 10:52:24.922572   96871 command_runner.go:130] > # The crio.api table contains settings for the kubelet/gRPC interface.
	I0731 10:52:24.922581   96871 command_runner.go:130] > [crio.api]
	I0731 10:52:24.922591   96871 command_runner.go:130] > # Path to AF_LOCAL socket on which CRI-O will listen.
	I0731 10:52:24.922601   96871 command_runner.go:130] > # listen = "/var/run/crio/crio.sock"
	I0731 10:52:24.922609   96871 command_runner.go:130] > # IP address on which the stream server will listen.
	I0731 10:52:24.922620   96871 command_runner.go:130] > # stream_address = "127.0.0.1"
	I0731 10:52:24.922631   96871 command_runner.go:130] > # The port on which the stream server will listen. If the port is set to "0", then
	I0731 10:52:24.922643   96871 command_runner.go:130] > # CRI-O will allocate a random free port number.
	I0731 10:52:24.922651   96871 command_runner.go:130] > # stream_port = "0"
	I0731 10:52:24.922662   96871 command_runner.go:130] > # Enable encrypted TLS transport of the stream server.
	I0731 10:52:24.922673   96871 command_runner.go:130] > # stream_enable_tls = false
	I0731 10:52:24.922683   96871 command_runner.go:130] > # Length of time until open streams terminate due to lack of activity
	I0731 10:52:24.922693   96871 command_runner.go:130] > # stream_idle_timeout = ""
	I0731 10:52:24.922704   96871 command_runner.go:130] > # Path to the x509 certificate file used to serve the encrypted stream. This
	I0731 10:52:24.922717   96871 command_runner.go:130] > # file can change, and CRI-O will automatically pick up the changes within 5
	I0731 10:52:24.922735   96871 command_runner.go:130] > # minutes.
	I0731 10:52:24.922748   96871 command_runner.go:130] > # stream_tls_cert = ""
	I0731 10:52:24.922762   96871 command_runner.go:130] > # Path to the key file used to serve the encrypted stream. This file can
	I0731 10:52:24.922775   96871 command_runner.go:130] > # change and CRI-O will automatically pick up the changes within 5 minutes.
	I0731 10:52:24.922782   96871 command_runner.go:130] > # stream_tls_key = ""
	I0731 10:52:24.922793   96871 command_runner.go:130] > # Path to the x509 CA(s) file used to verify and authenticate client
	I0731 10:52:24.922807   96871 command_runner.go:130] > # communication with the encrypted stream. This file can change and CRI-O will
	I0731 10:52:24.922820   96871 command_runner.go:130] > # automatically pick up the changes within 5 minutes.
	I0731 10:52:24.922889   96871 command_runner.go:130] > # stream_tls_ca = ""
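A minimal sketch of what enabling the encrypted stream server could look like in a crio.conf drop-in; the certificate paths are hypothetical placeholders, not values from this run:

	[crio.api]
	stream_enable_tls = true
	stream_tls_cert = "/etc/crio/tls/stream.crt"  # hypothetical path; picked up within 5 minutes on change
	stream_tls_key = "/etc/crio/tls/stream.key"   # hypothetical path
	stream_tls_ca = "/etc/crio/tls/ca.crt"        # hypothetical client CA bundle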
	I0731 10:52:24.922907   96871 command_runner.go:130] > # Maximum grpc send message size in bytes. If not set or <=0, then CRI-O will default to 16 * 1024 * 1024.
	I0731 10:52:24.922918   96871 command_runner.go:130] > # grpc_max_send_msg_size = 83886080
	I0731 10:52:24.922933   96871 command_runner.go:130] > # Maximum grpc receive message size. If not set or <= 0, then CRI-O will default to 16 * 1024 * 1024.
	I0731 10:52:24.922945   96871 command_runner.go:130] > # grpc_max_recv_msg_size = 83886080
	I0731 10:52:24.922995   96871 command_runner.go:130] > # The crio.runtime table contains settings pertaining to the OCI runtime used
	I0731 10:52:24.923010   96871 command_runner.go:130] > # and options for how to set up and manage the OCI runtime.
	I0731 10:52:24.923018   96871 command_runner.go:130] > [crio.runtime]
	I0731 10:52:24.923032   96871 command_runner.go:130] > # A list of ulimits to be set in containers by default, specified as
	I0731 10:52:24.923045   96871 command_runner.go:130] > # "<ulimit name>=<soft limit>:<hard limit>", for example:
	I0731 10:52:24.923056   96871 command_runner.go:130] > # "nofile=1024:2048"
	I0731 10:52:24.923071   96871 command_runner.go:130] > # If nothing is set here, settings will be inherited from the CRI-O daemon
	I0731 10:52:24.923081   96871 command_runner.go:130] > # default_ulimits = [
	I0731 10:52:24.923090   96871 command_runner.go:130] > # ]
	I0731 10:52:24.923102   96871 command_runner.go:130] > # If true, the runtime will not use pivot_root, but instead use MS_MOVE.
	I0731 10:52:24.923115   96871 command_runner.go:130] > # no_pivot = false
	I0731 10:52:24.923128   96871 command_runner.go:130] > # decryption_keys_path is the path where the keys required for
	I0731 10:52:24.923140   96871 command_runner.go:130] > # image decryption are stored. This option supports live configuration reload.
	I0731 10:52:24.923152   96871 command_runner.go:130] > # decryption_keys_path = "/etc/crio/keys/"
	I0731 10:52:24.923166   96871 command_runner.go:130] > # Path to the conmon binary, used for monitoring the OCI runtime.
	I0731 10:52:24.923177   96871 command_runner.go:130] > # Will be searched for using $PATH if empty.
	I0731 10:52:24.923188   96871 command_runner.go:130] > # This option is currently deprecated, and will be replaced with RuntimeHandler.MonitorEnv.
	I0731 10:52:24.923195   96871 command_runner.go:130] > # conmon = ""
	I0731 10:52:24.923206   96871 command_runner.go:130] > # Cgroup setting for conmon
	I0731 10:52:24.923221   96871 command_runner.go:130] > # This option is currently deprecated, and will be replaced with RuntimeHandler.MonitorCgroup.
	I0731 10:52:24.923231   96871 command_runner.go:130] > conmon_cgroup = "pod"
	I0731 10:52:24.923244   96871 command_runner.go:130] > # Environment variable list for the conmon process, used for passing necessary
	I0731 10:52:24.923254   96871 command_runner.go:130] > # environment variables to conmon or the runtime.
	I0731 10:52:24.923269   96871 command_runner.go:130] > # This option is currently deprecated, and will be replaced with RuntimeHandler.MonitorEnv.
	I0731 10:52:24.923278   96871 command_runner.go:130] > # conmon_env = [
	I0731 10:52:24.923286   96871 command_runner.go:130] > # ]
	I0731 10:52:24.923293   96871 command_runner.go:130] > # Additional environment variables to set for all the
	I0731 10:52:24.923310   96871 command_runner.go:130] > # containers. These are overridden if set in the
	I0731 10:52:24.923325   96871 command_runner.go:130] > # container image spec or in the container runtime configuration.
	I0731 10:52:24.923332   96871 command_runner.go:130] > # default_env = [
	I0731 10:52:24.923341   96871 command_runner.go:130] > # ]
	I0731 10:52:24.923351   96871 command_runner.go:130] > # If true, SELinux will be used for pod separation on the host.
	I0731 10:52:24.923361   96871 command_runner.go:130] > # selinux = false
	I0731 10:52:24.923373   96871 command_runner.go:130] > # Path to the seccomp.json profile which is used as the default seccomp profile
	I0731 10:52:24.923387   96871 command_runner.go:130] > # for the runtime. If not specified, then the internal default seccomp profile
	I0731 10:52:24.923400   96871 command_runner.go:130] > # will be used. This option supports live configuration reload.
	I0731 10:52:24.923411   96871 command_runner.go:130] > # seccomp_profile = ""
	I0731 10:52:24.923424   96871 command_runner.go:130] > # Changes the meaning of an empty seccomp profile. By default
	I0731 10:52:24.923437   96871 command_runner.go:130] > # (and according to CRI spec), an empty profile means unconfined.
	I0731 10:52:24.923449   96871 command_runner.go:130] > # This option tells CRI-O to treat an empty profile as the default profile,
	I0731 10:52:24.923464   96871 command_runner.go:130] > # which might increase security.
	I0731 10:52:24.923476   96871 command_runner.go:130] > # seccomp_use_default_when_empty = true
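A hedged illustration of the two seccomp options above; the profile path is an assumption:

	[crio.runtime]
	seccomp_profile = "/etc/crio/custom-seccomp.json"  # hypothetical profile; unset means the built-in default
	seccomp_use_default_when_empty = true              # treat an empty profile as the default instead of unconfined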
	I0731 10:52:24.923491   96871 command_runner.go:130] > # Used to change the name of the default AppArmor profile of CRI-O. The default
	I0731 10:52:24.923509   96871 command_runner.go:130] > # profile name is "crio-default". This profile only takes effect if the user
	I0731 10:52:24.923523   96871 command_runner.go:130] > # does not specify a profile via the Kubernetes Pod's metadata annotation. If
	I0731 10:52:24.923537   96871 command_runner.go:130] > # the profile is set to "unconfined", then this equals to disabling AppArmor.
	I0731 10:52:24.923546   96871 command_runner.go:130] > # This option supports live configuration reload.
	I0731 10:52:24.923557   96871 command_runner.go:130] > # apparmor_profile = "crio-default"
	I0731 10:52:24.923574   96871 command_runner.go:130] > # Path to the blockio class configuration file for configuring
	I0731 10:52:24.923585   96871 command_runner.go:130] > # the cgroup blockio controller.
	I0731 10:52:24.923596   96871 command_runner.go:130] > # blockio_config_file = ""
	I0731 10:52:24.923608   96871 command_runner.go:130] > # Used to change irqbalance service config file path which is used for configuring
	I0731 10:52:24.923619   96871 command_runner.go:130] > # irqbalance daemon.
	I0731 10:52:24.923636   96871 command_runner.go:130] > # irqbalance_config_file = "/etc/sysconfig/irqbalance"
	I0731 10:52:24.923651   96871 command_runner.go:130] > # Path to the RDT configuration file for configuring the resctrl pseudo-filesystem.
	I0731 10:52:24.923664   96871 command_runner.go:130] > # This option supports live configuration reload.
	I0731 10:52:24.923675   96871 command_runner.go:130] > # rdt_config_file = ""
	I0731 10:52:24.923688   96871 command_runner.go:130] > # Cgroup management implementation used for the runtime.
	I0731 10:52:24.923699   96871 command_runner.go:130] > cgroup_manager = "cgroupfs"
	I0731 10:52:24.923711   96871 command_runner.go:130] > # Specify whether the image pull must be performed in a separate cgroup.
	I0731 10:52:24.923720   96871 command_runner.go:130] > # separate_pull_cgroup = ""
	I0731 10:52:24.923736   96871 command_runner.go:130] > # List of default capabilities for containers. If it is empty or commented out,
	I0731 10:52:24.923750   96871 command_runner.go:130] > # only the capabilities defined in the containers json file by the user/kube
	I0731 10:52:24.923760   96871 command_runner.go:130] > # will be added.
	I0731 10:52:24.923771   96871 command_runner.go:130] > # default_capabilities = [
	I0731 10:52:24.923779   96871 command_runner.go:130] > # 	"CHOWN",
	I0731 10:52:24.923789   96871 command_runner.go:130] > # 	"DAC_OVERRIDE",
	I0731 10:52:24.923797   96871 command_runner.go:130] > # 	"FSETID",
	I0731 10:52:24.923804   96871 command_runner.go:130] > # 	"FOWNER",
	I0731 10:52:24.923814   96871 command_runner.go:130] > # 	"SETGID",
	I0731 10:52:24.923821   96871 command_runner.go:130] > # 	"SETUID",
	I0731 10:52:24.923831   96871 command_runner.go:130] > # 	"SETPCAP",
	I0731 10:52:24.923838   96871 command_runner.go:130] > # 	"NET_BIND_SERVICE",
	I0731 10:52:24.923847   96871 command_runner.go:130] > # 	"KILL",
	I0731 10:52:24.923853   96871 command_runner.go:130] > # ]
	I0731 10:52:24.923870   96871 command_runner.go:130] > # Add capabilities to the inheritable set, as well as the default group of permitted, bounding and effective.
	I0731 10:52:24.923885   96871 command_runner.go:130] > # If capabilities are expected to work for non-root users, this option should be set.
	I0731 10:52:24.923896   96871 command_runner.go:130] > # add_inheritable_capabilities = true
	I0731 10:52:24.923910   96871 command_runner.go:130] > # List of default sysctls. If it is empty or commented out, only the sysctls
	I0731 10:52:24.923955   96871 command_runner.go:130] > # defined in the container json file by the user/kube will be added.
	I0731 10:52:24.923970   96871 command_runner.go:130] > # default_sysctls = [
	I0731 10:52:24.923975   96871 command_runner.go:130] > # ]
	I0731 10:52:24.923983   96871 command_runner.go:130] > # List of devices on the host that a
	I0731 10:52:24.923994   96871 command_runner.go:130] > # user can specify with the "io.kubernetes.cri-o.Devices" allowed annotation.
	I0731 10:52:24.924000   96871 command_runner.go:130] > # allowed_devices = [
	I0731 10:52:24.924008   96871 command_runner.go:130] > # 	"/dev/fuse",
	I0731 10:52:24.924017   96871 command_runner.go:130] > # ]
	I0731 10:52:24.924025   96871 command_runner.go:130] > # List of additional devices, specified as
	I0731 10:52:24.924067   96871 command_runner.go:130] > # "<device-on-host>:<device-on-container>:<permissions>", for example: "--device=/dev/sdc:/dev/xvdc:rwm".
	I0731 10:52:24.924081   96871 command_runner.go:130] > # If it is empty or commented out, only the devices
	I0731 10:52:24.924091   96871 command_runner.go:130] > # defined in the container json file by the user/kube will be added.
	I0731 10:52:24.924105   96871 command_runner.go:130] > # additional_devices = [
	I0731 10:52:24.924113   96871 command_runner.go:130] > # ]
	I0731 10:52:24.924125   96871 command_runner.go:130] > # List of directories to scan for CDI Spec files.
	I0731 10:52:24.924134   96871 command_runner.go:130] > # cdi_spec_dirs = [
	I0731 10:52:24.924143   96871 command_runner.go:130] > # 	"/etc/cdi",
	I0731 10:52:24.924150   96871 command_runner.go:130] > # 	"/var/run/cdi",
	I0731 10:52:24.924157   96871 command_runner.go:130] > # ]
	I0731 10:52:24.924171   96871 command_runner.go:130] > # Change the default behavior of setting container devices uid/gid from CRI's
	I0731 10:52:24.924185   96871 command_runner.go:130] > # SecurityContext (RunAsUser/RunAsGroup) instead of taking host's uid/gid.
	I0731 10:52:24.924195   96871 command_runner.go:130] > # Defaults to false.
	I0731 10:52:24.924207   96871 command_runner.go:130] > # device_ownership_from_security_context = false
	I0731 10:52:24.924221   96871 command_runner.go:130] > # Path to OCI hooks directories for automatically executed hooks. If one of the
	I0731 10:52:24.924234   96871 command_runner.go:130] > # directories does not exist, then CRI-O will automatically skip them.
	I0731 10:52:24.924244   96871 command_runner.go:130] > # hooks_dir = [
	I0731 10:52:24.924252   96871 command_runner.go:130] > # 	"/usr/share/containers/oci/hooks.d",
	I0731 10:52:24.924259   96871 command_runner.go:130] > # ]
	I0731 10:52:24.924270   96871 command_runner.go:130] > # Path to the file specifying the defaults mounts for each container. The
	I0731 10:52:24.924285   96871 command_runner.go:130] > # format of the config is /SRC:/DST, one mount per line. Notice that CRI-O reads
	I0731 10:52:24.924297   96871 command_runner.go:130] > # its default mounts from the following two files:
	I0731 10:52:24.924306   96871 command_runner.go:130] > #
	I0731 10:52:24.924319   96871 command_runner.go:130] > #   1) /etc/containers/mounts.conf (i.e., default_mounts_file): This is the
	I0731 10:52:24.924333   96871 command_runner.go:130] > #      override file, where users can either add in their own default mounts, or
	I0731 10:52:24.924345   96871 command_runner.go:130] > #      override the default mounts shipped with the package.
	I0731 10:52:24.924351   96871 command_runner.go:130] > #
	I0731 10:52:24.924362   96871 command_runner.go:130] > #   2) /usr/share/containers/mounts.conf: This is the default file read for
	I0731 10:52:24.924376   96871 command_runner.go:130] > #      mounts. If you want CRI-O to read from a different, specific mounts file,
	I0731 10:52:24.924390   96871 command_runner.go:130] > #      you can change the default_mounts_file. Note, if this is done, CRI-O will
	I0731 10:52:24.924402   96871 command_runner.go:130] > #      only add mounts it finds in this file.
	I0731 10:52:24.924410   96871 command_runner.go:130] > #
	I0731 10:52:24.924420   96871 command_runner.go:130] > # default_mounts_file = ""
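For reference, the /SRC:/DST, one-mount-per-line format those mounts files use might look like this (paths illustrative, not from this host):

	/usr/share/zoneinfo:/usr/share/zoneinfo
	/etc/pki/ca-trust:/etc/pki/ca-trust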
	I0731 10:52:24.924431   96871 command_runner.go:130] > # Maximum number of processes allowed in a container.
	I0731 10:52:24.924440   96871 command_runner.go:130] > # This option is deprecated. The Kubelet flag '--pod-pids-limit' should be used instead.
	I0731 10:52:24.924454   96871 command_runner.go:130] > # pids_limit = 0
	I0731 10:52:24.924468   96871 command_runner.go:130] > # Maximum size allowed for the container log file. Negative numbers indicate
	I0731 10:52:24.924482   96871 command_runner.go:130] > # that no size limit is imposed. If it is positive, it must be >= 8192 to
	I0731 10:52:24.924496   96871 command_runner.go:130] > # match/exceed conmon's read buffer. The file is truncated and re-opened so the
	I0731 10:52:24.924513   96871 command_runner.go:130] > # limit is never exceeded. This option is deprecated. The Kubelet flag '--container-log-max-size' should be used instead.
	I0731 10:52:24.924522   96871 command_runner.go:130] > # log_size_max = -1
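Since both options above are deprecated in favor of kubelet settings, a sketch of the corresponding KubeletConfiguration fields (values illustrative only):

	apiVersion: kubelet.config.k8s.io/v1beta1
	kind: KubeletConfiguration
	podPidsLimit: 4096            # replaces pids_limit
	containerLogMaxSize: "10Mi"   # replaces log_size_max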
	I0731 10:52:24.924534   96871 command_runner.go:130] > # Whether container output should be logged to journald in addition to the kubernetes log file
	I0731 10:52:24.924545   96871 command_runner.go:130] > # log_to_journald = false
	I0731 10:52:24.924559   96871 command_runner.go:130] > # Path to directory in which container exit files are written to by conmon.
	I0731 10:52:24.924571   96871 command_runner.go:130] > # container_exits_dir = "/var/run/crio/exits"
	I0731 10:52:24.924586   96871 command_runner.go:130] > # Path to directory for container attach sockets.
	I0731 10:52:24.924598   96871 command_runner.go:130] > # container_attach_socket_dir = "/var/run/crio"
	I0731 10:52:24.924611   96871 command_runner.go:130] > # The prefix to use for the source of the bind mounts.
	I0731 10:52:24.924620   96871 command_runner.go:130] > # bind_mount_prefix = ""
	I0731 10:52:24.924634   96871 command_runner.go:130] > # If set to true, all containers will run in read-only mode.
	I0731 10:52:24.924641   96871 command_runner.go:130] > # read_only = false
	I0731 10:52:24.924651   96871 command_runner.go:130] > # Changes the verbosity of the logs based on the level it is set to. Options
	I0731 10:52:24.924665   96871 command_runner.go:130] > # are fatal, panic, error, warn, info, debug and trace. This option supports
	I0731 10:52:24.924677   96871 command_runner.go:130] > # live configuration reload.
	I0731 10:52:24.924687   96871 command_runner.go:130] > # log_level = "info"
	I0731 10:52:24.924699   96871 command_runner.go:130] > # Filter the log messages by the provided regular expression.
	I0731 10:52:24.924710   96871 command_runner.go:130] > # This option supports live configuration reload.
	I0731 10:52:24.924720   96871 command_runner.go:130] > # log_filter = ""
	I0731 10:52:24.924730   96871 command_runner.go:130] > # The UID mappings for the user namespace of each container. A range is
	I0731 10:52:24.924740   96871 command_runner.go:130] > # specified in the form containerUID:HostUID:Size. Multiple ranges must be
	I0731 10:52:24.924751   96871 command_runner.go:130] > # separated by comma.
	I0731 10:52:24.924761   96871 command_runner.go:130] > # uid_mappings = ""
	I0731 10:52:24.924772   96871 command_runner.go:130] > # The GID mappings for the user namespace of each container. A range is
	I0731 10:52:24.924788   96871 command_runner.go:130] > # specified in the form containerGID:HostGID:Size. Multiple ranges must be
	I0731 10:52:24.924798   96871 command_runner.go:130] > # separated by comma.
	I0731 10:52:24.924807   96871 command_runner.go:130] > # gid_mappings = ""
	I0731 10:52:24.924820   96871 command_runner.go:130] > # If set, CRI-O will reject any attempt to map host UIDs below this value
	I0731 10:52:24.924831   96871 command_runner.go:130] > # into user namespaces.  A negative value indicates that no minimum is set,
	I0731 10:52:24.924843   96871 command_runner.go:130] > # so specifying mappings will only be allowed for pods that run as UID 0.
	I0731 10:52:24.924854   96871 command_runner.go:130] > # minimum_mappable_uid = -1
	I0731 10:52:24.924868   96871 command_runner.go:130] > # If set, CRI-O will reject any attempt to map host GIDs below this value
	I0731 10:52:24.924885   96871 command_runner.go:130] > # into user namespaces.  A negative value indicates that no minimum is set,
	I0731 10:52:24.924899   96871 command_runner.go:130] > # so specifying mappings will only be allowed for pods that run as UID 0.
	I0731 10:52:24.924908   96871 command_runner.go:130] > # minimum_mappable_gid = -1
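A sketch of the containerUID:HostUID:Size form described above, with illustrative values only:

	[crio.runtime]
	uid_mappings = "0:100000:65536"   # container UID 0 maps to host UID 100000, for a range of 65536 IDs
	gid_mappings = "0:100000:65536"
	minimum_mappable_uid = 100000     # reject mappings below this host UID
	minimum_mappable_gid = 100000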
	I0731 10:52:24.924943   96871 command_runner.go:130] > # The minimal amount of time in seconds to wait before issuing a timeout
	I0731 10:52:24.924959   96871 command_runner.go:130] > # regarding the proper termination of the container. The lowest possible
	I0731 10:52:24.924969   96871 command_runner.go:130] > # value is 30s, whereas lower values are not considered by CRI-O.
	I0731 10:52:24.924980   96871 command_runner.go:130] > # ctr_stop_timeout = 30
	I0731 10:52:24.924993   96871 command_runner.go:130] > # drop_infra_ctr determines whether CRI-O drops the infra container
	I0731 10:52:24.925006   96871 command_runner.go:130] > # when a pod does not have a private PID namespace, and does not use
	I0731 10:52:24.925020   96871 command_runner.go:130] > # a kernel separating runtime (like kata).
	I0731 10:52:24.925033   96871 command_runner.go:130] > # It requires manage_ns_lifecycle to be true.
	I0731 10:52:24.925043   96871 command_runner.go:130] > # drop_infra_ctr = true
	I0731 10:52:24.925054   96871 command_runner.go:130] > # infra_ctr_cpuset determines what CPUs will be used to run infra containers.
	I0731 10:52:24.925067   96871 command_runner.go:130] > # You can use linux CPU list format to specify desired CPUs.
	I0731 10:52:24.925080   96871 command_runner.go:130] > # To get better isolation for guaranteed pods, set this parameter to be equal to kubelet reserved-cpus.
	I0731 10:52:24.925091   96871 command_runner.go:130] > # infra_ctr_cpuset = ""
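The Linux CPU list format mentioned above accepts ranges and comma-separated entries; an illustrative pinning, not taken from this run:

	[crio.runtime]
	infra_ctr_cpuset = "0-1,6"   # run infra containers only on CPUs 0, 1 and 6 (example values)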
	I0731 10:52:24.925101   96871 command_runner.go:130] > # The directory where the state of the managed namespaces gets tracked.
	I0731 10:52:24.925112   96871 command_runner.go:130] > # Only used when manage_ns_lifecycle is true.
	I0731 10:52:24.925122   96871 command_runner.go:130] > # namespaces_dir = "/var/run"
	I0731 10:52:24.925134   96871 command_runner.go:130] > # pinns_path is the path to find the pinns binary, which is needed to manage namespace lifecycle
	I0731 10:52:24.925140   96871 command_runner.go:130] > # pinns_path = ""
	I0731 10:52:24.925150   96871 command_runner.go:130] > # default_runtime is the _name_ of the OCI runtime to be used as the default.
	I0731 10:52:24.925164   96871 command_runner.go:130] > # The name is matched against the runtimes map below. If this value is changed,
	I0731 10:52:24.925178   96871 command_runner.go:130] > # the corresponding existing entry from the runtimes map below will be ignored.
	I0731 10:52:24.925189   96871 command_runner.go:130] > # default_runtime = "runc"
	I0731 10:52:24.925200   96871 command_runner.go:130] > # A list of paths that, when absent from the host,
	I0731 10:52:24.925216   96871 command_runner.go:130] > # will cause a container creation to fail (as opposed to the current behavior of being created as a directory).
	I0731 10:52:24.925229   96871 command_runner.go:130] > # This option is to protect from source locations whose existence as a directory could jeopardize the health of the node, and whose
	I0731 10:52:24.925242   96871 command_runner.go:130] > # creation as a file is not desired either.
	I0731 10:52:24.925256   96871 command_runner.go:130] > # An example is /etc/hostname, which will cause failures on reboot if it's created as a directory, but often doesn't exist because
	I0731 10:52:24.925265   96871 command_runner.go:130] > # the hostname is being managed dynamically.
	I0731 10:52:24.925277   96871 command_runner.go:130] > # absent_mount_sources_to_reject = [
	I0731 10:52:24.925283   96871 command_runner.go:130] > # ]
	I0731 10:52:24.925296   96871 command_runner.go:130] > # The "crio.runtime.runtimes" table defines a list of OCI compatible runtimes.
	I0731 10:52:24.925310   96871 command_runner.go:130] > # The runtime to use is picked based on the runtime handler provided by the CRI.
	I0731 10:52:24.925323   96871 command_runner.go:130] > # If no runtime handler is provided, the runtime will be picked based on the level
	I0731 10:52:24.925333   96871 command_runner.go:130] > # of trust of the workload. Each entry in the table should follow the format:
	I0731 10:52:24.925337   96871 command_runner.go:130] > #
	I0731 10:52:24.925344   96871 command_runner.go:130] > #[crio.runtime.runtimes.runtime-handler]
	I0731 10:52:24.925356   96871 command_runner.go:130] > #  runtime_path = "/path/to/the/executable"
	I0731 10:52:24.925364   96871 command_runner.go:130] > #  runtime_type = "oci"
	I0731 10:52:24.925375   96871 command_runner.go:130] > #  runtime_root = "/path/to/the/root"
	I0731 10:52:24.925386   96871 command_runner.go:130] > #  privileged_without_host_devices = false
	I0731 10:52:24.925396   96871 command_runner.go:130] > #  allowed_annotations = []
	I0731 10:52:24.925405   96871 command_runner.go:130] > # Where:
	I0731 10:52:24.925417   96871 command_runner.go:130] > # - runtime-handler: name used to identify the runtime
	I0731 10:52:24.925431   96871 command_runner.go:130] > # - runtime_path (optional, string): absolute path to the runtime executable in
	I0731 10:52:24.925446   96871 command_runner.go:130] > #   the host filesystem. If omitted, the runtime-handler identifier should match
	I0731 10:52:24.925463   96871 command_runner.go:130] > #   the runtime executable name, and the runtime executable should be placed
	I0731 10:52:24.925474   96871 command_runner.go:130] > #   in $PATH.
	I0731 10:52:24.925484   96871 command_runner.go:130] > # - runtime_type (optional, string): type of runtime, one of: "oci", "vm". If
	I0731 10:52:24.925496   96871 command_runner.go:130] > #   omitted, an "oci" runtime is assumed.
	I0731 10:52:24.925509   96871 command_runner.go:130] > # - runtime_root (optional, string): root directory for storage of containers
	I0731 10:52:24.925518   96871 command_runner.go:130] > #   state.
	I0731 10:52:24.925527   96871 command_runner.go:130] > # - runtime_config_path (optional, string): the path for the runtime configuration
	I0731 10:52:24.925536   96871 command_runner.go:130] > #   file. This can only be used with when using the VM runtime_type.
	I0731 10:52:24.925547   96871 command_runner.go:130] > # - privileged_without_host_devices (optional, bool): an option for restricting
	I0731 10:52:24.925560   96871 command_runner.go:130] > #   host devices from being passed to privileged containers.
	I0731 10:52:24.925571   96871 command_runner.go:130] > # - allowed_annotations (optional, array of strings): an option for specifying
	I0731 10:52:24.925586   96871 command_runner.go:130] > #   a list of experimental annotations that this runtime handler is allowed to process.
	I0731 10:52:24.925597   96871 command_runner.go:130] > #   The currently recognized values are:
	I0731 10:52:24.925611   96871 command_runner.go:130] > #   "io.kubernetes.cri-o.userns-mode" for configuring a user namespace for the pod.
	I0731 10:52:24.925625   96871 command_runner.go:130] > #   "io.kubernetes.cri-o.cgroup2-mount-hierarchy-rw" for mounting cgroups writably when set to "true".
	I0731 10:52:24.925633   96871 command_runner.go:130] > #   "io.kubernetes.cri-o.Devices" for configuring devices for the pod.
	I0731 10:52:24.925646   96871 command_runner.go:130] > #   "io.kubernetes.cri-o.ShmSize" for configuring the size of /dev/shm.
	I0731 10:52:24.925662   96871 command_runner.go:130] > #   "io.kubernetes.cri-o.UnifiedCgroup.$CTR_NAME" for configuring the cgroup v2 unified block for a container.
	I0731 10:52:24.925677   96871 command_runner.go:130] > #   "io.containers.trace-syscall" for tracing syscalls via the OCI seccomp BPF hook.
	I0731 10:52:24.925691   96871 command_runner.go:130] > #   "io.kubernetes.cri.rdt-class" for setting the RDT class of a container
	I0731 10:52:24.925705   96871 command_runner.go:130] > # - monitor_exec_cgroup (optional, string): if set to "container", indicates exec probes
	I0731 10:52:24.925720   96871 command_runner.go:130] > #   should be moved to the container's cgroup
	I0731 10:52:24.925728   96871 command_runner.go:130] > [crio.runtime.runtimes.runc]
	I0731 10:52:24.925733   96871 command_runner.go:130] > runtime_path = "/usr/lib/cri-o-runc/sbin/runc"
	I0731 10:52:24.925742   96871 command_runner.go:130] > runtime_type = "oci"
	I0731 10:52:24.925750   96871 command_runner.go:130] > runtime_root = "/run/runc"
	I0731 10:52:24.925761   96871 command_runner.go:130] > runtime_config_path = ""
	I0731 10:52:24.925768   96871 command_runner.go:130] > monitor_path = ""
	I0731 10:52:24.925778   96871 command_runner.go:130] > monitor_cgroup = ""
	I0731 10:52:24.925785   96871 command_runner.go:130] > monitor_exec_cgroup = ""
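Following the handler format documented above, an additional runtime entry might look like the sketch below (the crun path is an assumption, not taken from this machine); a Kubernetes RuntimeClass whose handler is named "crun" would then select it through the CRI:

	[crio.runtime.runtimes.crun]
	runtime_path = "/usr/bin/crun"   # hypothetical location; searched in $PATH if omitted
	runtime_type = "oci"
	runtime_root = "/run/crun"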
	I0731 10:52:24.925843   96871 command_runner.go:130] > # crun is a fast and lightweight fully featured OCI runtime and C library for
	I0731 10:52:24.925855   96871 command_runner.go:130] > # running containers
	I0731 10:52:24.925862   96871 command_runner.go:130] > #[crio.runtime.runtimes.crun]
	I0731 10:52:24.925873   96871 command_runner.go:130] > # Kata Containers is an OCI runtime, where containers are run inside lightweight
	I0731 10:52:24.925896   96871 command_runner.go:130] > # VMs. Kata provides additional isolation towards the host, minimizing the host attack
	I0731 10:52:24.925908   96871 command_runner.go:130] > # surface and mitigating the consequences of containers breakout.
	I0731 10:52:24.925920   96871 command_runner.go:130] > # Kata Containers with the default configured VMM
	I0731 10:52:24.925930   96871 command_runner.go:130] > #[crio.runtime.runtimes.kata-runtime]
	I0731 10:52:24.925938   96871 command_runner.go:130] > # Kata Containers with the QEMU VMM
	I0731 10:52:24.925951   96871 command_runner.go:130] > #[crio.runtime.runtimes.kata-qemu]
	I0731 10:52:24.925963   96871 command_runner.go:130] > # Kata Containers with the Firecracker VMM
	I0731 10:52:24.925970   96871 command_runner.go:130] > #[crio.runtime.runtimes.kata-fc]
	I0731 10:52:24.925985   96871 command_runner.go:130] > # The workloads table defines ways to customize containers with different resources
	I0731 10:52:24.925997   96871 command_runner.go:130] > # that work based on annotations, rather than the CRI.
	I0731 10:52:24.926010   96871 command_runner.go:130] > # Note, the behavior of this table is EXPERIMENTAL and may change at any time.
	I0731 10:52:24.926026   96871 command_runner.go:130] > # Each workload has a name, activation_annotation, annotation_prefix and set of resources it supports mutating.
	I0731 10:52:24.926036   96871 command_runner.go:130] > # The currently supported resources are "cpu" (to configure the cpu shares) and "cpuset" (to configure the cpuset).
	I0731 10:52:24.926044   96871 command_runner.go:130] > # Each resource can have a default value specified, or be empty.
	I0731 10:52:24.926063   96871 command_runner.go:130] > # For a container to opt into this workload, the pod should be configured with the annotation $activation_annotation (key only, value is ignored).
	I0731 10:52:24.926080   96871 command_runner.go:130] > # To customize per-container, an annotation of the form $annotation_prefix.$resource/$ctrName = "value" can be specified
	I0731 10:52:24.926093   96871 command_runner.go:130] > # signifying for that resource type to override the default value.
	I0731 10:52:24.926108   96871 command_runner.go:130] > # If the annotation_prefix is not present, every container in the pod will be given the default values.
	I0731 10:52:24.926120   96871 command_runner.go:130] > # Example:
	I0731 10:52:24.926128   96871 command_runner.go:130] > # [crio.runtime.workloads.workload-type]
	I0731 10:52:24.926135   96871 command_runner.go:130] > # activation_annotation = "io.crio/workload"
	I0731 10:52:24.926147   96871 command_runner.go:130] > # annotation_prefix = "io.crio.workload-type"
	I0731 10:52:24.926159   96871 command_runner.go:130] > # [crio.runtime.workloads.workload-type.resources]
	I0731 10:52:24.926168   96871 command_runner.go:130] > # cpuset = 0
	I0731 10:52:24.926175   96871 command_runner.go:130] > # cpushares = "0-1"
	I0731 10:52:24.926184   96871 command_runner.go:130] > # Where:
	I0731 10:52:24.926204   96871 command_runner.go:130] > # The workload name is workload-type.
	I0731 10:52:24.926220   96871 command_runner.go:130] > # To opt in, the pod must have the "io.crio.workload" annotation (this is a precise string match).
	I0731 10:52:24.926232   96871 command_runner.go:130] > # This workload supports setting cpuset and cpu resources.
	I0731 10:52:24.926245   96871 command_runner.go:130] > # annotation_prefix is used to customize the different resources.
	I0731 10:52:24.926261   96871 command_runner.go:130] > # To configure the cpu shares a container gets in the example above, the pod would have to have the following annotation:
	I0731 10:52:24.926269   96871 command_runner.go:130] > # "io.crio.workload-type/$container_name = {"cpushares": "value"}"
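On the pod side, opting into the example workload above would look roughly like this sketch; the annotation names follow the example config and the values are illustrative:

	apiVersion: v1
	kind: Pod
	metadata:
	  name: demo
	  annotations:
	    io.crio/workload: ""                       # activation annotation; the value is ignored
	    io.crio.workload-type.cpuset/demo: "0-1"   # per-container override for the container named demo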
	I0731 10:52:24.926274   96871 command_runner.go:130] > # 
	I0731 10:52:24.926288   96871 command_runner.go:130] > # The crio.image table contains settings pertaining to the management of OCI images.
	I0731 10:52:24.926297   96871 command_runner.go:130] > #
	I0731 10:52:24.926311   96871 command_runner.go:130] > # CRI-O reads its configured registries defaults from the system wide
	I0731 10:52:24.926328   96871 command_runner.go:130] > # containers-registries.conf(5) located in /etc/containers/registries.conf. If
	I0731 10:52:24.926342   96871 command_runner.go:130] > # you want to modify just CRI-O, you can change the registries configuration in
	I0731 10:52:24.926356   96871 command_runner.go:130] > # this file. Otherwise, leave insecure_registries and registries commented out to
	I0731 10:52:24.926367   96871 command_runner.go:130] > # use the system's defaults from /etc/containers/registries.conf.
	I0731 10:52:24.926373   96871 command_runner.go:130] > [crio.image]
	I0731 10:52:24.926382   96871 command_runner.go:130] > # Default transport for pulling images from a remote container storage.
	I0731 10:52:24.926394   96871 command_runner.go:130] > # default_transport = "docker://"
	I0731 10:52:24.926404   96871 command_runner.go:130] > # The path to a file containing credentials necessary for pulling images from
	I0731 10:52:24.926418   96871 command_runner.go:130] > # secure registries. The file is similar to that of /var/lib/kubelet/config.json
	I0731 10:52:24.926428   96871 command_runner.go:130] > # global_auth_file = ""
	I0731 10:52:24.926442   96871 command_runner.go:130] > # The image used to instantiate infra containers.
	I0731 10:52:24.926458   96871 command_runner.go:130] > # This option supports live configuration reload.
	I0731 10:52:24.926466   96871 command_runner.go:130] > pause_image = "registry.k8s.io/pause:3.9"
	I0731 10:52:24.926475   96871 command_runner.go:130] > # The path to a file containing credentials specific for pulling the pause_image from
	I0731 10:52:24.926488   96871 command_runner.go:130] > # above. The file is similar to that of /var/lib/kubelet/config.json
	I0731 10:52:24.926497   96871 command_runner.go:130] > # This option supports live configuration reload.
	I0731 10:52:24.926508   96871 command_runner.go:130] > # pause_image_auth_file = ""
	I0731 10:52:24.926520   96871 command_runner.go:130] > # The command to run to have a container stay in the paused state.
	I0731 10:52:24.926538   96871 command_runner.go:130] > # When explicitly set to "", it will fall back to the entrypoint and command
	I0731 10:52:24.926551   96871 command_runner.go:130] > # specified in the pause image. When commented out, it will fall back to the
	I0731 10:52:24.926560   96871 command_runner.go:130] > # default: "/pause". This option supports live configuration reload.
	I0731 10:52:24.926570   96871 command_runner.go:130] > # pause_command = "/pause"
	I0731 10:52:24.926583   96871 command_runner.go:130] > # Path to the file which decides what sort of policy we use when deciding
	I0731 10:52:24.926604   96871 command_runner.go:130] > # whether or not to trust an image that we've pulled. It is not recommended that
	I0731 10:52:24.926618   96871 command_runner.go:130] > # this option be used, as the default behavior of using the system-wide default
	I0731 10:52:24.926631   96871 command_runner.go:130] > # policy (i.e., /etc/containers/policy.json) is most often preferred. Please
	I0731 10:52:24.926643   96871 command_runner.go:130] > # refer to containers-policy.json(5) for more details.
	I0731 10:52:24.926653   96871 command_runner.go:130] > # signature_policy = ""
	I0731 10:52:24.926663   96871 command_runner.go:130] > # List of registries to skip TLS verification for pulling images. Please
	I0731 10:52:24.926679   96871 command_runner.go:130] > # consider configuring the registries via /etc/containers/registries.conf before
	I0731 10:52:24.926690   96871 command_runner.go:130] > # changing them here.
	I0731 10:52:24.926697   96871 command_runner.go:130] > # insecure_registries = [
	I0731 10:52:24.926706   96871 command_runner.go:130] > # ]
	I0731 10:52:24.926716   96871 command_runner.go:130] > # Controls how image volumes are handled. The valid values are mkdir, bind and
	I0731 10:52:24.926728   96871 command_runner.go:130] > # ignore; the latter will ignore volumes entirely.
	I0731 10:52:24.926741   96871 command_runner.go:130] > # image_volumes = "mkdir"
	I0731 10:52:24.926756   96871 command_runner.go:130] > # Temporary directory to use for storing big files
	I0731 10:52:24.926764   96871 command_runner.go:130] > # big_files_temporary_dir = ""
	I0731 10:52:24.926772   96871 command_runner.go:130] > # The crio.network table contains settings pertaining to the management of
	I0731 10:52:24.926782   96871 command_runner.go:130] > # CNI plugins.
	I0731 10:52:24.926793   96871 command_runner.go:130] > [crio.network]
	I0731 10:52:24.926806   96871 command_runner.go:130] > # The default CNI network name to be selected. If not set or "", then
	I0731 10:52:24.926818   96871 command_runner.go:130] > # CRI-O will pick up the first one found in network_dir.
	I0731 10:52:24.926828   96871 command_runner.go:130] > # cni_default_network = ""
	I0731 10:52:24.926840   96871 command_runner.go:130] > # Path to the directory where CNI configuration files are located.
	I0731 10:52:24.926848   96871 command_runner.go:130] > # network_dir = "/etc/cni/net.d/"
	I0731 10:52:24.926859   96871 command_runner.go:130] > # Paths to directories where CNI plugin binaries are located.
	I0731 10:52:24.926869   96871 command_runner.go:130] > # plugin_dirs = [
	I0731 10:52:24.926876   96871 command_runner.go:130] > # 	"/opt/cni/bin/",
	I0731 10:52:24.926885   96871 command_runner.go:130] > # ]
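To pin a specific CNI network rather than take the first one found, a sketch (the network name is illustrative; this run simply lets minikube recommend kindnet):

	[crio.network]
	cni_default_network = "kindnet"   # illustrative; "" means the first config found in network_dir
	network_dir = "/etc/cni/net.d/"
	plugin_dirs = ["/opt/cni/bin/"]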
	I0731 10:52:24.926898   96871 command_runner.go:130] > # A necessary configuration for Prometheus based metrics retrieval
	I0731 10:52:24.926907   96871 command_runner.go:130] > [crio.metrics]
	I0731 10:52:24.926919   96871 command_runner.go:130] > # Globally enable or disable metrics support.
	I0731 10:52:24.926928   96871 command_runner.go:130] > # enable_metrics = false
	I0731 10:52:24.926940   96871 command_runner.go:130] > # Specify enabled metrics collectors.
	I0731 10:52:24.926948   96871 command_runner.go:130] > # By default, all metrics are enabled.
	I0731 10:52:24.926962   96871 command_runner.go:130] > # It is possible to prefix the metrics with "container_runtime_" and "crio_".
	I0731 10:52:24.926976   96871 command_runner.go:130] > # For example, the metrics collector "operations" would be treated in the same
	I0731 10:52:24.926990   96871 command_runner.go:130] > # way as "crio_operations" and "container_runtime_crio_operations".
	I0731 10:52:24.927000   96871 command_runner.go:130] > # metrics_collectors = [
	I0731 10:52:24.927009   96871 command_runner.go:130] > # 	"operations",
	I0731 10:52:24.927020   96871 command_runner.go:130] > # 	"operations_latency_microseconds_total",
	I0731 10:52:24.927030   96871 command_runner.go:130] > # 	"operations_latency_microseconds",
	I0731 10:52:24.927039   96871 command_runner.go:130] > # 	"operations_errors",
	I0731 10:52:24.927046   96871 command_runner.go:130] > # 	"image_pulls_by_digest",
	I0731 10:52:24.927052   96871 command_runner.go:130] > # 	"image_pulls_by_name",
	I0731 10:52:24.927064   96871 command_runner.go:130] > # 	"image_pulls_by_name_skipped",
	I0731 10:52:24.927074   96871 command_runner.go:130] > # 	"image_pulls_failures",
	I0731 10:52:24.927083   96871 command_runner.go:130] > # 	"image_pulls_successes",
	I0731 10:52:24.927093   96871 command_runner.go:130] > # 	"image_pulls_layer_size",
	I0731 10:52:24.927100   96871 command_runner.go:130] > # 	"image_layer_reuse",
	I0731 10:52:24.927111   96871 command_runner.go:130] > # 	"containers_oom_total",
	I0731 10:52:24.927124   96871 command_runner.go:130] > # 	"containers_oom",
	I0731 10:52:24.927131   96871 command_runner.go:130] > # 	"processes_defunct",
	I0731 10:52:24.927137   96871 command_runner.go:130] > # 	"operations_total",
	I0731 10:52:24.927148   96871 command_runner.go:130] > # 	"operations_latency_seconds",
	I0731 10:52:24.927159   96871 command_runner.go:130] > # 	"operations_latency_seconds_total",
	I0731 10:52:24.927170   96871 command_runner.go:130] > # 	"operations_errors_total",
	I0731 10:52:24.927180   96871 command_runner.go:130] > # 	"image_pulls_bytes_total",
	I0731 10:52:24.927191   96871 command_runner.go:130] > # 	"image_pulls_skipped_bytes_total",
	I0731 10:52:24.927204   96871 command_runner.go:130] > # 	"image_pulls_failure_total",
	I0731 10:52:24.927212   96871 command_runner.go:130] > # 	"image_pulls_success_total",
	I0731 10:52:24.927216   96871 command_runner.go:130] > # 	"image_layer_reuse_total",
	I0731 10:52:24.927226   96871 command_runner.go:130] > # 	"containers_oom_count_total",
	I0731 10:52:24.927235   96871 command_runner.go:130] > # ]
	I0731 10:52:24.927244   96871 command_runner.go:130] > # The port on which the metrics server will listen.
	I0731 10:52:24.927254   96871 command_runner.go:130] > # metrics_port = 9090
	I0731 10:52:24.927266   96871 command_runner.go:130] > # Local socket path to bind the metrics server to
	I0731 10:52:24.927276   96871 command_runner.go:130] > # metrics_socket = ""
	I0731 10:52:24.927287   96871 command_runner.go:130] > # The certificate for the secure metrics server.
	I0731 10:52:24.927303   96871 command_runner.go:130] > # If the certificate is not available on disk, then CRI-O will generate a
	I0731 10:52:24.927311   96871 command_runner.go:130] > # self-signed one. CRI-O also watches for changes of this path and reloads the
	I0731 10:52:24.927322   96871 command_runner.go:130] > # certificate on any modification event.
	I0731 10:52:24.927332   96871 command_runner.go:130] > # metrics_cert = ""
	I0731 10:52:24.927344   96871 command_runner.go:130] > # The certificate key for the secure metrics server.
	I0731 10:52:24.927356   96871 command_runner.go:130] > # Behaves in the same way as the metrics_cert.
	I0731 10:52:24.927366   96871 command_runner.go:130] > # metrics_key = ""
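A hedged sketch of turning the metrics server on with a reduced collector set; the collector names are taken from the list above:

	[crio.metrics]
	enable_metrics = true
	metrics_port = 9090
	metrics_collectors = ["operations", "image_pulls_failures", "containers_oom_count_total"]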
	I0731 10:52:24.927379   96871 command_runner.go:130] > # A necessary configuration for OpenTelemetry trace data exporting
	I0731 10:52:24.927388   96871 command_runner.go:130] > [crio.tracing]
	I0731 10:52:24.927400   96871 command_runner.go:130] > # Globally enable or disable exporting OpenTelemetry traces.
	I0731 10:52:24.927409   96871 command_runner.go:130] > # enable_tracing = false
	I0731 10:52:24.927416   96871 command_runner.go:130] > # Address on which the gRPC trace collector listens.
	I0731 10:52:24.927427   96871 command_runner.go:130] > # tracing_endpoint = "0.0.0.0:4317"
	I0731 10:52:24.927439   96871 command_runner.go:130] > # Number of samples to collect per million spans.
	I0731 10:52:24.927455   96871 command_runner.go:130] > # tracing_sampling_rate_per_million = 0
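And similarly for OpenTelemetry export, a sketch with an assumed local collector and sampling rate:

	[crio.tracing]
	enable_tracing = true
	tracing_endpoint = "127.0.0.1:4317"       # hypothetical local OTLP gRPC collector
	tracing_sampling_rate_per_million = 1000  # sample 0.1% of spans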
	I0731 10:52:24.927469   96871 command_runner.go:130] > # Necessary information pertaining to container and pod stats reporting.
	I0731 10:52:24.927478   96871 command_runner.go:130] > [crio.stats]
	I0731 10:52:24.927490   96871 command_runner.go:130] > # The number of seconds between collecting pod and container stats.
	I0731 10:52:24.927505   96871 command_runner.go:130] > # If set to 0, the stats are collected on-demand instead.
	I0731 10:52:24.927512   96871 command_runner.go:130] > # stats_collection_period = 0
	I0731 10:52:24.927542   96871 command_runner.go:130] ! time="2023-07-31 10:52:24.920150156Z" level=info msg="Starting CRI-O, version: 1.24.6, git: 4bfe15a9feb74ffc95e66a21c04b15fa7bbc2b90(clean)"
	I0731 10:52:24.927563   96871 command_runner.go:130] ! level=info msg="Using default capabilities: CAP_CHOWN, CAP_DAC_OVERRIDE, CAP_FSETID, CAP_FOWNER, CAP_SETGID, CAP_SETUID, CAP_SETPCAP, CAP_NET_BIND_SERVICE, CAP_KILL"
	I0731 10:52:24.927648   96871 cni.go:84] Creating CNI manager for ""
	I0731 10:52:24.927663   96871 cni.go:136] 1 nodes found, recommending kindnet
	I0731 10:52:24.927676   96871 kubeadm.go:87] Using pod CIDR: 10.244.0.0/16
	I0731 10:52:24.927701   96871 kubeadm.go:176] kubeadm options: {CertDir:/var/lib/minikube/certs ServiceCIDR:10.96.0.0/12 PodSubnet:10.244.0.0/16 AdvertiseAddress:192.168.58.2 APIServerPort:8443 KubernetesVersion:v1.27.3 EtcdDataDir:/var/lib/minikube/etcd EtcdExtraArgs:map[] ClusterName:multinode-776386 NodeName:multinode-776386 DNSDomain:cluster.local CRISocket:/var/run/crio/crio.sock ImageRepository: ComponentOptions:[{Component:apiServer ExtraArgs:map[enable-admission-plugins:NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota] Pairs:map[certSANs:["127.0.0.1", "localhost", "192.168.58.2"]]} {Component:controllerManager ExtraArgs:map[allocate-node-cidrs:true leader-elect:false] Pairs:map[]} {Component:scheduler ExtraArgs:map[leader-elect:false] Pairs:map[]}] FeatureArgs:map[] NodeIP:192.168.58.2 CgroupDriver:cgroupfs ClientCAFile:/var/lib/minikube/certs/ca.crt StaticPodPath:/etc/kubernetes/manifests ControlPlaneAddress:control-plane.minikube.internal KubeProxyOptions:map[] ResolvConfSearchRegression:false KubeletConfigOpts:map[hairpinMode:hairpin-veth runtimeRequestTimeout:15m] PrependCriSocketUnix:true}
	I0731 10:52:24.927893   96871 kubeadm.go:181] kubeadm config:
	apiVersion: kubeadm.k8s.io/v1beta3
	kind: InitConfiguration
	localAPIEndpoint:
	  advertiseAddress: 192.168.58.2
	  bindPort: 8443
	bootstrapTokens:
	  - groups:
	      - system:bootstrappers:kubeadm:default-node-token
	    ttl: 24h0m0s
	    usages:
	      - signing
	      - authentication
	nodeRegistration:
	  criSocket: unix:///var/run/crio/crio.sock
	  name: "multinode-776386"
	  kubeletExtraArgs:
	    node-ip: 192.168.58.2
	  taints: []
	---
	apiVersion: kubeadm.k8s.io/v1beta3
	kind: ClusterConfiguration
	apiServer:
	  certSANs: ["127.0.0.1", "localhost", "192.168.58.2"]
	  extraArgs:
	    enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
	controllerManager:
	  extraArgs:
	    allocate-node-cidrs: "true"
	    leader-elect: "false"
	scheduler:
	  extraArgs:
	    leader-elect: "false"
	certificatesDir: /var/lib/minikube/certs
	clusterName: mk
	controlPlaneEndpoint: control-plane.minikube.internal:8443
	etcd:
	  local:
	    dataDir: /var/lib/minikube/etcd
	    extraArgs:
	      proxy-refresh-interval: "70000"
	kubernetesVersion: v1.27.3
	networking:
	  dnsDomain: cluster.local
	  podSubnet: "10.244.0.0/16"
	  serviceSubnet: 10.96.0.0/12
	---
	apiVersion: kubelet.config.k8s.io/v1beta1
	kind: KubeletConfiguration
	authentication:
	  x509:
	    clientCAFile: /var/lib/minikube/certs/ca.crt
	cgroupDriver: cgroupfs
	hairpinMode: hairpin-veth
	runtimeRequestTimeout: 15m
	clusterDomain: "cluster.local"
	# disable disk resource management by default
	imageGCHighThresholdPercent: 100
	evictionHard:
	  nodefs.available: "0%!"(MISSING)
	  nodefs.inodesFree: "0%!"(MISSING)
	  imagefs.available: "0%!"(MISSING)
	failSwapOn: false
	staticPodPath: /etc/kubernetes/manifests
	---
	apiVersion: kubeproxy.config.k8s.io/v1alpha1
	kind: KubeProxyConfiguration
	clusterCIDR: "10.244.0.0/16"
	metricsBindAddress: 0.0.0.0:10249
	conntrack:
	  maxPerCore: 0
	# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_established"
	  tcpEstablishedTimeout: 0s
	# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_close"
	  tcpCloseWaitTimeout: 0s
	
	I0731 10:52:24.927995   96871 kubeadm.go:976] kubelet [Unit]
	Wants=crio.service
	
	[Service]
	ExecStart=
	ExecStart=/var/lib/minikube/binaries/v1.27.3/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --cgroups-per-qos=false --config=/var/lib/kubelet/config.yaml --container-runtime-endpoint=unix:///var/run/crio/crio.sock --enforce-node-allocatable= --hostname-override=multinode-776386 --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.58.2
	
	[Install]
	 config:
	{KubernetesVersion:v1.27.3 ClusterName:multinode-776386 Namespace:default APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:crio CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI: NodeIP: NodePort:8443 NodeName:}
	I0731 10:52:24.928054   96871 ssh_runner.go:195] Run: sudo ls /var/lib/minikube/binaries/v1.27.3
	I0731 10:52:24.934990   96871 command_runner.go:130] > kubeadm
	I0731 10:52:24.935003   96871 command_runner.go:130] > kubectl
	I0731 10:52:24.935007   96871 command_runner.go:130] > kubelet
	I0731 10:52:24.935600   96871 binaries.go:44] Found k8s binaries, skipping transfer
	I0731 10:52:24.935656   96871 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/kubelet.service.d /lib/systemd/system /var/tmp/minikube
	I0731 10:52:24.942721   96871 ssh_runner.go:362] scp memory --> /etc/systemd/system/kubelet.service.d/10-kubeadm.conf (426 bytes)
	I0731 10:52:24.957509   96871 ssh_runner.go:362] scp memory --> /lib/systemd/system/kubelet.service (352 bytes)
	I0731 10:52:24.972433   96871 ssh_runner.go:362] scp memory --> /var/tmp/minikube/kubeadm.yaml.new (2097 bytes)
	I0731 10:52:24.986570   96871 ssh_runner.go:195] Run: grep 192.168.58.2	control-plane.minikube.internal$ /etc/hosts
	I0731 10:52:24.989249   96871 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\tcontrol-plane.minikube.internal$' "/etc/hosts"; echo "192.168.58.2	control-plane.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
	I0731 10:52:24.997840   96871 certs.go:56] Setting up /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/multinode-776386 for IP: 192.168.58.2
	I0731 10:52:24.997869   96871 certs.go:190] acquiring lock for shared ca certs: {Name:mke1f008d411b97835fe7ef4c9ac6bdba0705009 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
	I0731 10:52:24.997995   96871 certs.go:199] skipping minikubeCA CA generation: /home/jenkins/minikube-integration/16969-5799/.minikube/ca.key
	I0731 10:52:24.998046   96871 certs.go:199] skipping proxyClientCA CA generation: /home/jenkins/minikube-integration/16969-5799/.minikube/proxy-client-ca.key
	I0731 10:52:24.998099   96871 certs.go:319] generating minikube-user signed cert: /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/multinode-776386/client.key
	I0731 10:52:24.998121   96871 crypto.go:68] Generating cert /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/multinode-776386/client.crt with IP's: []
	I0731 10:52:25.126550   96871 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/multinode-776386/client.crt ...
	I0731 10:52:25.126581   96871 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/multinode-776386/client.crt: {Name:mk3945ebf2b2fa4ad759b5d9e313d886f6daab88 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
	I0731 10:52:25.126780   96871 crypto.go:164] Writing key to /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/multinode-776386/client.key ...
	I0731 10:52:25.126797   96871 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/multinode-776386/client.key: {Name:mkcd5735be5a1cf8e41f7d6ffd2036c7858b6e72 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
	I0731 10:52:25.126910   96871 certs.go:319] generating minikube signed cert: /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/multinode-776386/apiserver.key.cee25041
	I0731 10:52:25.126930   96871 crypto.go:68] Generating cert /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/multinode-776386/apiserver.crt.cee25041 with IP's: [192.168.58.2 10.96.0.1 127.0.0.1 10.0.0.1]
	I0731 10:52:25.273772   96871 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/multinode-776386/apiserver.crt.cee25041 ...
	I0731 10:52:25.273803   96871 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/multinode-776386/apiserver.crt.cee25041: {Name:mk814487c30eddb4430d707c703f83dde10737b9 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
	I0731 10:52:25.273998   96871 crypto.go:164] Writing key to /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/multinode-776386/apiserver.key.cee25041 ...
	I0731 10:52:25.274013   96871 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/multinode-776386/apiserver.key.cee25041: {Name:mke8fb4309cb4487f669396d55b7363d0e3ab660 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
	I0731 10:52:25.274113   96871 certs.go:337] copying /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/multinode-776386/apiserver.crt.cee25041 -> /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/multinode-776386/apiserver.crt
	I0731 10:52:25.274201   96871 certs.go:341] copying /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/multinode-776386/apiserver.key.cee25041 -> /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/multinode-776386/apiserver.key
	I0731 10:52:25.274253   96871 certs.go:319] generating aggregator signed cert: /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/multinode-776386/proxy-client.key
	I0731 10:52:25.274266   96871 crypto.go:68] Generating cert /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/multinode-776386/proxy-client.crt with IP's: []
	I0731 10:52:25.375226   96871 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/multinode-776386/proxy-client.crt ...
	I0731 10:52:25.375252   96871 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/multinode-776386/proxy-client.crt: {Name:mk1a07c18bd53eab9d072eeac8aecbf898f98c15 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
	I0731 10:52:25.375417   96871 crypto.go:164] Writing key to /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/multinode-776386/proxy-client.key ...
	I0731 10:52:25.375432   96871 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/multinode-776386/proxy-client.key: {Name:mk3ebdd149fd1333d7a1f8223771267fe6b644f4 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
	I0731 10:52:25.375524   96871 vm_assets.go:163] NewFileAsset: /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/multinode-776386/apiserver.crt -> /var/lib/minikube/certs/apiserver.crt
	I0731 10:52:25.375545   96871 vm_assets.go:163] NewFileAsset: /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/multinode-776386/apiserver.key -> /var/lib/minikube/certs/apiserver.key
	I0731 10:52:25.375558   96871 vm_assets.go:163] NewFileAsset: /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/multinode-776386/proxy-client.crt -> /var/lib/minikube/certs/proxy-client.crt
	I0731 10:52:25.375570   96871 vm_assets.go:163] NewFileAsset: /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/multinode-776386/proxy-client.key -> /var/lib/minikube/certs/proxy-client.key
	I0731 10:52:25.375582   96871 vm_assets.go:163] NewFileAsset: /home/jenkins/minikube-integration/16969-5799/.minikube/ca.crt -> /var/lib/minikube/certs/ca.crt
	I0731 10:52:25.375601   96871 vm_assets.go:163] NewFileAsset: /home/jenkins/minikube-integration/16969-5799/.minikube/ca.key -> /var/lib/minikube/certs/ca.key
	I0731 10:52:25.375615   96871 vm_assets.go:163] NewFileAsset: /home/jenkins/minikube-integration/16969-5799/.minikube/proxy-client-ca.crt -> /var/lib/minikube/certs/proxy-client-ca.crt
	I0731 10:52:25.375627   96871 vm_assets.go:163] NewFileAsset: /home/jenkins/minikube-integration/16969-5799/.minikube/proxy-client-ca.key -> /var/lib/minikube/certs/proxy-client-ca.key
	I0731 10:52:25.375681   96871 certs.go:437] found cert: /home/jenkins/minikube-integration/16969-5799/.minikube/certs/home/jenkins/minikube-integration/16969-5799/.minikube/certs/12537.pem (1338 bytes)
	W0731 10:52:25.375714   96871 certs.go:433] ignoring /home/jenkins/minikube-integration/16969-5799/.minikube/certs/home/jenkins/minikube-integration/16969-5799/.minikube/certs/12537_empty.pem, impossibly tiny 0 bytes
	I0731 10:52:25.375726   96871 certs.go:437] found cert: /home/jenkins/minikube-integration/16969-5799/.minikube/certs/home/jenkins/minikube-integration/16969-5799/.minikube/certs/ca-key.pem (1675 bytes)
	I0731 10:52:25.375749   96871 certs.go:437] found cert: /home/jenkins/minikube-integration/16969-5799/.minikube/certs/home/jenkins/minikube-integration/16969-5799/.minikube/certs/ca.pem (1082 bytes)
	I0731 10:52:25.375774   96871 certs.go:437] found cert: /home/jenkins/minikube-integration/16969-5799/.minikube/certs/home/jenkins/minikube-integration/16969-5799/.minikube/certs/cert.pem (1123 bytes)
	I0731 10:52:25.375798   96871 certs.go:437] found cert: /home/jenkins/minikube-integration/16969-5799/.minikube/certs/home/jenkins/minikube-integration/16969-5799/.minikube/certs/key.pem (1675 bytes)
	I0731 10:52:25.375836   96871 certs.go:437] found cert: /home/jenkins/minikube-integration/16969-5799/.minikube/files/etc/ssl/certs/home/jenkins/minikube-integration/16969-5799/.minikube/files/etc/ssl/certs/125372.pem (1708 bytes)
	I0731 10:52:25.375860   96871 vm_assets.go:163] NewFileAsset: /home/jenkins/minikube-integration/16969-5799/.minikube/files/etc/ssl/certs/125372.pem -> /usr/share/ca-certificates/125372.pem
	I0731 10:52:25.375873   96871 vm_assets.go:163] NewFileAsset: /home/jenkins/minikube-integration/16969-5799/.minikube/ca.crt -> /usr/share/ca-certificates/minikubeCA.pem
	I0731 10:52:25.375884   96871 vm_assets.go:163] NewFileAsset: /home/jenkins/minikube-integration/16969-5799/.minikube/certs/12537.pem -> /usr/share/ca-certificates/12537.pem
	I0731 10:52:25.376356   96871 ssh_runner.go:362] scp /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/multinode-776386/apiserver.crt --> /var/lib/minikube/certs/apiserver.crt (1399 bytes)
	I0731 10:52:25.397063   96871 ssh_runner.go:362] scp /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/multinode-776386/apiserver.key --> /var/lib/minikube/certs/apiserver.key (1675 bytes)
	I0731 10:52:25.416405   96871 ssh_runner.go:362] scp /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/multinode-776386/proxy-client.crt --> /var/lib/minikube/certs/proxy-client.crt (1147 bytes)
	I0731 10:52:25.435948   96871 ssh_runner.go:362] scp /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/multinode-776386/proxy-client.key --> /var/lib/minikube/certs/proxy-client.key (1675 bytes)
	I0731 10:52:25.455150   96871 ssh_runner.go:362] scp /home/jenkins/minikube-integration/16969-5799/.minikube/ca.crt --> /var/lib/minikube/certs/ca.crt (1111 bytes)
	I0731 10:52:25.474261   96871 ssh_runner.go:362] scp /home/jenkins/minikube-integration/16969-5799/.minikube/ca.key --> /var/lib/minikube/certs/ca.key (1675 bytes)
	I0731 10:52:25.493352   96871 ssh_runner.go:362] scp /home/jenkins/minikube-integration/16969-5799/.minikube/proxy-client-ca.crt --> /var/lib/minikube/certs/proxy-client-ca.crt (1119 bytes)
	I0731 10:52:25.512267   96871 ssh_runner.go:362] scp /home/jenkins/minikube-integration/16969-5799/.minikube/proxy-client-ca.key --> /var/lib/minikube/certs/proxy-client-ca.key (1679 bytes)
	I0731 10:52:25.531228   96871 ssh_runner.go:362] scp /home/jenkins/minikube-integration/16969-5799/.minikube/files/etc/ssl/certs/125372.pem --> /usr/share/ca-certificates/125372.pem (1708 bytes)
	I0731 10:52:25.550401   96871 ssh_runner.go:362] scp /home/jenkins/minikube-integration/16969-5799/.minikube/ca.crt --> /usr/share/ca-certificates/minikubeCA.pem (1111 bytes)
	I0731 10:52:25.569535   96871 ssh_runner.go:362] scp /home/jenkins/minikube-integration/16969-5799/.minikube/certs/12537.pem --> /usr/share/ca-certificates/12537.pem (1338 bytes)
	I0731 10:52:25.588218   96871 ssh_runner.go:362] scp memory --> /var/lib/minikube/kubeconfig (738 bytes)
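The "scp memory --> <path>" entries above are minikube copying a byte buffer assembled in-process (an in-memory asset) to the node over SSH, rather than a file staged on the Jenkins host. A rough shell equivalent, with illustrative names, assuming the generated contents are already in a variable:

	# stream generated contents straight onto the node instead of staging a file
	printf '%s' "$generated_kubeconfig" | ssh <node> 'sudo tee /var/lib/minikube/kubeconfig >/dev/null'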
	I0731 10:52:25.602252   96871 ssh_runner.go:195] Run: openssl version
	I0731 10:52:25.606705   96871 command_runner.go:130] > OpenSSL 3.0.2 15 Mar 2022 (Library: OpenSSL 3.0.2 15 Mar 2022)
	I0731 10:52:25.606755   96871 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/12537.pem && ln -fs /usr/share/ca-certificates/12537.pem /etc/ssl/certs/12537.pem"
	I0731 10:52:25.614368   96871 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/12537.pem
	I0731 10:52:25.617181   96871 command_runner.go:130] > -rw-r--r-- 1 root root 1338 Jul 31 10:39 /usr/share/ca-certificates/12537.pem
	I0731 10:52:25.617202   96871 certs.go:480] hashing: -rw-r--r-- 1 root root 1338 Jul 31 10:39 /usr/share/ca-certificates/12537.pem
	I0731 10:52:25.617235   96871 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/12537.pem
	I0731 10:52:25.623082   96871 command_runner.go:130] > 51391683
	I0731 10:52:25.623130   96871 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/51391683.0 || ln -fs /etc/ssl/certs/12537.pem /etc/ssl/certs/51391683.0"
	I0731 10:52:25.630526   96871 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/125372.pem && ln -fs /usr/share/ca-certificates/125372.pem /etc/ssl/certs/125372.pem"
	I0731 10:52:25.637932   96871 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/125372.pem
	I0731 10:52:25.640656   96871 command_runner.go:130] > -rw-r--r-- 1 root root 1708 Jul 31 10:39 /usr/share/ca-certificates/125372.pem
	I0731 10:52:25.640696   96871 certs.go:480] hashing: -rw-r--r-- 1 root root 1708 Jul 31 10:39 /usr/share/ca-certificates/125372.pem
	I0731 10:52:25.640729   96871 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/125372.pem
	I0731 10:52:25.646183   96871 command_runner.go:130] > 3ec20f2e
	I0731 10:52:25.646433   96871 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/3ec20f2e.0 || ln -fs /etc/ssl/certs/125372.pem /etc/ssl/certs/3ec20f2e.0"
	I0731 10:52:25.653838   96871 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/minikubeCA.pem && ln -fs /usr/share/ca-certificates/minikubeCA.pem /etc/ssl/certs/minikubeCA.pem"
	I0731 10:52:25.661146   96871 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/minikubeCA.pem
	I0731 10:52:25.663925   96871 command_runner.go:130] > -rw-r--r-- 1 root root 1111 Jul 31 10:34 /usr/share/ca-certificates/minikubeCA.pem
	I0731 10:52:25.663962   96871 certs.go:480] hashing: -rw-r--r-- 1 root root 1111 Jul 31 10:34 /usr/share/ca-certificates/minikubeCA.pem
	I0731 10:52:25.663990   96871 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem
	I0731 10:52:25.669870   96871 command_runner.go:130] > b5213941
	I0731 10:52:25.669928   96871 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/b5213941.0 || ln -fs /etc/ssl/certs/minikubeCA.pem /etc/ssl/certs/b5213941.0"
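The openssl sequence above follows OpenSSL's c_rehash convention: a trusted CA is located through a symlink named after the certificate's subject hash, so each installed PEM gets a companion <subject-hash>.0 link. A minimal sketch of the same steps, reusing a path and hash from the log:

	pem=/usr/share/ca-certificates/12537.pem
	hash=$(openssl x509 -hash -noout -in "$pem")   # prints 51391683 above
	sudo ln -fs "$pem" "/etc/ssl/certs/${hash}.0"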
	I0731 10:52:25.677311   96871 ssh_runner.go:195] Run: ls /var/lib/minikube/certs/etcd
	I0731 10:52:25.679936   96871 command_runner.go:130] ! ls: cannot access '/var/lib/minikube/certs/etcd': No such file or directory
	I0731 10:52:25.679985   96871 certs.go:353] certs directory doesn't exist, likely first start: ls /var/lib/minikube/certs/etcd: Process exited with status 2
	stdout:
	
	stderr:
	ls: cannot access '/var/lib/minikube/certs/etcd': No such file or directory
	I0731 10:52:25.680031   96871 kubeadm.go:404] StartCluster: {Name:multinode-776386 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase:v0.0.40@sha256:8cadf23777709e43eca447c47a45f5a4635615129267ce025193040ec92a1631 Memory:2200 CPUs:2 DiskSize:20000 VMDriver: Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:0 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.27.3 ClusterName:multinode-776386 Namespace:default APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:crio CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI: NodeIP: NodePort:8443 NodeName:} Nodes:[{Name: IP:192.168.58.2 Port:8443 KubernetesVersion:v1.27.3 ContainerRuntime:crio ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:true ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0}
	I0731 10:52:25.680112   96871 cri.go:54] listing CRI containers in root : {State:paused Name: Namespaces:[kube-system]}
	I0731 10:52:25.680154   96871 ssh_runner.go:195] Run: sudo -s eval "crictl ps -a --quiet --label io.kubernetes.pod.namespace=kube-system"
	I0731 10:52:25.711712   96871 cri.go:89] found id: ""
	I0731 10:52:25.711754   96871 ssh_runner.go:195] Run: sudo ls /var/lib/kubelet/kubeadm-flags.env /var/lib/kubelet/config.yaml /var/lib/minikube/etcd
	I0731 10:52:25.718961   96871 command_runner.go:130] ! ls: cannot access '/var/lib/kubelet/kubeadm-flags.env': No such file or directory
	I0731 10:52:25.718981   96871 command_runner.go:130] ! ls: cannot access '/var/lib/kubelet/config.yaml': No such file or directory
	I0731 10:52:25.718987   96871 command_runner.go:130] ! ls: cannot access '/var/lib/minikube/etcd': No such file or directory
	I0731 10:52:25.719034   96871 ssh_runner.go:195] Run: sudo cp /var/tmp/minikube/kubeadm.yaml.new /var/tmp/minikube/kubeadm.yaml
	I0731 10:52:25.726040   96871 kubeadm.go:226] ignoring SystemVerification for kubeadm because of docker driver
	I0731 10:52:25.726088   96871 ssh_runner.go:195] Run: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf
	I0731 10:52:25.732967   96871 command_runner.go:130] ! ls: cannot access '/etc/kubernetes/admin.conf': No such file or directory
	I0731 10:52:25.732984   96871 command_runner.go:130] ! ls: cannot access '/etc/kubernetes/kubelet.conf': No such file or directory
	I0731 10:52:25.732991   96871 command_runner.go:130] ! ls: cannot access '/etc/kubernetes/controller-manager.conf': No such file or directory
	I0731 10:52:25.732999   96871 command_runner.go:130] ! ls: cannot access '/etc/kubernetes/scheduler.conf': No such file or directory
	I0731 10:52:25.733032   96871 kubeadm.go:152] config check failed, skipping stale config cleanup: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf: Process exited with status 2
	stdout:
	
	stderr:
	ls: cannot access '/etc/kubernetes/admin.conf': No such file or directory
	ls: cannot access '/etc/kubernetes/kubelet.conf': No such file or directory
	ls: cannot access '/etc/kubernetes/controller-manager.conf': No such file or directory
	ls: cannot access '/etc/kubernetes/scheduler.conf': No such file or directory
	I0731 10:52:25.733097   96871 ssh_runner.go:286] Start: /bin/bash -c "sudo env PATH="/var/lib/minikube/binaries/v1.27.3:$PATH" kubeadm init --config /var/tmp/minikube/kubeadm.yaml  --ignore-preflight-errors=DirAvailable--etc-kubernetes-manifests,DirAvailable--var-lib-minikube,DirAvailable--var-lib-minikube-etcd,FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml,FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml,FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml,FileAvailable--etc-kubernetes-manifests-etcd.yaml,Port-10250,Swap,NumCPU,Mem,SystemVerification,FileContent--proc-sys-net-bridge-bridge-nf-call-iptables"
	I0731 10:52:25.809836   96871 kubeadm.go:322] 	[WARNING SystemVerification]: failed to parse kernel config: unable to load kernel module: "configs", output: "modprobe: FATAL: Module configs not found in directory /lib/modules/5.15.0-1038-gcp\n", err: exit status 1
	I0731 10:52:25.809868   96871 command_runner.go:130] ! 	[WARNING SystemVerification]: failed to parse kernel config: unable to load kernel module: "configs", output: "modprobe: FATAL: Module configs not found in directory /lib/modules/5.15.0-1038-gcp\n", err: exit status 1
	I0731 10:52:25.871320   96871 kubeadm.go:322] 	[WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'
	I0731 10:52:25.871346   96871 command_runner.go:130] ! 	[WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'
	I0731 10:52:34.555031   96871 kubeadm.go:322] [init] Using Kubernetes version: v1.27.3
	I0731 10:52:34.555060   96871 command_runner.go:130] > [init] Using Kubernetes version: v1.27.3
	I0731 10:52:34.555108   96871 kubeadm.go:322] [preflight] Running pre-flight checks
	I0731 10:52:34.555115   96871 command_runner.go:130] > [preflight] Running pre-flight checks
	I0731 10:52:34.555214   96871 kubeadm.go:322] [preflight] The system verification failed. Printing the output from the verification:
	I0731 10:52:34.555222   96871 command_runner.go:130] > [preflight] The system verification failed. Printing the output from the verification:
	I0731 10:52:34.555282   96871 kubeadm.go:322] KERNEL_VERSION: 5.15.0-1038-gcp
	I0731 10:52:34.555289   96871 command_runner.go:130] > KERNEL_VERSION: 5.15.0-1038-gcp
	I0731 10:52:34.555330   96871 kubeadm.go:322] OS: Linux
	I0731 10:52:34.555337   96871 command_runner.go:130] > OS: Linux
	I0731 10:52:34.555391   96871 kubeadm.go:322] CGROUPS_CPU: enabled
	I0731 10:52:34.555397   96871 command_runner.go:130] > CGROUPS_CPU: enabled
	I0731 10:52:34.555458   96871 kubeadm.go:322] CGROUPS_CPUACCT: enabled
	I0731 10:52:34.555470   96871 command_runner.go:130] > CGROUPS_CPUACCT: enabled
	I0731 10:52:34.555524   96871 kubeadm.go:322] CGROUPS_CPUSET: enabled
	I0731 10:52:34.555531   96871 command_runner.go:130] > CGROUPS_CPUSET: enabled
	I0731 10:52:34.555583   96871 kubeadm.go:322] CGROUPS_DEVICES: enabled
	I0731 10:52:34.555590   96871 command_runner.go:130] > CGROUPS_DEVICES: enabled
	I0731 10:52:34.555644   96871 kubeadm.go:322] CGROUPS_FREEZER: enabled
	I0731 10:52:34.555650   96871 command_runner.go:130] > CGROUPS_FREEZER: enabled
	I0731 10:52:34.555708   96871 kubeadm.go:322] CGROUPS_MEMORY: enabled
	I0731 10:52:34.555714   96871 command_runner.go:130] > CGROUPS_MEMORY: enabled
	I0731 10:52:34.555772   96871 kubeadm.go:322] CGROUPS_PIDS: enabled
	I0731 10:52:34.555779   96871 command_runner.go:130] > CGROUPS_PIDS: enabled
	I0731 10:52:34.555843   96871 kubeadm.go:322] CGROUPS_HUGETLB: enabled
	I0731 10:52:34.555850   96871 command_runner.go:130] > CGROUPS_HUGETLB: enabled
	I0731 10:52:34.555913   96871 kubeadm.go:322] CGROUPS_BLKIO: enabled
	I0731 10:52:34.555920   96871 command_runner.go:130] > CGROUPS_BLKIO: enabled
	I0731 10:52:34.556010   96871 kubeadm.go:322] [preflight] Pulling images required for setting up a Kubernetes cluster
	I0731 10:52:34.556019   96871 command_runner.go:130] > [preflight] Pulling images required for setting up a Kubernetes cluster
	I0731 10:52:34.556131   96871 kubeadm.go:322] [preflight] This might take a minute or two, depending on the speed of your internet connection
	I0731 10:52:34.556139   96871 command_runner.go:130] > [preflight] This might take a minute or two, depending on the speed of your internet connection
	I0731 10:52:34.556249   96871 kubeadm.go:322] [preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
	I0731 10:52:34.556256   96871 command_runner.go:130] > [preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
	I0731 10:52:34.556327   96871 kubeadm.go:322] [certs] Using certificateDir folder "/var/lib/minikube/certs"
	I0731 10:52:34.557638   96871 out.go:204]   - Generating certificates and keys ...
	I0731 10:52:34.556488   96871 command_runner.go:130] > [certs] Using certificateDir folder "/var/lib/minikube/certs"
	I0731 10:52:34.557737   96871 kubeadm.go:322] [certs] Using existing ca certificate authority
	I0731 10:52:34.557751   96871 command_runner.go:130] > [certs] Using existing ca certificate authority
	I0731 10:52:34.557827   96871 kubeadm.go:322] [certs] Using existing apiserver certificate and key on disk
	I0731 10:52:34.557839   96871 command_runner.go:130] > [certs] Using existing apiserver certificate and key on disk
	I0731 10:52:34.557921   96871 kubeadm.go:322] [certs] Generating "apiserver-kubelet-client" certificate and key
	I0731 10:52:34.557935   96871 command_runner.go:130] > [certs] Generating "apiserver-kubelet-client" certificate and key
	I0731 10:52:34.558007   96871 kubeadm.go:322] [certs] Generating "front-proxy-ca" certificate and key
	I0731 10:52:34.558017   96871 command_runner.go:130] > [certs] Generating "front-proxy-ca" certificate and key
	I0731 10:52:34.558095   96871 kubeadm.go:322] [certs] Generating "front-proxy-client" certificate and key
	I0731 10:52:34.558107   96871 command_runner.go:130] > [certs] Generating "front-proxy-client" certificate and key
	I0731 10:52:34.558164   96871 kubeadm.go:322] [certs] Generating "etcd/ca" certificate and key
	I0731 10:52:34.558173   96871 command_runner.go:130] > [certs] Generating "etcd/ca" certificate and key
	I0731 10:52:34.558254   96871 kubeadm.go:322] [certs] Generating "etcd/server" certificate and key
	I0731 10:52:34.558264   96871 command_runner.go:130] > [certs] Generating "etcd/server" certificate and key
	I0731 10:52:34.558409   96871 kubeadm.go:322] [certs] etcd/server serving cert is signed for DNS names [localhost multinode-776386] and IPs [192.168.58.2 127.0.0.1 ::1]
	I0731 10:52:34.558421   96871 command_runner.go:130] > [certs] etcd/server serving cert is signed for DNS names [localhost multinode-776386] and IPs [192.168.58.2 127.0.0.1 ::1]
	I0731 10:52:34.558479   96871 kubeadm.go:322] [certs] Generating "etcd/peer" certificate and key
	I0731 10:52:34.558494   96871 command_runner.go:130] > [certs] Generating "etcd/peer" certificate and key
	I0731 10:52:34.558618   96871 kubeadm.go:322] [certs] etcd/peer serving cert is signed for DNS names [localhost multinode-776386] and IPs [192.168.58.2 127.0.0.1 ::1]
	I0731 10:52:34.558625   96871 command_runner.go:130] > [certs] etcd/peer serving cert is signed for DNS names [localhost multinode-776386] and IPs [192.168.58.2 127.0.0.1 ::1]
	I0731 10:52:34.558704   96871 kubeadm.go:322] [certs] Generating "etcd/healthcheck-client" certificate and key
	I0731 10:52:34.558724   96871 command_runner.go:130] > [certs] Generating "etcd/healthcheck-client" certificate and key
	I0731 10:52:34.558790   96871 kubeadm.go:322] [certs] Generating "apiserver-etcd-client" certificate and key
	I0731 10:52:34.558805   96871 command_runner.go:130] > [certs] Generating "apiserver-etcd-client" certificate and key
	I0731 10:52:34.558873   96871 kubeadm.go:322] [certs] Generating "sa" key and public key
	I0731 10:52:34.558882   96871 command_runner.go:130] > [certs] Generating "sa" key and public key
	I0731 10:52:34.558971   96871 kubeadm.go:322] [kubeconfig] Using kubeconfig folder "/etc/kubernetes"
	I0731 10:52:34.558983   96871 command_runner.go:130] > [kubeconfig] Using kubeconfig folder "/etc/kubernetes"
	I0731 10:52:34.559045   96871 kubeadm.go:322] [kubeconfig] Writing "admin.conf" kubeconfig file
	I0731 10:52:34.559055   96871 command_runner.go:130] > [kubeconfig] Writing "admin.conf" kubeconfig file
	I0731 10:52:34.559131   96871 kubeadm.go:322] [kubeconfig] Writing "kubelet.conf" kubeconfig file
	I0731 10:52:34.559143   96871 command_runner.go:130] > [kubeconfig] Writing "kubelet.conf" kubeconfig file
	I0731 10:52:34.559221   96871 kubeadm.go:322] [kubeconfig] Writing "controller-manager.conf" kubeconfig file
	I0731 10:52:34.559231   96871 command_runner.go:130] > [kubeconfig] Writing "controller-manager.conf" kubeconfig file
	I0731 10:52:34.559316   96871 kubeadm.go:322] [kubeconfig] Writing "scheduler.conf" kubeconfig file
	I0731 10:52:34.559328   96871 command_runner.go:130] > [kubeconfig] Writing "scheduler.conf" kubeconfig file
	I0731 10:52:34.559475   96871 kubeadm.go:322] [kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
	I0731 10:52:34.559490   96871 command_runner.go:130] > [kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
	I0731 10:52:34.559620   96871 kubeadm.go:322] [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
	I0731 10:52:34.559630   96871 command_runner.go:130] > [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
	I0731 10:52:34.559659   96871 kubeadm.go:322] [kubelet-start] Starting the kubelet
	I0731 10:52:34.559665   96871 command_runner.go:130] > [kubelet-start] Starting the kubelet
	I0731 10:52:34.559714   96871 kubeadm.go:322] [control-plane] Using manifest folder "/etc/kubernetes/manifests"
	I0731 10:52:34.561151   96871 out.go:204]   - Booting up control plane ...
	I0731 10:52:34.559767   96871 command_runner.go:130] > [control-plane] Using manifest folder "/etc/kubernetes/manifests"
	I0731 10:52:34.561224   96871 kubeadm.go:322] [control-plane] Creating static Pod manifest for "kube-apiserver"
	I0731 10:52:34.561236   96871 command_runner.go:130] > [control-plane] Creating static Pod manifest for "kube-apiserver"
	I0731 10:52:34.561295   96871 kubeadm.go:322] [control-plane] Creating static Pod manifest for "kube-controller-manager"
	I0731 10:52:34.561318   96871 command_runner.go:130] > [control-plane] Creating static Pod manifest for "kube-controller-manager"
	I0731 10:52:34.561405   96871 kubeadm.go:322] [control-plane] Creating static Pod manifest for "kube-scheduler"
	I0731 10:52:34.561416   96871 command_runner.go:130] > [control-plane] Creating static Pod manifest for "kube-scheduler"
	I0731 10:52:34.561481   96871 kubeadm.go:322] [etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
	I0731 10:52:34.561488   96871 command_runner.go:130] > [etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
	I0731 10:52:34.561620   96871 kubeadm.go:322] [wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
	I0731 10:52:34.561628   96871 command_runner.go:130] > [wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
	I0731 10:52:34.561687   96871 kubeadm.go:322] [apiclient] All control plane components are healthy after 5.002592 seconds
	I0731 10:52:34.561707   96871 command_runner.go:130] > [apiclient] All control plane components are healthy after 5.002592 seconds
	I0731 10:52:34.561813   96871 kubeadm.go:322] [upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
	I0731 10:52:34.561821   96871 command_runner.go:130] > [upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
	I0731 10:52:34.561928   96871 kubeadm.go:322] [kubelet] Creating a ConfigMap "kubelet-config" in namespace kube-system with the configuration for the kubelets in the cluster
	I0731 10:52:34.561937   96871 command_runner.go:130] > [kubelet] Creating a ConfigMap "kubelet-config" in namespace kube-system with the configuration for the kubelets in the cluster
	I0731 10:52:34.561982   96871 kubeadm.go:322] [upload-certs] Skipping phase. Please see --upload-certs
	I0731 10:52:34.561993   96871 command_runner.go:130] > [upload-certs] Skipping phase. Please see --upload-certs
	I0731 10:52:34.562264   96871 kubeadm.go:322] [mark-control-plane] Marking the node multinode-776386 as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]
	I0731 10:52:34.562277   96871 command_runner.go:130] > [mark-control-plane] Marking the node multinode-776386 as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]
	I0731 10:52:34.562371   96871 kubeadm.go:322] [bootstrap-token] Using token: q2ola7.rnrdzw4eqhcfeech
	I0731 10:52:34.563756   96871 out.go:204]   - Configuring RBAC rules ...
	I0731 10:52:34.562438   96871 command_runner.go:130] > [bootstrap-token] Using token: q2ola7.rnrdzw4eqhcfeech
	I0731 10:52:34.563851   96871 kubeadm.go:322] [bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
	I0731 10:52:34.563862   96871 command_runner.go:130] > [bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
	I0731 10:52:34.563943   96871 kubeadm.go:322] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes
	I0731 10:52:34.563951   96871 command_runner.go:130] > [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes
	I0731 10:52:34.564096   96871 kubeadm.go:322] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
	I0731 10:52:34.564107   96871 command_runner.go:130] > [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
	I0731 10:52:34.564223   96871 kubeadm.go:322] [bootstrap-token] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
	I0731 10:52:34.564230   96871 command_runner.go:130] > [bootstrap-token] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
	I0731 10:52:34.564327   96871 kubeadm.go:322] [bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
	I0731 10:52:34.564334   96871 command_runner.go:130] > [bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
	I0731 10:52:34.564421   96871 kubeadm.go:322] [bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
	I0731 10:52:34.564432   96871 command_runner.go:130] > [bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
	I0731 10:52:34.564532   96871 kubeadm.go:322] [kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
	I0731 10:52:34.564539   96871 command_runner.go:130] > [kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
	I0731 10:52:34.564573   96871 kubeadm.go:322] [addons] Applied essential addon: CoreDNS
	I0731 10:52:34.564579   96871 command_runner.go:130] > [addons] Applied essential addon: CoreDNS
	I0731 10:52:34.564615   96871 kubeadm.go:322] [addons] Applied essential addon: kube-proxy
	I0731 10:52:34.564621   96871 command_runner.go:130] > [addons] Applied essential addon: kube-proxy
	I0731 10:52:34.564624   96871 kubeadm.go:322] 
	I0731 10:52:34.564675   96871 kubeadm.go:322] Your Kubernetes control-plane has initialized successfully!
	I0731 10:52:34.564681   96871 command_runner.go:130] > Your Kubernetes control-plane has initialized successfully!
	I0731 10:52:34.564684   96871 kubeadm.go:322] 
	I0731 10:52:34.564767   96871 kubeadm.go:322] To start using your cluster, you need to run the following as a regular user:
	I0731 10:52:34.564780   96871 command_runner.go:130] > To start using your cluster, you need to run the following as a regular user:
	I0731 10:52:34.564792   96871 kubeadm.go:322] 
	I0731 10:52:34.564823   96871 kubeadm.go:322]   mkdir -p $HOME/.kube
	I0731 10:52:34.564830   96871 command_runner.go:130] >   mkdir -p $HOME/.kube
	I0731 10:52:34.564897   96871 kubeadm.go:322]   sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
	I0731 10:52:34.564904   96871 command_runner.go:130] >   sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
	I0731 10:52:34.564961   96871 kubeadm.go:322]   sudo chown $(id -u):$(id -g) $HOME/.kube/config
	I0731 10:52:34.564971   96871 command_runner.go:130] >   sudo chown $(id -u):$(id -g) $HOME/.kube/config
	I0731 10:52:34.564979   96871 kubeadm.go:322] 
	I0731 10:52:34.565062   96871 kubeadm.go:322] Alternatively, if you are the root user, you can run:
	I0731 10:52:34.565071   96871 command_runner.go:130] > Alternatively, if you are the root user, you can run:
	I0731 10:52:34.565074   96871 kubeadm.go:322] 
	I0731 10:52:34.565113   96871 kubeadm.go:322]   export KUBECONFIG=/etc/kubernetes/admin.conf
	I0731 10:52:34.565124   96871 command_runner.go:130] >   export KUBECONFIG=/etc/kubernetes/admin.conf
	I0731 10:52:34.565130   96871 kubeadm.go:322] 
	I0731 10:52:34.565177   96871 kubeadm.go:322] You should now deploy a pod network to the cluster.
	I0731 10:52:34.565185   96871 command_runner.go:130] > You should now deploy a pod network to the cluster.
	I0731 10:52:34.565249   96871 kubeadm.go:322] Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
	I0731 10:52:34.565257   96871 command_runner.go:130] > Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
	I0731 10:52:34.565360   96871 kubeadm.go:322]   https://kubernetes.io/docs/concepts/cluster-administration/addons/
	I0731 10:52:34.565367   96871 command_runner.go:130] >   https://kubernetes.io/docs/concepts/cluster-administration/addons/
	I0731 10:52:34.565371   96871 kubeadm.go:322] 
	I0731 10:52:34.565441   96871 kubeadm.go:322] You can now join any number of control-plane nodes by copying certificate authorities
	I0731 10:52:34.565453   96871 command_runner.go:130] > You can now join any number of control-plane nodes by copying certificate authorities
	I0731 10:52:34.565530   96871 kubeadm.go:322] and service account keys on each node and then running the following as root:
	I0731 10:52:34.565537   96871 command_runner.go:130] > and service account keys on each node and then running the following as root:
	I0731 10:52:34.565540   96871 kubeadm.go:322] 
	I0731 10:52:34.565657   96871 kubeadm.go:322]   kubeadm join control-plane.minikube.internal:8443 --token q2ola7.rnrdzw4eqhcfeech \
	I0731 10:52:34.565665   96871 command_runner.go:130] >   kubeadm join control-plane.minikube.internal:8443 --token q2ola7.rnrdzw4eqhcfeech \
	I0731 10:52:34.565740   96871 kubeadm.go:322] 	--discovery-token-ca-cert-hash sha256:332f1e2c8f6e50afb8fc2995698452a3be7de85c383b512a61c05acf2d3047a4 \
	I0731 10:52:34.565747   96871 command_runner.go:130] > 	--discovery-token-ca-cert-hash sha256:332f1e2c8f6e50afb8fc2995698452a3be7de85c383b512a61c05acf2d3047a4 \
	I0731 10:52:34.565772   96871 kubeadm.go:322] 	--control-plane 
	I0731 10:52:34.565778   96871 command_runner.go:130] > 	--control-plane 
	I0731 10:52:34.565781   96871 kubeadm.go:322] 
	I0731 10:52:34.565854   96871 kubeadm.go:322] Then you can join any number of worker nodes by running the following on each as root:
	I0731 10:52:34.565862   96871 command_runner.go:130] > Then you can join any number of worker nodes by running the following on each as root:
	I0731 10:52:34.565866   96871 kubeadm.go:322] 
	I0731 10:52:34.565932   96871 kubeadm.go:322] kubeadm join control-plane.minikube.internal:8443 --token q2ola7.rnrdzw4eqhcfeech \
	I0731 10:52:34.565938   96871 command_runner.go:130] > kubeadm join control-plane.minikube.internal:8443 --token q2ola7.rnrdzw4eqhcfeech \
	I0731 10:52:34.566024   96871 kubeadm.go:322] 	--discovery-token-ca-cert-hash sha256:332f1e2c8f6e50afb8fc2995698452a3be7de85c383b512a61c05acf2d3047a4 
	I0731 10:52:34.566030   96871 command_runner.go:130] > 	--discovery-token-ca-cert-hash sha256:332f1e2c8f6e50afb8fc2995698452a3be7de85c383b512a61c05acf2d3047a4 
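For reference, the --discovery-token-ca-cert-hash in the join commands is the SHA-256 of the cluster CA's DER-encoded public key. kubeadm's documentation gives an openssl pipeline for recomputing it; on this node the CA sits at the path the certs were copied to earlier in the log:

	openssl x509 -pubkey -in /var/lib/minikube/certs/ca.crt \
	  | openssl rsa -pubin -outform der 2>/dev/null \
	  | openssl dgst -sha256 -hex | sed 's/^.* //'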
	I0731 10:52:34.566047   96871 cni.go:84] Creating CNI manager for ""
	I0731 10:52:34.566061   96871 cni.go:136] 1 nodes found, recommending kindnet
	I0731 10:52:34.567641   96871 out.go:177] * Configuring CNI (Container Networking Interface) ...
	I0731 10:52:34.568906   96871 ssh_runner.go:195] Run: stat /opt/cni/bin/portmap
	I0731 10:52:34.608189   96871 command_runner.go:130] >   File: /opt/cni/bin/portmap
	I0731 10:52:34.608216   96871 command_runner.go:130] >   Size: 3955775   	Blocks: 7728       IO Block: 4096   regular file
	I0731 10:52:34.608226   96871 command_runner.go:130] > Device: 37h/55d	Inode: 805394      Links: 1
	I0731 10:52:34.608237   96871 command_runner.go:130] > Access: (0755/-rwxr-xr-x)  Uid: (    0/    root)   Gid: (    0/    root)
	I0731 10:52:34.608250   96871 command_runner.go:130] > Access: 2023-05-09 19:53:47.000000000 +0000
	I0731 10:52:34.608261   96871 command_runner.go:130] > Modify: 2023-05-09 19:53:47.000000000 +0000
	I0731 10:52:34.608269   96871 command_runner.go:130] > Change: 2023-07-31 10:33:56.255880962 +0000
	I0731 10:52:34.608279   96871 command_runner.go:130] >  Birth: 2023-07-31 10:33:56.227878277 +0000
	I0731 10:52:34.608331   96871 cni.go:182] applying CNI manifest using /var/lib/minikube/binaries/v1.27.3/kubectl ...
	I0731 10:52:34.608345   96871 ssh_runner.go:362] scp memory --> /var/tmp/minikube/cni.yaml (2438 bytes)
	I0731 10:52:34.625757   96871 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.27.3/kubectl apply --kubeconfig=/var/lib/minikube/kubeconfig -f /var/tmp/minikube/cni.yaml
	I0731 10:52:35.264018   96871 command_runner.go:130] > clusterrole.rbac.authorization.k8s.io/kindnet created
	I0731 10:52:35.264040   96871 command_runner.go:130] > clusterrolebinding.rbac.authorization.k8s.io/kindnet created
	I0731 10:52:35.264046   96871 command_runner.go:130] > serviceaccount/kindnet created
	I0731 10:52:35.264050   96871 command_runner.go:130] > daemonset.apps/kindnet created
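To confirm the CNI rollout by hand after the apply, the created DaemonSet can be queried directly; minikube's kindnet manifest places it in kube-system (the app=kindnet label below is assumed from the upstream manifest):

	kubectl -n kube-system get daemonset kindnet
	kubectl -n kube-system get pods -l app=kindnet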
	I0731 10:52:35.264087   96871 ssh_runner.go:195] Run: /bin/bash -c "cat /proc/$(pgrep kube-apiserver)/oom_adj"
	I0731 10:52:35.264167   96871 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.27.3/kubectl create clusterrolebinding minikube-rbac --clusterrole=cluster-admin --serviceaccount=kube-system:default --kubeconfig=/var/lib/minikube/kubeconfig
	I0731 10:52:35.264186   96871 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.27.3/kubectl label nodes minikube.k8s.io/version=v1.31.1 minikube.k8s.io/commit=a7848ba25aaaad8ebb50e721c0d343e471188fc7 minikube.k8s.io/name=multinode-776386 minikube.k8s.io/updated_at=2023_07_31T10_52_35_0700 minikube.k8s.io/primary=true --all --overwrite --kubeconfig=/var/lib/minikube/kubeconfig
	I0731 10:52:35.270758   96871 command_runner.go:130] > -16
	I0731 10:52:35.270790   96871 ops.go:34] apiserver oom_adj: -16
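The oom_adj probe above reads /proc/<pid>/oom_adj for the apiserver and logs -16, meaning the process is strongly shielded from the OOM killer (oom_adj is the legacy -17..15 interface; modern kernels map it onto oom_score_adj, which ranges -1000..1000). The check boils down to:

	cat /proc/"$(pgrep kube-apiserver)"/oom_adj   # -16 here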
	I0731 10:52:35.326754   96871 command_runner.go:130] > clusterrolebinding.rbac.authorization.k8s.io/minikube-rbac created
	I0731 10:52:35.330891   96871 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.27.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I0731 10:52:35.336003   96871 command_runner.go:130] > node/multinode-776386 labeled
	I0731 10:52:35.390358   96871 command_runner.go:130] ! Error from server (NotFound): serviceaccounts "default" not found
	I0731 10:52:35.393010   96871 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.27.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I0731 10:52:35.456311   96871 command_runner.go:130] ! Error from server (NotFound): serviceaccounts "default" not found
	I0731 10:52:35.959376   96871 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.27.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I0731 10:52:36.016925   96871 command_runner.go:130] ! Error from server (NotFound): serviceaccounts "default" not found
	I0731 10:52:36.458893   96871 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.27.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I0731 10:52:36.519641   96871 command_runner.go:130] ! Error from server (NotFound): serviceaccounts "default" not found
	I0731 10:52:36.959172   96871 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.27.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I0731 10:52:37.021067   96871 command_runner.go:130] ! Error from server (NotFound): serviceaccounts "default" not found
	I0731 10:52:37.459411   96871 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.27.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I0731 10:52:37.521246   96871 command_runner.go:130] ! Error from server (NotFound): serviceaccounts "default" not found
	I0731 10:52:37.959800   96871 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.27.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I0731 10:52:38.020970   96871 command_runner.go:130] ! Error from server (NotFound): serviceaccounts "default" not found
	I0731 10:52:38.459559   96871 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.27.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I0731 10:52:38.523090   96871 command_runner.go:130] ! Error from server (NotFound): serviceaccounts "default" not found
	I0731 10:52:38.959725   96871 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.27.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I0731 10:52:39.023437   96871 command_runner.go:130] ! Error from server (NotFound): serviceaccounts "default" not found
	I0731 10:52:39.458945   96871 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.27.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I0731 10:52:39.520791   96871 command_runner.go:130] ! Error from server (NotFound): serviceaccounts "default" not found
	I0731 10:52:39.959443   96871 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.27.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I0731 10:52:40.017938   96871 command_runner.go:130] ! Error from server (NotFound): serviceaccounts "default" not found
	I0731 10:52:40.458909   96871 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.27.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I0731 10:52:40.521801   96871 command_runner.go:130] ! Error from server (NotFound): serviceaccounts "default" not found
	I0731 10:52:40.959798   96871 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.27.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I0731 10:52:41.021193   96871 command_runner.go:130] ! Error from server (NotFound): serviceaccounts "default" not found
	I0731 10:52:41.459261   96871 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.27.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I0731 10:52:41.521157   96871 command_runner.go:130] ! Error from server (NotFound): serviceaccounts "default" not found
	I0731 10:52:41.959758   96871 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.27.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I0731 10:52:42.018951   96871 command_runner.go:130] ! Error from server (NotFound): serviceaccounts "default" not found
	I0731 10:52:42.459843   96871 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.27.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I0731 10:52:42.522306   96871 command_runner.go:130] ! Error from server (NotFound): serviceaccounts "default" not found
	I0731 10:52:42.959833   96871 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.27.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I0731 10:52:43.021001   96871 command_runner.go:130] ! Error from server (NotFound): serviceaccounts "default" not found
	I0731 10:52:43.459426   96871 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.27.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I0731 10:52:43.523263   96871 command_runner.go:130] ! Error from server (NotFound): serviceaccounts "default" not found
	I0731 10:52:43.959024   96871 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.27.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I0731 10:52:44.019646   96871 command_runner.go:130] ! Error from server (NotFound): serviceaccounts "default" not found
	I0731 10:52:44.459420   96871 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.27.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I0731 10:52:44.519420   96871 command_runner.go:130] ! Error from server (NotFound): serviceaccounts "default" not found
	I0731 10:52:44.959389   96871 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.27.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I0731 10:52:45.022472   96871 command_runner.go:130] ! Error from server (NotFound): serviceaccounts "default" not found
	I0731 10:52:45.459329   96871 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.27.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I0731 10:52:45.521028   96871 command_runner.go:130] ! Error from server (NotFound): serviceaccounts "default" not found
	I0731 10:52:45.959201   96871 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.27.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I0731 10:52:46.021338   96871 command_runner.go:130] ! Error from server (NotFound): serviceaccounts "default" not found
	I0731 10:52:46.459380   96871 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.27.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I0731 10:52:46.520781   96871 command_runner.go:130] ! Error from server (NotFound): serviceaccounts "default" not found
	I0731 10:52:46.959004   96871 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.27.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I0731 10:52:47.021571   96871 command_runner.go:130] ! Error from server (NotFound): serviceaccounts "default" not found
	I0731 10:52:47.459079   96871 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.27.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I0731 10:52:47.522044   96871 command_runner.go:130] ! Error from server (NotFound): serviceaccounts "default" not found
	I0731 10:52:47.959376   96871 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.27.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I0731 10:52:48.020767   96871 command_runner.go:130] ! Error from server (NotFound): serviceaccounts "default" not found
	I0731 10:52:48.459376   96871 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.27.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I0731 10:52:48.531982   96871 command_runner.go:130] > NAME      SECRETS   AGE
	I0731 10:52:48.532007   96871 command_runner.go:130] > default   0         0s
	I0731 10:52:48.534260   96871 kubeadm.go:1081] duration metric: took 13.270141586s to wait for elevateKubeSystemPrivileges.
	I0731 10:52:48.534292   96871 kubeadm.go:406] StartCluster complete in 22.854264233s
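The long run of NotFound responses above is an intentional poll: after kubeadm init, the "default" ServiceAccount only appears once kube-controller-manager's service-account controller is running, so minikube retries until it exists; the 13.27s elevateKubeSystemPrivileges metric is this wait. A shell equivalent of the loop, with an illustrative interval:

	until kubectl -n default get sa default >/dev/null 2>&1; do sleep 0.5; done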
	I0731 10:52:48.534312   96871 settings.go:142] acquiring lock: {Name:mk1af30688f984f447d2a45e33362813edbbcab6 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
	I0731 10:52:48.534384   96871 settings.go:150] Updating kubeconfig:  /home/jenkins/minikube-integration/16969-5799/kubeconfig
	I0731 10:52:48.535017   96871 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/16969-5799/kubeconfig: {Name:mkf8010bda730fc5f9ac63bea8b114101911b8e2 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
	I0731 10:52:48.535235   96871 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.27.3/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml"
	I0731 10:52:48.535270   96871 addons.go:499] enable addons start: toEnable=map[ambassador:false auto-pause:false cloud-spanner:false csi-hostpath-driver:false dashboard:false default-storageclass:true efk:false freshpod:false gcp-auth:false gvisor:false headlamp:false helm-tiller:false inaccel:false ingress:false ingress-dns:false inspektor-gadget:false istio:false istio-provisioner:false kong:false kubevirt:false logviewer:false metallb:false metrics-server:false nvidia-driver-installer:false nvidia-gpu-device-plugin:false olm:false pod-security-policy:false portainer:false registry:false registry-aliases:false registry-creds:false storage-provisioner:true storage-provisioner-gluster:false volumesnapshots:false]
	I0731 10:52:48.535382   96871 addons.go:69] Setting storage-provisioner=true in profile "multinode-776386"
	I0731 10:52:48.535396   96871 addons.go:69] Setting default-storageclass=true in profile "multinode-776386"
	I0731 10:52:48.535413   96871 addons_storage_classes.go:33] enableOrDisableStorageClasses default-storageclass=true on "multinode-776386"
	I0731 10:52:48.535416   96871 config.go:182] Loaded profile config "multinode-776386": Driver=docker, ContainerRuntime=crio, KubernetesVersion=v1.27.3
	I0731 10:52:48.535420   96871 addons.go:231] Setting addon storage-provisioner=true in "multinode-776386"
	I0731 10:52:48.535475   96871 host.go:66] Checking if "multinode-776386" exists ...
	I0731 10:52:48.535590   96871 loader.go:373] Config loaded from file:  /home/jenkins/minikube-integration/16969-5799/kubeconfig
	I0731 10:52:48.535795   96871 cli_runner.go:164] Run: docker container inspect multinode-776386 --format={{.State.Status}}
	I0731 10:52:48.535899   96871 kapi.go:59] client config for multinode-776386: &rest.Config{Host:"https://192.168.58.2:8443", APIPath:"", ContentConfig:rest.ContentConfig{AcceptContentTypes:"", ContentType:"", GroupVersion:(*schema.GroupVersion)(nil), NegotiatedSerializer:runtime.NegotiatedSerializer(nil)}, Username:"", Password:"", BearerToken:"", BearerTokenFile:"", Impersonate:rest.ImpersonationConfig{UserName:"", UID:"", Groups:[]string(nil), Extra:map[string][]string(nil)}, AuthProvider:<nil>, AuthConfigPersister:rest.AuthProviderConfigPersister(nil), ExecProvider:<nil>, TLSClientConfig:rest.sanitizedTLSClientConfig{Insecure:false, ServerName:"", CertFile:"/home/jenkins/minikube-integration/16969-5799/.minikube/profiles/multinode-776386/client.crt", KeyFile:"/home/jenkins/minikube-integration/16969-5799/.minikube/profiles/multinode-776386/client.key", CAFile:"/home/jenkins/minikube-integration/16969-5799/.minikube/ca.crt", CertData:[]uint8(nil), KeyData:[]uint8(nil), CAData:[]uint8(nil), NextProtos:[]string(nil)}, UserAgent:"", DisableCompression:false, Transport:http.RoundTripper(nil), WrapTransport:(transport.WrapperFunc)(0x19c2840), QPS:0, Burst:0, RateLimiter:flowcontrol.RateLimiter(nil), WarningHandler:rest.WarningHandler(nil), Timeout:0, Dial:(func(context.Context, string, string) (net.Conn, error))(nil), Proxy:(func(*http.Request) (*url.URL, error))(nil)}
	I0731 10:52:48.535979   96871 cli_runner.go:164] Run: docker container inspect multinode-776386 --format={{.State.Status}}
	I0731 10:52:48.536778   96871 cert_rotation.go:137] Starting client certificate rotation controller
	I0731 10:52:48.537015   96871 round_trippers.go:463] GET https://192.168.58.2:8443/apis/apps/v1/namespaces/kube-system/deployments/coredns/scale
	I0731 10:52:48.537029   96871 round_trippers.go:469] Request Headers:
	I0731 10:52:48.537037   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:52:48.537043   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:52:48.548862   96871 round_trippers.go:574] Response Status: 200 OK in 11 milliseconds
	I0731 10:52:48.548887   96871 round_trippers.go:577] Response Headers:
	I0731 10:52:48.548898   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:52:48.548907   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:52:48.548915   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:52:48.548924   96871 round_trippers.go:580]     Content-Length: 291
	I0731 10:52:48.548935   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:52:48 GMT
	I0731 10:52:48.548943   96871 round_trippers.go:580]     Audit-Id: 4a58a0f8-a949-4f66-b786-72d624a08f8a
	I0731 10:52:48.548951   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:52:48.548981   96871 request.go:1188] Response Body: {"kind":"Scale","apiVersion":"autoscaling/v1","metadata":{"name":"coredns","namespace":"kube-system","uid":"fbc8330f-80e7-4b50-9ba4-87f8ccbfc79a","resourceVersion":"363","creationTimestamp":"2023-07-31T10:52:34Z"},"spec":{"replicas":2},"status":{"replicas":0,"selector":"k8s-app=kube-dns"}}
	I0731 10:52:48.552252   96871 request.go:1188] Request Body: {"kind":"Scale","apiVersion":"autoscaling/v1","metadata":{"name":"coredns","namespace":"kube-system","uid":"fbc8330f-80e7-4b50-9ba4-87f8ccbfc79a","resourceVersion":"363","creationTimestamp":"2023-07-31T10:52:34Z"},"spec":{"replicas":1},"status":{"replicas":0,"selector":"k8s-app=kube-dns"}}
	I0731 10:52:48.552338   96871 round_trippers.go:463] PUT https://192.168.58.2:8443/apis/apps/v1/namespaces/kube-system/deployments/coredns/scale
	I0731 10:52:48.552346   96871 round_trippers.go:469] Request Headers:
	I0731 10:52:48.552359   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:52:48.552370   96871 round_trippers.go:473]     Content-Type: application/json
	I0731 10:52:48.552381   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:52:48.561146   96871 out.go:177]   - Using image gcr.io/k8s-minikube/storage-provisioner:v5
	I0731 10:52:48.559755   96871 loader.go:373] Config loaded from file:  /home/jenkins/minikube-integration/16969-5799/kubeconfig
	I0731 10:52:48.562429   96871 addons.go:423] installing /etc/kubernetes/addons/storage-provisioner.yaml
	I0731 10:52:48.562447   96871 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/storage-provisioner.yaml (2676 bytes)
	I0731 10:52:48.562499   96871 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" multinode-776386
	I0731 10:52:48.562631   96871 kapi.go:59] client config for multinode-776386: &rest.Config{Host:"https://192.168.58.2:8443", APIPath:"", ContentConfig:rest.ContentConfig{AcceptContentTypes:"", ContentType:"", GroupVersion:(*schema.GroupVersion)(nil), NegotiatedSerializer:runtime.NegotiatedSerializer(nil)}, Username:"", Password:"", BearerToken:"", BearerTokenFile:"", Impersonate:rest.ImpersonationConfig{UserName:"", UID:"", Groups:[]string(nil), Extra:map[string][]string(nil)}, AuthProvider:<nil>, AuthConfigPersister:rest.AuthProviderConfigPersister(nil), ExecProvider:<nil>, TLSClientConfig:rest.sanitizedTLSClientConfig{Insecure:false, ServerName:"", CertFile:"/home/jenkins/minikube-integration/16969-5799/.minikube/profiles/multinode-776386/client.crt", KeyFile:"/home/jenkins/minikube-integration/16969-5799/.minikube/profiles/multinode-776386/client.key", CAFile:"/home/jenkins/minikube-integration/16969-5799/.minikube/ca.crt", CertData:[]uint8(nil), KeyData:[]uint8(nil), CAData:[]uint8(nil), NextProtos:[]string(nil)}, UserAgent:"", DisableCompression:false, Transport:http.RoundTripper(nil), WrapTransport:(transport.WrapperFunc)(0x19c2840), QPS:0, Burst:0, RateLimiter:flowcontrol.RateLimiter(nil), WarningHandler:rest.WarningHandler(nil), Timeout:0, Dial:(func(context.Context, string, string) (net.Conn, error))(nil), Proxy:(func(*http.Request) (*url.URL, error))(nil)}
	I0731 10:52:48.562943   96871 round_trippers.go:463] GET https://192.168.58.2:8443/apis/storage.k8s.io/v1/storageclasses
	I0731 10:52:48.562954   96871 round_trippers.go:469] Request Headers:
	I0731 10:52:48.562961   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:52:48.562968   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:52:48.577543   96871 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32847 SSHKeyPath:/home/jenkins/minikube-integration/16969-5799/.minikube/machines/multinode-776386/id_rsa Username:docker}
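The port in the "new ssh client" line comes from the docker container inspect template run just above, which resolves the host port published for the container's 22/tcp. The same session could be opened by hand with the values from the log:

	ssh -i /home/jenkins/minikube-integration/16969-5799/.minikube/machines/multinode-776386/id_rsa \
	    -p 32847 docker@127.0.0.1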
	I0731 10:52:48.608079   96871 round_trippers.go:574] Response Status: 200 OK in 55 milliseconds
	I0731 10:52:48.608125   96871 round_trippers.go:577] Response Headers:
	I0731 10:52:48.608137   96871 round_trippers.go:580]     Content-Length: 291
	I0731 10:52:48.608152   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:52:48 GMT
	I0731 10:52:48.608162   96871 round_trippers.go:580]     Audit-Id: d120596a-4ffc-4335-9892-37f3ef4878f8
	I0731 10:52:48.608169   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:52:48.608179   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:52:48.608190   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:52:48.608199   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:52:48.608089   96871 round_trippers.go:574] Response Status: 200 OK in 45 milliseconds
	I0731 10:52:48.608227   96871 round_trippers.go:577] Response Headers:
	I0731 10:52:48.608229   96871 request.go:1188] Response Body: {"kind":"Scale","apiVersion":"autoscaling/v1","metadata":{"name":"coredns","namespace":"kube-system","uid":"fbc8330f-80e7-4b50-9ba4-87f8ccbfc79a","resourceVersion":"373","creationTimestamp":"2023-07-31T10:52:34Z"},"spec":{"replicas":1},"status":{"replicas":0,"selector":"k8s-app=kube-dns"}}
	I0731 10:52:48.608239   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:52:48.608249   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:52:48.608262   96871 round_trippers.go:580]     Content-Length: 109
	I0731 10:52:48.608273   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:52:48 GMT
	I0731 10:52:48.608282   96871 round_trippers.go:580]     Audit-Id: 7925291d-0d55-4eb7-ba97-00dc2a2c32bd
	I0731 10:52:48.608292   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:52:48.608300   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:52:48.608322   96871 request.go:1188] Response Body: {"kind":"StorageClassList","apiVersion":"storage.k8s.io/v1","metadata":{"resourceVersion":"373"},"items":[]}
	I0731 10:52:48.608422   96871 round_trippers.go:463] GET https://192.168.58.2:8443/apis/apps/v1/namespaces/kube-system/deployments/coredns/scale
	I0731 10:52:48.608436   96871 round_trippers.go:469] Request Headers:
	I0731 10:52:48.608446   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:52:48.608455   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:52:48.608592   96871 addons.go:231] Setting addon default-storageclass=true in "multinode-776386"
	I0731 10:52:48.608634   96871 host.go:66] Checking if "multinode-776386" exists ...
	I0731 10:52:48.609115   96871 cli_runner.go:164] Run: docker container inspect multinode-776386 --format={{.State.Status}}
	I0731 10:52:48.610809   96871 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
	I0731 10:52:48.610829   96871 round_trippers.go:577] Response Headers:
	I0731 10:52:48.610840   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:52:48.610850   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:52:48.610861   96871 round_trippers.go:580]     Content-Length: 291
	I0731 10:52:48.610873   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:52:48 GMT
	I0731 10:52:48.610885   96871 round_trippers.go:580]     Audit-Id: 177e5060-3d29-439f-9696-ee1240ad695e
	I0731 10:52:48.610894   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:52:48.610906   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:52:48.611088   96871 request.go:1188] Response Body: {"kind":"Scale","apiVersion":"autoscaling/v1","metadata":{"name":"coredns","namespace":"kube-system","uid":"fbc8330f-80e7-4b50-9ba4-87f8ccbfc79a","resourceVersion":"373","creationTimestamp":"2023-07-31T10:52:34Z"},"spec":{"replicas":1},"status":{"replicas":0,"selector":"k8s-app=kube-dns"}}
	I0731 10:52:48.611200   96871 kapi.go:248] "coredns" deployment in "kube-system" namespace and "multinode-776386" context rescaled to 1 replicas
	I0731 10:52:48.611234   96871 start.go:223] Will wait 6m0s for node &{Name: IP:192.168.58.2 Port:8443 KubernetesVersion:v1.27.3 ContainerRuntime:crio ControlPlane:true Worker:true}
	I0731 10:52:48.612873   96871 out.go:177] * Verifying Kubernetes components...
	I0731 10:52:48.614300   96871 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet
	I0731 10:52:48.629746   96871 addons.go:423] installing /etc/kubernetes/addons/storageclass.yaml
	I0731 10:52:48.629769   96871 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/storageclass.yaml (271 bytes)
	I0731 10:52:48.629819   96871 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" multinode-776386
	I0731 10:52:48.649067   96871 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32847 SSHKeyPath:/home/jenkins/minikube-integration/16969-5799/.minikube/machines/multinode-776386/id_rsa Username:docker}
	I0731 10:52:48.717628   96871 command_runner.go:130] > apiVersion: v1
	I0731 10:52:48.717652   96871 command_runner.go:130] > data:
	I0731 10:52:48.717658   96871 command_runner.go:130] >   Corefile: |
	I0731 10:52:48.717663   96871 command_runner.go:130] >     .:53 {
	I0731 10:52:48.717669   96871 command_runner.go:130] >         errors
	I0731 10:52:48.717676   96871 command_runner.go:130] >         health {
	I0731 10:52:48.717683   96871 command_runner.go:130] >            lameduck 5s
	I0731 10:52:48.717688   96871 command_runner.go:130] >         }
	I0731 10:52:48.717694   96871 command_runner.go:130] >         ready
	I0731 10:52:48.717703   96871 command_runner.go:130] >         kubernetes cluster.local in-addr.arpa ip6.arpa {
	I0731 10:52:48.717710   96871 command_runner.go:130] >            pods insecure
	I0731 10:52:48.717718   96871 command_runner.go:130] >            fallthrough in-addr.arpa ip6.arpa
	I0731 10:52:48.717733   96871 command_runner.go:130] >            ttl 30
	I0731 10:52:48.717743   96871 command_runner.go:130] >         }
	I0731 10:52:48.717751   96871 command_runner.go:130] >         prometheus :9153
	I0731 10:52:48.717767   96871 command_runner.go:130] >         forward . /etc/resolv.conf {
	I0731 10:52:48.717775   96871 command_runner.go:130] >            max_concurrent 1000
	I0731 10:52:48.717782   96871 command_runner.go:130] >         }
	I0731 10:52:48.717789   96871 command_runner.go:130] >         cache 30
	I0731 10:52:48.717799   96871 command_runner.go:130] >         loop
	I0731 10:52:48.717807   96871 command_runner.go:130] >         reload
	I0731 10:52:48.717816   96871 command_runner.go:130] >         loadbalance
	I0731 10:52:48.717823   96871 command_runner.go:130] >     }
	I0731 10:52:48.717833   96871 command_runner.go:130] > kind: ConfigMap
	I0731 10:52:48.717843   96871 command_runner.go:130] > metadata:
	I0731 10:52:48.717854   96871 command_runner.go:130] >   creationTimestamp: "2023-07-31T10:52:34Z"
	I0731 10:52:48.717864   96871 command_runner.go:130] >   name: coredns
	I0731 10:52:48.717871   96871 command_runner.go:130] >   namespace: kube-system
	I0731 10:52:48.717881   96871 command_runner.go:130] >   resourceVersion: "255"
	I0731 10:52:48.717889   96871 command_runner.go:130] >   uid: a7d32046-5b64-4ea2-9f1b-3c1f18e9dc44
	I0731 10:52:48.721150   96871 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.27.3/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml | sed -e '/^        forward . \/etc\/resolv.conf.*/i \        hosts {\n           192.168.58.1 host.minikube.internal\n           fallthrough\n        }' -e '/^        errors *$/i \        log' | sudo /var/lib/minikube/binaries/v1.27.3/kubectl --kubeconfig=/var/lib/minikube/kubeconfig replace -f -"
	I0731 10:52:48.721449   96871 loader.go:373] Config loaded from file:  /home/jenkins/minikube-integration/16969-5799/kubeconfig
	I0731 10:52:48.721762   96871 kapi.go:59] client config for multinode-776386: &rest.Config{Host:"https://192.168.58.2:8443", APIPath:"", ContentConfig:rest.ContentConfig{AcceptContentTypes:"", ContentType:"", GroupVersion:(*schema.GroupVersion)(nil), NegotiatedSerializer:runtime.NegotiatedSerializer(nil)}, Username:"", Password:"", BearerToken:"", BearerTokenFile:"", Impersonate:rest.ImpersonationConfig{UserName:"", UID:"", Groups:[]string(nil), Extra:map[string][]string(nil)}, AuthProvider:<nil>, AuthConfigPersister:rest.AuthProviderConfigPersister(nil), ExecProvider:<nil>, TLSClientConfig:rest.sanitizedTLSClientConfig{Insecure:false, ServerName:"", CertFile:"/home/jenkins/minikube-integration/16969-5799/.minikube/profiles/multinode-776386/client.crt", KeyFile:"/home/jenkins/minikube-integration/16969-5799/.minikube/profiles/multinode-776386/client.key", CAFile:"/home/jenkins/minikube-integration/16969-5799/.minikube/ca.crt", CertData:[]uint8(nil), KeyData:[]uint8(nil), CAData:[]uint8(nil), NextProtos:[]string(nil)}, UserAgent:"", DisableCompression:false, Transport:http.RoundTripper(nil), WrapTransport:(transport.WrapperFunc)(0x19c2840), QPS:0, Burst:0, RateLimiter:flowcontrol.RateLimiter(nil), WarningHandler:rest.WarningHandler(nil), Timeout:0, Dial:(func(context.Context, string, string) (net.Conn, error))(nil), Proxy:(func(*http.Request) (*url.URL, error))(nil)}
	I0731 10:52:48.722114   96871 node_ready.go:35] waiting up to 6m0s for node "multinode-776386" to be "Ready" ...
	I0731 10:52:48.722249   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-776386
	I0731 10:52:48.722260   96871 round_trippers.go:469] Request Headers:
	I0731 10:52:48.722272   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:52:48.722287   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:52:48.725591   96871 round_trippers.go:574] Response Status: 200 OK in 3 milliseconds
	I0731 10:52:48.725620   96871 round_trippers.go:577] Response Headers:
	I0731 10:52:48.725631   96871 round_trippers.go:580]     Audit-Id: 002898cc-93a1-4421-9d67-b762849eb480
	I0731 10:52:48.725640   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:52:48.725650   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:52:48.725664   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:52:48.725674   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:52:48.725683   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:52:48 GMT
	I0731 10:52:48.725815   96871 request.go:1188] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-776386","uid":"4de4b338-d1a0-4f0a-acb8-6058d42c7868","resourceVersion":"364","creationTimestamp":"2023-07-31T10:52:31Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-776386","kubernetes.io/os":"linux","minikube.k8s.io/commit":"a7848ba25aaaad8ebb50e721c0d343e471188fc7","minikube.k8s.io/name":"multinode-776386","minikube.k8s.io/primary":"true","minikube.k8s.io/updated_at":"2023_07_31T10_52_35_0700","minikube.k8s.io/version":"v1.31.1","node-role.kubernetes.io/control-plane":"","node.kubernetes.io/exclude-from-external-load-balancers":""},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"unix:///var/run/crio/crio.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kubelet","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:52:31Z","fieldsType":"FieldsV1","fiel [truncated 6141 chars]
	I0731 10:52:48.726660   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-776386
	I0731 10:52:48.726716   96871 round_trippers.go:469] Request Headers:
	I0731 10:52:48.726741   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:52:48.726760   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:52:48.727198   96871 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.27.3/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml
	I0731 10:52:48.733750   96871 round_trippers.go:574] Response Status: 200 OK in 6 milliseconds
	I0731 10:52:48.733775   96871 round_trippers.go:577] Response Headers:
	I0731 10:52:48.733784   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:52:48 GMT
	I0731 10:52:48.733802   96871 round_trippers.go:580]     Audit-Id: ba2f341f-8564-4512-971d-641ffae2022b
	I0731 10:52:48.733814   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:52:48.733822   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:52:48.733831   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:52:48.733845   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:52:48.734035   96871 request.go:1188] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-776386","uid":"4de4b338-d1a0-4f0a-acb8-6058d42c7868","resourceVersion":"364","creationTimestamp":"2023-07-31T10:52:31Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-776386","kubernetes.io/os":"linux","minikube.k8s.io/commit":"a7848ba25aaaad8ebb50e721c0d343e471188fc7","minikube.k8s.io/name":"multinode-776386","minikube.k8s.io/primary":"true","minikube.k8s.io/updated_at":"2023_07_31T10_52_35_0700","minikube.k8s.io/version":"v1.31.1","node-role.kubernetes.io/control-plane":"","node.kubernetes.io/exclude-from-external-load-balancers":""},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"unix:///var/run/crio/crio.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kubelet","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:52:31Z","fieldsType":"FieldsV1","fiel [truncated 6141 chars]
	I0731 10:52:48.924639   96871 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.27.3/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml
	I0731 10:52:49.235117   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-776386
	I0731 10:52:49.235136   96871 round_trippers.go:469] Request Headers:
	I0731 10:52:49.235145   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:52:49.235151   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:52:49.307909   96871 round_trippers.go:574] Response Status: 200 OK in 72 milliseconds
	I0731 10:52:49.307960   96871 round_trippers.go:577] Response Headers:
	I0731 10:52:49.307972   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:52:49.307982   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:52:49.307997   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:52:49 GMT
	I0731 10:52:49.308007   96871 round_trippers.go:580]     Audit-Id: 0a0e31b5-d716-497c-800e-55ded46594a0
	I0731 10:52:49.308031   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:52:49.308047   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:52:49.308228   96871 request.go:1188] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-776386","uid":"4de4b338-d1a0-4f0a-acb8-6058d42c7868","resourceVersion":"364","creationTimestamp":"2023-07-31T10:52:31Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-776386","kubernetes.io/os":"linux","minikube.k8s.io/commit":"a7848ba25aaaad8ebb50e721c0d343e471188fc7","minikube.k8s.io/name":"multinode-776386","minikube.k8s.io/primary":"true","minikube.k8s.io/updated_at":"2023_07_31T10_52_35_0700","minikube.k8s.io/version":"v1.31.1","node-role.kubernetes.io/control-plane":"","node.kubernetes.io/exclude-from-external-load-balancers":""},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"unix:///var/run/crio/crio.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kubelet","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:52:31Z","fieldsType":"FieldsV1","fiel [truncated 6141 chars]
	I0731 10:52:49.521745   96871 command_runner.go:130] > configmap/coredns replaced
	I0731 10:52:49.527491   96871 start.go:901] {"host.minikube.internal": 192.168.58.1} host record injected into CoreDNS's ConfigMap
	I0731 10:52:49.734939   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-776386
	I0731 10:52:49.734964   96871 round_trippers.go:469] Request Headers:
	I0731 10:52:49.734976   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:52:49.734987   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:52:49.736792   96871 round_trippers.go:574] Response Status: 200 OK in 1 milliseconds
	I0731 10:52:49.736816   96871 round_trippers.go:577] Response Headers:
	I0731 10:52:49.736826   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:52:49 GMT
	I0731 10:52:49.736834   96871 round_trippers.go:580]     Audit-Id: 673e6736-e8d3-46e4-85bf-af2f6e0d06bb
	I0731 10:52:49.736843   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:52:49.736851   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:52:49.736864   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:52:49.736875   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:52:49.737004   96871 request.go:1188] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-776386","uid":"4de4b338-d1a0-4f0a-acb8-6058d42c7868","resourceVersion":"364","creationTimestamp":"2023-07-31T10:52:31Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-776386","kubernetes.io/os":"linux","minikube.k8s.io/commit":"a7848ba25aaaad8ebb50e721c0d343e471188fc7","minikube.k8s.io/name":"multinode-776386","minikube.k8s.io/primary":"true","minikube.k8s.io/updated_at":"2023_07_31T10_52_35_0700","minikube.k8s.io/version":"v1.31.1","node-role.kubernetes.io/control-plane":"","node.kubernetes.io/exclude-from-external-load-balancers":""},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"unix:///var/run/crio/crio.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kubelet","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:52:31Z","fieldsType":"FieldsV1","fiel [truncated 6141 chars]
	I0731 10:52:49.775262   96871 command_runner.go:130] > serviceaccount/storage-provisioner created
	I0731 10:52:49.775291   96871 command_runner.go:130] > clusterrolebinding.rbac.authorization.k8s.io/storage-provisioner created
	I0731 10:52:49.775304   96871 command_runner.go:130] > role.rbac.authorization.k8s.io/system:persistent-volume-provisioner created
	I0731 10:52:49.775316   96871 command_runner.go:130] > rolebinding.rbac.authorization.k8s.io/system:persistent-volume-provisioner created
	I0731 10:52:49.775328   96871 command_runner.go:130] > endpoints/k8s.io-minikube-hostpath created
	I0731 10:52:49.775339   96871 command_runner.go:130] > pod/storage-provisioner created
	I0731 10:52:49.775366   96871 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.27.3/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml: (1.048142756s)
	I0731 10:52:49.775391   96871 command_runner.go:130] > storageclass.storage.k8s.io/standard created
	I0731 10:52:49.777090   96871 out.go:177] * Enabled addons: storage-provisioner, default-storageclass
	I0731 10:52:49.778279   96871 addons.go:502] enable addons completed in 1.243021881s: enabled=[storage-provisioner default-storageclass]
	I0731 10:52:50.234686   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-776386
	I0731 10:52:50.234704   96871 round_trippers.go:469] Request Headers:
	I0731 10:52:50.234712   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:52:50.234718   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:52:50.237190   96871 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
	I0731 10:52:50.237213   96871 round_trippers.go:577] Response Headers:
	I0731 10:52:50.237223   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:52:50.237233   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:52:50.237245   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:52:50.237254   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:52:50.237263   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:52:50 GMT
	I0731 10:52:50.237269   96871 round_trippers.go:580]     Audit-Id: f4f9aed7-f87c-42f3-b01b-9e7dd5855975
	I0731 10:52:50.237424   96871 request.go:1188] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-776386","uid":"4de4b338-d1a0-4f0a-acb8-6058d42c7868","resourceVersion":"364","creationTimestamp":"2023-07-31T10:52:31Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-776386","kubernetes.io/os":"linux","minikube.k8s.io/commit":"a7848ba25aaaad8ebb50e721c0d343e471188fc7","minikube.k8s.io/name":"multinode-776386","minikube.k8s.io/primary":"true","minikube.k8s.io/updated_at":"2023_07_31T10_52_35_0700","minikube.k8s.io/version":"v1.31.1","node-role.kubernetes.io/control-plane":"","node.kubernetes.io/exclude-from-external-load-balancers":""},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"unix:///var/run/crio/crio.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kubelet","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:52:31Z","fieldsType":"FieldsV1","fiel [truncated 6141 chars]
	I0731 10:52:50.734694   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-776386
	I0731 10:52:50.734712   96871 round_trippers.go:469] Request Headers:
	I0731 10:52:50.734720   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:52:50.734726   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:52:50.736913   96871 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
	I0731 10:52:50.736936   96871 round_trippers.go:577] Response Headers:
	I0731 10:52:50.736947   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:52:50.736956   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:52:50.736968   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:52:50.736984   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:52:50 GMT
	I0731 10:52:50.736999   96871 round_trippers.go:580]     Audit-Id: 6f11c915-1956-4ada-8cca-044ba23e8136
	I0731 10:52:50.737014   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:52:50.737121   96871 request.go:1188] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-776386","uid":"4de4b338-d1a0-4f0a-acb8-6058d42c7868","resourceVersion":"364","creationTimestamp":"2023-07-31T10:52:31Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-776386","kubernetes.io/os":"linux","minikube.k8s.io/commit":"a7848ba25aaaad8ebb50e721c0d343e471188fc7","minikube.k8s.io/name":"multinode-776386","minikube.k8s.io/primary":"true","minikube.k8s.io/updated_at":"2023_07_31T10_52_35_0700","minikube.k8s.io/version":"v1.31.1","node-role.kubernetes.io/control-plane":"","node.kubernetes.io/exclude-from-external-load-balancers":""},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"unix:///var/run/crio/crio.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kubelet","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:52:31Z","fieldsType":"FieldsV1","fiel [truncated 6141 chars]
	I0731 10:52:50.737409   96871 node_ready.go:58] node "multinode-776386" has status "Ready":"False"
	I0731 10:52:51.235413   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-776386
	I0731 10:52:51.235433   96871 round_trippers.go:469] Request Headers:
	I0731 10:52:51.235441   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:52:51.235447   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:52:51.237598   96871 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
	I0731 10:52:51.237622   96871 round_trippers.go:577] Response Headers:
	I0731 10:52:51.237632   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:52:51.237639   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:52:51.237644   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:52:51 GMT
	I0731 10:52:51.237650   96871 round_trippers.go:580]     Audit-Id: 2d643764-97b5-4478-bdb1-66c0c37d07ea
	I0731 10:52:51.237655   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:52:51.237661   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:52:51.237792   96871 request.go:1188] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-776386","uid":"4de4b338-d1a0-4f0a-acb8-6058d42c7868","resourceVersion":"364","creationTimestamp":"2023-07-31T10:52:31Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-776386","kubernetes.io/os":"linux","minikube.k8s.io/commit":"a7848ba25aaaad8ebb50e721c0d343e471188fc7","minikube.k8s.io/name":"multinode-776386","minikube.k8s.io/primary":"true","minikube.k8s.io/updated_at":"2023_07_31T10_52_35_0700","minikube.k8s.io/version":"v1.31.1","node-role.kubernetes.io/control-plane":"","node.kubernetes.io/exclude-from-external-load-balancers":""},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"unix:///var/run/crio/crio.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kubelet","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:52:31Z","fieldsType":"FieldsV1","fiel [truncated 6141 chars]
	I0731 10:52:51.735423   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-776386
	I0731 10:52:51.735446   96871 round_trippers.go:469] Request Headers:
	I0731 10:52:51.735454   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:52:51.735460   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:52:51.737868   96871 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
	I0731 10:52:51.737891   96871 round_trippers.go:577] Response Headers:
	I0731 10:52:51.737903   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:52:51 GMT
	I0731 10:52:51.737912   96871 round_trippers.go:580]     Audit-Id: 27632e04-8028-4814-a4e5-ec0904097085
	I0731 10:52:51.737919   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:52:51.737925   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:52:51.737934   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:52:51.737940   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:52:51.738086   96871 request.go:1188] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-776386","uid":"4de4b338-d1a0-4f0a-acb8-6058d42c7868","resourceVersion":"364","creationTimestamp":"2023-07-31T10:52:31Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-776386","kubernetes.io/os":"linux","minikube.k8s.io/commit":"a7848ba25aaaad8ebb50e721c0d343e471188fc7","minikube.k8s.io/name":"multinode-776386","minikube.k8s.io/primary":"true","minikube.k8s.io/updated_at":"2023_07_31T10_52_35_0700","minikube.k8s.io/version":"v1.31.1","node-role.kubernetes.io/control-plane":"","node.kubernetes.io/exclude-from-external-load-balancers":""},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"unix:///var/run/crio/crio.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kubelet","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:52:31Z","fieldsType":"FieldsV1","fiel [truncated 6141 chars]
	I0731 10:52:52.235629   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-776386
	I0731 10:52:52.235650   96871 round_trippers.go:469] Request Headers:
	I0731 10:52:52.235658   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:52:52.235666   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:52:52.238009   96871 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
	I0731 10:52:52.238031   96871 round_trippers.go:577] Response Headers:
	I0731 10:52:52.238042   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:52:52 GMT
	I0731 10:52:52.238050   96871 round_trippers.go:580]     Audit-Id: 10eeb871-b684-46d9-8d0f-5e5fe71a66cd
	I0731 10:52:52.238058   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:52:52.238066   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:52:52.238073   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:52:52.238082   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:52:52.238218   96871 request.go:1188] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-776386","uid":"4de4b338-d1a0-4f0a-acb8-6058d42c7868","resourceVersion":"364","creationTimestamp":"2023-07-31T10:52:31Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-776386","kubernetes.io/os":"linux","minikube.k8s.io/commit":"a7848ba25aaaad8ebb50e721c0d343e471188fc7","minikube.k8s.io/name":"multinode-776386","minikube.k8s.io/primary":"true","minikube.k8s.io/updated_at":"2023_07_31T10_52_35_0700","minikube.k8s.io/version":"v1.31.1","node-role.kubernetes.io/control-plane":"","node.kubernetes.io/exclude-from-external-load-balancers":""},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"unix:///var/run/crio/crio.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kubelet","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:52:31Z","fieldsType":"FieldsV1","fiel [truncated 6141 chars]
	I0731 10:52:52.735418   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-776386
	I0731 10:52:52.735441   96871 round_trippers.go:469] Request Headers:
	I0731 10:52:52.735450   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:52:52.735458   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:52:52.737787   96871 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
	I0731 10:52:52.737816   96871 round_trippers.go:577] Response Headers:
	I0731 10:52:52.737827   96871 round_trippers.go:580]     Audit-Id: a07a83ca-4d72-4b77-a233-c89a7920eaad
	I0731 10:52:52.737837   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:52:52.737846   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:52:52.737856   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:52:52.737864   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:52:52.737872   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:52:52 GMT
	I0731 10:52:52.738007   96871 request.go:1188] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-776386","uid":"4de4b338-d1a0-4f0a-acb8-6058d42c7868","resourceVersion":"364","creationTimestamp":"2023-07-31T10:52:31Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-776386","kubernetes.io/os":"linux","minikube.k8s.io/commit":"a7848ba25aaaad8ebb50e721c0d343e471188fc7","minikube.k8s.io/name":"multinode-776386","minikube.k8s.io/primary":"true","minikube.k8s.io/updated_at":"2023_07_31T10_52_35_0700","minikube.k8s.io/version":"v1.31.1","node-role.kubernetes.io/control-plane":"","node.kubernetes.io/exclude-from-external-load-balancers":""},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"unix:///var/run/crio/crio.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kubelet","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:52:31Z","fieldsType":"FieldsV1","fiel [truncated 6141 chars]
	I0731 10:52:52.738376   96871 node_ready.go:58] node "multinode-776386" has status "Ready":"False"
	I0731 10:52:53.235394   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-776386
	I0731 10:52:53.235413   96871 round_trippers.go:469] Request Headers:
	I0731 10:52:53.235421   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:52:53.235427   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:52:53.237640   96871 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
	I0731 10:52:53.237663   96871 round_trippers.go:577] Response Headers:
	I0731 10:52:53.237675   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:52:53.237689   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:52:53.237701   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:52:53 GMT
	I0731 10:52:53.237710   96871 round_trippers.go:580]     Audit-Id: 7f549177-5d3d-4054-aa17-5527cba55280
	I0731 10:52:53.237719   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:52:53.237726   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:52:53.237824   96871 request.go:1188] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-776386","uid":"4de4b338-d1a0-4f0a-acb8-6058d42c7868","resourceVersion":"364","creationTimestamp":"2023-07-31T10:52:31Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-776386","kubernetes.io/os":"linux","minikube.k8s.io/commit":"a7848ba25aaaad8ebb50e721c0d343e471188fc7","minikube.k8s.io/name":"multinode-776386","minikube.k8s.io/primary":"true","minikube.k8s.io/updated_at":"2023_07_31T10_52_35_0700","minikube.k8s.io/version":"v1.31.1","node-role.kubernetes.io/control-plane":"","node.kubernetes.io/exclude-from-external-load-balancers":""},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"unix:///var/run/crio/crio.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kubelet","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:52:31Z","fieldsType":"FieldsV1","fiel [truncated 6141 chars]
	I0731 10:52:53.735383   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-776386
	I0731 10:52:53.735403   96871 round_trippers.go:469] Request Headers:
	I0731 10:52:53.735411   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:52:53.735417   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:52:53.737791   96871 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
	I0731 10:52:53.737810   96871 round_trippers.go:577] Response Headers:
	I0731 10:52:53.737819   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:52:53 GMT
	I0731 10:52:53.737828   96871 round_trippers.go:580]     Audit-Id: ed8a6759-c5bc-4378-a14c-e72739867884
	I0731 10:52:53.737837   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:52:53.737845   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:52:53.737854   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:52:53.737868   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:52:53.737995   96871 request.go:1188] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-776386","uid":"4de4b338-d1a0-4f0a-acb8-6058d42c7868","resourceVersion":"364","creationTimestamp":"2023-07-31T10:52:31Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-776386","kubernetes.io/os":"linux","minikube.k8s.io/commit":"a7848ba25aaaad8ebb50e721c0d343e471188fc7","minikube.k8s.io/name":"multinode-776386","minikube.k8s.io/primary":"true","minikube.k8s.io/updated_at":"2023_07_31T10_52_35_0700","minikube.k8s.io/version":"v1.31.1","node-role.kubernetes.io/control-plane":"","node.kubernetes.io/exclude-from-external-load-balancers":""},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"unix:///var/run/crio/crio.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kubelet","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:52:31Z","fieldsType":"FieldsV1","fiel [truncated 6141 chars]
	I0731 10:52:54.235395   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-776386
	I0731 10:52:54.235414   96871 round_trippers.go:469] Request Headers:
	I0731 10:52:54.235422   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:52:54.235428   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:52:54.237644   96871 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
	I0731 10:52:54.237665   96871 round_trippers.go:577] Response Headers:
	I0731 10:52:54.237673   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:52:54.237681   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:52:54 GMT
	I0731 10:52:54.237689   96871 round_trippers.go:580]     Audit-Id: a2286c78-30d0-474a-8b1e-7115ca05b5fe
	I0731 10:52:54.237697   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:52:54.237709   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:52:54.237719   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:52:54.237816   96871 request.go:1188] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-776386","uid":"4de4b338-d1a0-4f0a-acb8-6058d42c7868","resourceVersion":"364","creationTimestamp":"2023-07-31T10:52:31Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-776386","kubernetes.io/os":"linux","minikube.k8s.io/commit":"a7848ba25aaaad8ebb50e721c0d343e471188fc7","minikube.k8s.io/name":"multinode-776386","minikube.k8s.io/primary":"true","minikube.k8s.io/updated_at":"2023_07_31T10_52_35_0700","minikube.k8s.io/version":"v1.31.1","node-role.kubernetes.io/control-plane":"","node.kubernetes.io/exclude-from-external-load-balancers":""},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"unix:///var/run/crio/crio.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kubelet","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:52:31Z","fieldsType":"FieldsV1","fiel [truncated 6141 chars]
	I0731 10:52:54.735444   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-776386
	I0731 10:52:54.735462   96871 round_trippers.go:469] Request Headers:
	I0731 10:52:54.735471   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:52:54.735477   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:52:54.737680   96871 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
	I0731 10:52:54.737700   96871 round_trippers.go:577] Response Headers:
	I0731 10:52:54.737708   96871 round_trippers.go:580]     Audit-Id: 5c105cf4-cfad-4bd1-9b8b-fd0b87583f73
	I0731 10:52:54.737717   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:52:54.737725   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:52:54.737733   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:52:54.737742   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:52:54.737756   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:52:54 GMT
	I0731 10:52:54.737868   96871 request.go:1188] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-776386","uid":"4de4b338-d1a0-4f0a-acb8-6058d42c7868","resourceVersion":"364","creationTimestamp":"2023-07-31T10:52:31Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-776386","kubernetes.io/os":"linux","minikube.k8s.io/commit":"a7848ba25aaaad8ebb50e721c0d343e471188fc7","minikube.k8s.io/name":"multinode-776386","minikube.k8s.io/primary":"true","minikube.k8s.io/updated_at":"2023_07_31T10_52_35_0700","minikube.k8s.io/version":"v1.31.1","node-role.kubernetes.io/control-plane":"","node.kubernetes.io/exclude-from-external-load-balancers":""},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"unix:///var/run/crio/crio.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kubelet","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:52:31Z","fieldsType":"FieldsV1","fiel [truncated 6141 chars]
	I0731 10:52:55.235417   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-776386
	I0731 10:52:55.235437   96871 round_trippers.go:469] Request Headers:
	I0731 10:52:55.235446   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:52:55.235452   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:52:55.237785   96871 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
	I0731 10:52:55.237807   96871 round_trippers.go:577] Response Headers:
	I0731 10:52:55.237816   96871 round_trippers.go:580]     Audit-Id: 10686d87-229e-4421-a2da-292caf69dc7e
	I0731 10:52:55.237824   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:52:55.237834   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:52:55.237846   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:52:55.237855   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:52:55.237865   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:52:55 GMT
	I0731 10:52:55.237960   96871 request.go:1188] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-776386","uid":"4de4b338-d1a0-4f0a-acb8-6058d42c7868","resourceVersion":"364","creationTimestamp":"2023-07-31T10:52:31Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-776386","kubernetes.io/os":"linux","minikube.k8s.io/commit":"a7848ba25aaaad8ebb50e721c0d343e471188fc7","minikube.k8s.io/name":"multinode-776386","minikube.k8s.io/primary":"true","minikube.k8s.io/updated_at":"2023_07_31T10_52_35_0700","minikube.k8s.io/version":"v1.31.1","node-role.kubernetes.io/control-plane":"","node.kubernetes.io/exclude-from-external-load-balancers":""},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"unix:///var/run/crio/crio.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kubelet","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:52:31Z","fieldsType":"FieldsV1","fiel [truncated 6141 chars]
	I0731 10:52:55.238337   96871 node_ready.go:58] node "multinode-776386" has status "Ready":"False"
	I0731 10:52:55.735595   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-776386
	I0731 10:52:55.735619   96871 round_trippers.go:469] Request Headers:
	I0731 10:52:55.735631   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:52:55.735642   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:52:55.738064   96871 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
	I0731 10:52:55.738081   96871 round_trippers.go:577] Response Headers:
	I0731 10:52:55.738088   96871 round_trippers.go:580]     Audit-Id: 153f7871-c7ed-44e1-a485-e989e5928bbc
	I0731 10:52:55.738097   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:52:55.738104   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:52:55.738113   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:52:55.738121   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:52:55.738135   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:52:55 GMT
	I0731 10:52:55.738287   96871 request.go:1188] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-776386","uid":"4de4b338-d1a0-4f0a-acb8-6058d42c7868","resourceVersion":"364","creationTimestamp":"2023-07-31T10:52:31Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-776386","kubernetes.io/os":"linux","minikube.k8s.io/commit":"a7848ba25aaaad8ebb50e721c0d343e471188fc7","minikube.k8s.io/name":"multinode-776386","minikube.k8s.io/primary":"true","minikube.k8s.io/updated_at":"2023_07_31T10_52_35_0700","minikube.k8s.io/version":"v1.31.1","node-role.kubernetes.io/control-plane":"","node.kubernetes.io/exclude-from-external-load-balancers":""},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"unix:///var/run/crio/crio.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kubelet","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:52:31Z","fieldsType":"FieldsV1","fiel [truncated 6141 chars]
	I0731 10:52:56.235412   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-776386
	I0731 10:52:56.235433   96871 round_trippers.go:469] Request Headers:
	I0731 10:52:56.235441   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:52:56.235447   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:52:56.237465   96871 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
	I0731 10:52:56.237482   96871 round_trippers.go:577] Response Headers:
	I0731 10:52:56.237489   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:52:56.237495   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:52:56 GMT
	I0731 10:52:56.237500   96871 round_trippers.go:580]     Audit-Id: 5315bc88-ab49-446d-b954-1e16ebc11ccc
	I0731 10:52:56.237505   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:52:56.237523   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:52:56.237535   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:52:56.237670   96871 request.go:1188] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-776386","uid":"4de4b338-d1a0-4f0a-acb8-6058d42c7868","resourceVersion":"364","creationTimestamp":"2023-07-31T10:52:31Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-776386","kubernetes.io/os":"linux","minikube.k8s.io/commit":"a7848ba25aaaad8ebb50e721c0d343e471188fc7","minikube.k8s.io/name":"multinode-776386","minikube.k8s.io/primary":"true","minikube.k8s.io/updated_at":"2023_07_31T10_52_35_0700","minikube.k8s.io/version":"v1.31.1","node-role.kubernetes.io/control-plane":"","node.kubernetes.io/exclude-from-external-load-balancers":""},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"unix:///var/run/crio/crio.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kubelet","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:52:31Z","fieldsType":"FieldsV1","fiel [truncated 6141 chars]
	I0731 10:52:56.735406   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-776386
	I0731 10:52:56.735426   96871 round_trippers.go:469] Request Headers:
	I0731 10:52:56.735435   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:52:56.735441   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:52:56.737717   96871 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
	I0731 10:52:56.737739   96871 round_trippers.go:577] Response Headers:
	I0731 10:52:56.737750   96871 round_trippers.go:580]     Audit-Id: 91f8df57-5025-4998-80b6-4396c26c929f
	I0731 10:52:56.737759   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:52:56.737767   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:52:56.737778   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:52:56.737793   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:52:56.737801   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:52:56 GMT
	I0731 10:52:56.737949   96871 request.go:1188] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-776386","uid":"4de4b338-d1a0-4f0a-acb8-6058d42c7868","resourceVersion":"364","creationTimestamp":"2023-07-31T10:52:31Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-776386","kubernetes.io/os":"linux","minikube.k8s.io/commit":"a7848ba25aaaad8ebb50e721c0d343e471188fc7","minikube.k8s.io/name":"multinode-776386","minikube.k8s.io/primary":"true","minikube.k8s.io/updated_at":"2023_07_31T10_52_35_0700","minikube.k8s.io/version":"v1.31.1","node-role.kubernetes.io/control-plane":"","node.kubernetes.io/exclude-from-external-load-balancers":""},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"unix:///var/run/crio/crio.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kubelet","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:52:31Z","fieldsType":"FieldsV1","fiel [truncated 6141 chars]
	I0731 10:52:57.235410   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-776386
	I0731 10:52:57.235428   96871 round_trippers.go:469] Request Headers:
	I0731 10:52:57.235436   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:52:57.235442   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:52:57.237516   96871 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
	I0731 10:52:57.237540   96871 round_trippers.go:577] Response Headers:
	I0731 10:52:57.237548   96871 round_trippers.go:580]     Audit-Id: 4b91d8d1-b688-4bd2-afd9-b69e28122cad
	I0731 10:52:57.237554   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:52:57.237559   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:52:57.237564   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:52:57.237570   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:52:57.237575   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:52:57 GMT
	I0731 10:52:57.237695   96871 request.go:1188] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-776386","uid":"4de4b338-d1a0-4f0a-acb8-6058d42c7868","resourceVersion":"364","creationTimestamp":"2023-07-31T10:52:31Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-776386","kubernetes.io/os":"linux","minikube.k8s.io/commit":"a7848ba25aaaad8ebb50e721c0d343e471188fc7","minikube.k8s.io/name":"multinode-776386","minikube.k8s.io/primary":"true","minikube.k8s.io/updated_at":"2023_07_31T10_52_35_0700","minikube.k8s.io/version":"v1.31.1","node-role.kubernetes.io/control-plane":"","node.kubernetes.io/exclude-from-external-load-balancers":""},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"unix:///var/run/crio/crio.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kubelet","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:52:31Z","fieldsType":"FieldsV1","fiel [truncated 6141 chars]
	I0731 10:52:57.735366   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-776386
	I0731 10:52:57.735385   96871 round_trippers.go:469] Request Headers:
	I0731 10:52:57.735394   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:52:57.735400   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:52:57.737487   96871 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
	I0731 10:52:57.737514   96871 round_trippers.go:577] Response Headers:
	I0731 10:52:57.737520   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:52:57.737526   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:52:57 GMT
	I0731 10:52:57.737532   96871 round_trippers.go:580]     Audit-Id: 9a12bf95-230c-4b73-b0fb-f3773ffacd1e
	I0731 10:52:57.737537   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:52:57.737542   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:52:57.737547   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:52:57.737687   96871 request.go:1188] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-776386","uid":"4de4b338-d1a0-4f0a-acb8-6058d42c7868","resourceVersion":"364","creationTimestamp":"2023-07-31T10:52:31Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-776386","kubernetes.io/os":"linux","minikube.k8s.io/commit":"a7848ba25aaaad8ebb50e721c0d343e471188fc7","minikube.k8s.io/name":"multinode-776386","minikube.k8s.io/primary":"true","minikube.k8s.io/updated_at":"2023_07_31T10_52_35_0700","minikube.k8s.io/version":"v1.31.1","node-role.kubernetes.io/control-plane":"","node.kubernetes.io/exclude-from-external-load-balancers":""},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"unix:///var/run/crio/crio.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kubelet","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:52:31Z","fieldsType":"FieldsV1","fiel [truncated 6141 chars]
	I0731 10:52:57.737987   96871 node_ready.go:58] node "multinode-776386" has status "Ready":"False"
	I0731 10:52:58.235399   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-776386
	I0731 10:52:58.235417   96871 round_trippers.go:469] Request Headers:
	I0731 10:52:58.235425   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:52:58.235432   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:52:58.237687   96871 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
	I0731 10:52:58.237709   96871 round_trippers.go:577] Response Headers:
	I0731 10:52:58.237720   96871 round_trippers.go:580]     Audit-Id: f76e03bf-c4b6-4068-92ed-a0ff3c402d1d
	I0731 10:52:58.237730   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:52:58.237739   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:52:58.237749   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:52:58.237763   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:52:58.237775   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:52:58 GMT
	I0731 10:52:58.237893   96871 request.go:1188] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-776386","uid":"4de4b338-d1a0-4f0a-acb8-6058d42c7868","resourceVersion":"364","creationTimestamp":"2023-07-31T10:52:31Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-776386","kubernetes.io/os":"linux","minikube.k8s.io/commit":"a7848ba25aaaad8ebb50e721c0d343e471188fc7","minikube.k8s.io/name":"multinode-776386","minikube.k8s.io/primary":"true","minikube.k8s.io/updated_at":"2023_07_31T10_52_35_0700","minikube.k8s.io/version":"v1.31.1","node-role.kubernetes.io/control-plane":"","node.kubernetes.io/exclude-from-external-load-balancers":""},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"unix:///var/run/crio/crio.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kubelet","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:52:31Z","fieldsType":"FieldsV1","fiel [truncated 6141 chars]
	I0731 10:52:58.735631   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-776386
	I0731 10:52:58.735667   96871 round_trippers.go:469] Request Headers:
	I0731 10:52:58.735675   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:52:58.735682   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:52:58.738276   96871 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
	I0731 10:52:58.738299   96871 round_trippers.go:577] Response Headers:
	I0731 10:52:58.738310   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:52:58.738320   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:52:58 GMT
	I0731 10:52:58.738328   96871 round_trippers.go:580]     Audit-Id: 032f2765-1624-4e98-9a46-e27123dace48
	I0731 10:52:58.738334   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:52:58.738340   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:52:58.738349   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:52:58.738490   96871 request.go:1188] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-776386","uid":"4de4b338-d1a0-4f0a-acb8-6058d42c7868","resourceVersion":"364","creationTimestamp":"2023-07-31T10:52:31Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-776386","kubernetes.io/os":"linux","minikube.k8s.io/commit":"a7848ba25aaaad8ebb50e721c0d343e471188fc7","minikube.k8s.io/name":"multinode-776386","minikube.k8s.io/primary":"true","minikube.k8s.io/updated_at":"2023_07_31T10_52_35_0700","minikube.k8s.io/version":"v1.31.1","node-role.kubernetes.io/control-plane":"","node.kubernetes.io/exclude-from-external-load-balancers":""},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"unix:///var/run/crio/crio.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kubelet","operation":"Update","apiVe
rsion":"v1","time":"2023-07-31T10:52:31Z","fieldsType":"FieldsV1","fiel [truncated 6141 chars]
	I0731 10:52:59.234993   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-776386
	I0731 10:52:59.235010   96871 round_trippers.go:469] Request Headers:
	I0731 10:52:59.235018   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:52:59.235024   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:52:59.237156   96871 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
	I0731 10:52:59.237189   96871 round_trippers.go:577] Response Headers:
	I0731 10:52:59.237201   96871 round_trippers.go:580]     Audit-Id: 0a5a1452-f5be-421d-87e7-e1857f0b602d
	I0731 10:52:59.237211   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:52:59.237220   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:52:59.237230   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:52:59.237241   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:52:59.237251   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:52:59 GMT
	I0731 10:52:59.237362   96871 request.go:1188] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-776386","uid":"4de4b338-d1a0-4f0a-acb8-6058d42c7868","resourceVersion":"364","creationTimestamp":"2023-07-31T10:52:31Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-776386","kubernetes.io/os":"linux","minikube.k8s.io/commit":"a7848ba25aaaad8ebb50e721c0d343e471188fc7","minikube.k8s.io/name":"multinode-776386","minikube.k8s.io/primary":"true","minikube.k8s.io/updated_at":"2023_07_31T10_52_35_0700","minikube.k8s.io/version":"v1.31.1","node-role.kubernetes.io/control-plane":"","node.kubernetes.io/exclude-from-external-load-balancers":""},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"unix:///var/run/crio/crio.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kubelet","operation":"Update","apiVe
rsion":"v1","time":"2023-07-31T10:52:31Z","fieldsType":"FieldsV1","fiel [truncated 6141 chars]
	I0731 10:52:59.734888   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-776386
	I0731 10:52:59.734910   96871 round_trippers.go:469] Request Headers:
	I0731 10:52:59.734918   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:52:59.734929   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:52:59.737199   96871 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
	I0731 10:52:59.737219   96871 round_trippers.go:577] Response Headers:
	I0731 10:52:59.737232   96871 round_trippers.go:580]     Audit-Id: 270180e0-9ec0-4389-baff-7b91592aa1e5
	I0731 10:52:59.737242   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:52:59.737251   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:52:59.737261   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:52:59.737273   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:52:59.737283   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:52:59 GMT
	I0731 10:52:59.737404   96871 request.go:1188] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-776386","uid":"4de4b338-d1a0-4f0a-acb8-6058d42c7868","resourceVersion":"364","creationTimestamp":"2023-07-31T10:52:31Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-776386","kubernetes.io/os":"linux","minikube.k8s.io/commit":"a7848ba25aaaad8ebb50e721c0d343e471188fc7","minikube.k8s.io/name":"multinode-776386","minikube.k8s.io/primary":"true","minikube.k8s.io/updated_at":"2023_07_31T10_52_35_0700","minikube.k8s.io/version":"v1.31.1","node-role.kubernetes.io/control-plane":"","node.kubernetes.io/exclude-from-external-load-balancers":""},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"unix:///var/run/crio/crio.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kubelet","operation":"Update","apiVe
rsion":"v1","time":"2023-07-31T10:52:31Z","fieldsType":"FieldsV1","fiel [truncated 6141 chars]
	I0731 10:53:00.234926   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-776386
	I0731 10:53:00.234945   96871 round_trippers.go:469] Request Headers:
	I0731 10:53:00.234954   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:53:00.234961   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:53:00.237145   96871 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
	I0731 10:53:00.237162   96871 round_trippers.go:577] Response Headers:
	I0731 10:53:00.237169   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:53:00.237175   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:53:00 GMT
	I0731 10:53:00.237180   96871 round_trippers.go:580]     Audit-Id: b5ddabdf-e01f-4ccb-8fae-b4b541d9c05c
	I0731 10:53:00.237185   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:53:00.237191   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:53:00.237196   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:53:00.237321   96871 request.go:1188] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-776386","uid":"4de4b338-d1a0-4f0a-acb8-6058d42c7868","resourceVersion":"364","creationTimestamp":"2023-07-31T10:52:31Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-776386","kubernetes.io/os":"linux","minikube.k8s.io/commit":"a7848ba25aaaad8ebb50e721c0d343e471188fc7","minikube.k8s.io/name":"multinode-776386","minikube.k8s.io/primary":"true","minikube.k8s.io/updated_at":"2023_07_31T10_52_35_0700","minikube.k8s.io/version":"v1.31.1","node-role.kubernetes.io/control-plane":"","node.kubernetes.io/exclude-from-external-load-balancers":""},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"unix:///var/run/crio/crio.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kubelet","operation":"Update","apiVe
rsion":"v1","time":"2023-07-31T10:52:31Z","fieldsType":"FieldsV1","fiel [truncated 6141 chars]
	I0731 10:53:00.237669   96871 node_ready.go:58] node "multinode-776386" has status "Ready":"False"
	I0731 10:53:00.734879   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-776386
	I0731 10:53:00.734898   96871 round_trippers.go:469] Request Headers:
	I0731 10:53:00.734906   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:53:00.734912   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:53:00.737091   96871 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
	I0731 10:53:00.737112   96871 round_trippers.go:577] Response Headers:
	I0731 10:53:00.737123   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:53:00.737131   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:53:00 GMT
	I0731 10:53:00.737140   96871 round_trippers.go:580]     Audit-Id: 84c87393-f4ef-45b2-a2a3-b6b82d4f67c3
	I0731 10:53:00.737147   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:53:00.737155   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:53:00.737163   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:53:00.737278   96871 request.go:1188] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-776386","uid":"4de4b338-d1a0-4f0a-acb8-6058d42c7868","resourceVersion":"364","creationTimestamp":"2023-07-31T10:52:31Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-776386","kubernetes.io/os":"linux","minikube.k8s.io/commit":"a7848ba25aaaad8ebb50e721c0d343e471188fc7","minikube.k8s.io/name":"multinode-776386","minikube.k8s.io/primary":"true","minikube.k8s.io/updated_at":"2023_07_31T10_52_35_0700","minikube.k8s.io/version":"v1.31.1","node-role.kubernetes.io/control-plane":"","node.kubernetes.io/exclude-from-external-load-balancers":""},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"unix:///var/run/crio/crio.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kubelet","operation":"Update","apiVe
rsion":"v1","time":"2023-07-31T10:52:31Z","fieldsType":"FieldsV1","fiel [truncated 6141 chars]
	I0731 10:53:01.234877   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-776386
	I0731 10:53:01.234897   96871 round_trippers.go:469] Request Headers:
	I0731 10:53:01.234914   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:53:01.234922   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:53:01.236980   96871 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
	I0731 10:53:01.237001   96871 round_trippers.go:577] Response Headers:
	I0731 10:53:01.237009   96871 round_trippers.go:580]     Audit-Id: 9915043e-6682-42a9-aea0-2ec0482bf4e6
	I0731 10:53:01.237015   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:53:01.237021   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:53:01.237026   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:53:01.237034   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:53:01.237039   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:53:01 GMT
	I0731 10:53:01.237158   96871 request.go:1188] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-776386","uid":"4de4b338-d1a0-4f0a-acb8-6058d42c7868","resourceVersion":"364","creationTimestamp":"2023-07-31T10:52:31Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-776386","kubernetes.io/os":"linux","minikube.k8s.io/commit":"a7848ba25aaaad8ebb50e721c0d343e471188fc7","minikube.k8s.io/name":"multinode-776386","minikube.k8s.io/primary":"true","minikube.k8s.io/updated_at":"2023_07_31T10_52_35_0700","minikube.k8s.io/version":"v1.31.1","node-role.kubernetes.io/control-plane":"","node.kubernetes.io/exclude-from-external-load-balancers":""},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"unix:///var/run/crio/crio.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kubelet","operation":"Update","apiVe
rsion":"v1","time":"2023-07-31T10:52:31Z","fieldsType":"FieldsV1","fiel [truncated 6141 chars]
	I0731 10:53:01.734888   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-776386
	I0731 10:53:01.734912   96871 round_trippers.go:469] Request Headers:
	I0731 10:53:01.734921   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:53:01.734927   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:53:01.737120   96871 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
	I0731 10:53:01.737145   96871 round_trippers.go:577] Response Headers:
	I0731 10:53:01.737156   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:53:01.737165   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:53:01 GMT
	I0731 10:53:01.737174   96871 round_trippers.go:580]     Audit-Id: 5755761f-244f-4577-a594-11f99f3e79b0
	I0731 10:53:01.737182   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:53:01.737189   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:53:01.737194   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:53:01.737333   96871 request.go:1188] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-776386","uid":"4de4b338-d1a0-4f0a-acb8-6058d42c7868","resourceVersion":"364","creationTimestamp":"2023-07-31T10:52:31Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-776386","kubernetes.io/os":"linux","minikube.k8s.io/commit":"a7848ba25aaaad8ebb50e721c0d343e471188fc7","minikube.k8s.io/name":"multinode-776386","minikube.k8s.io/primary":"true","minikube.k8s.io/updated_at":"2023_07_31T10_52_35_0700","minikube.k8s.io/version":"v1.31.1","node-role.kubernetes.io/control-plane":"","node.kubernetes.io/exclude-from-external-load-balancers":""},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"unix:///var/run/crio/crio.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kubelet","operation":"Update","apiVe
rsion":"v1","time":"2023-07-31T10:52:31Z","fieldsType":"FieldsV1","fiel [truncated 6141 chars]
	I0731 10:53:02.234854   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-776386
	I0731 10:53:02.234872   96871 round_trippers.go:469] Request Headers:
	I0731 10:53:02.234885   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:53:02.234892   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:53:02.236775   96871 round_trippers.go:574] Response Status: 200 OK in 1 milliseconds
	I0731 10:53:02.236798   96871 round_trippers.go:577] Response Headers:
	I0731 10:53:02.236808   96871 round_trippers.go:580]     Audit-Id: 666b421b-81f7-4f96-8509-1a6277cadb6e
	I0731 10:53:02.236817   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:53:02.236825   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:53:02.236834   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:53:02.236843   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:53:02.236853   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:53:02 GMT
	I0731 10:53:02.236944   96871 request.go:1188] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-776386","uid":"4de4b338-d1a0-4f0a-acb8-6058d42c7868","resourceVersion":"364","creationTimestamp":"2023-07-31T10:52:31Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-776386","kubernetes.io/os":"linux","minikube.k8s.io/commit":"a7848ba25aaaad8ebb50e721c0d343e471188fc7","minikube.k8s.io/name":"multinode-776386","minikube.k8s.io/primary":"true","minikube.k8s.io/updated_at":"2023_07_31T10_52_35_0700","minikube.k8s.io/version":"v1.31.1","node-role.kubernetes.io/control-plane":"","node.kubernetes.io/exclude-from-external-load-balancers":""},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"unix:///var/run/crio/crio.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kubelet","operation":"Update","apiVe
rsion":"v1","time":"2023-07-31T10:52:31Z","fieldsType":"FieldsV1","fiel [truncated 6141 chars]
	I0731 10:53:02.735405   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-776386
	I0731 10:53:02.735422   96871 round_trippers.go:469] Request Headers:
	I0731 10:53:02.735430   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:53:02.735436   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:53:02.737721   96871 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
	I0731 10:53:02.737744   96871 round_trippers.go:577] Response Headers:
	I0731 10:53:02.737755   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:53:02.737764   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:53:02.737771   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:53:02 GMT
	I0731 10:53:02.737779   96871 round_trippers.go:580]     Audit-Id: c2af0cd1-403b-415c-b2d5-e38ddc72d627
	I0731 10:53:02.737790   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:53:02.737803   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:53:02.737935   96871 request.go:1188] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-776386","uid":"4de4b338-d1a0-4f0a-acb8-6058d42c7868","resourceVersion":"364","creationTimestamp":"2023-07-31T10:52:31Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-776386","kubernetes.io/os":"linux","minikube.k8s.io/commit":"a7848ba25aaaad8ebb50e721c0d343e471188fc7","minikube.k8s.io/name":"multinode-776386","minikube.k8s.io/primary":"true","minikube.k8s.io/updated_at":"2023_07_31T10_52_35_0700","minikube.k8s.io/version":"v1.31.1","node-role.kubernetes.io/control-plane":"","node.kubernetes.io/exclude-from-external-load-balancers":""},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"unix:///var/run/crio/crio.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kubelet","operation":"Update","apiVe
rsion":"v1","time":"2023-07-31T10:52:31Z","fieldsType":"FieldsV1","fiel [truncated 6141 chars]
	I0731 10:53:02.738364   96871 node_ready.go:58] node "multinode-776386" has status "Ready":"False"
	I0731 10:53:03.235546   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-776386
	I0731 10:53:03.235568   96871 round_trippers.go:469] Request Headers:
	I0731 10:53:03.235579   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:53:03.235588   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:53:03.237877   96871 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
	I0731 10:53:03.237898   96871 round_trippers.go:577] Response Headers:
	I0731 10:53:03.237909   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:53:03.237919   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:53:03.237927   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:53:03.237937   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:53:03 GMT
	I0731 10:53:03.237946   96871 round_trippers.go:580]     Audit-Id: d2aad30d-5240-43d9-8e7b-4be360f54dfa
	I0731 10:53:03.237962   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:53:03.238076   96871 request.go:1188] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-776386","uid":"4de4b338-d1a0-4f0a-acb8-6058d42c7868","resourceVersion":"364","creationTimestamp":"2023-07-31T10:52:31Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-776386","kubernetes.io/os":"linux","minikube.k8s.io/commit":"a7848ba25aaaad8ebb50e721c0d343e471188fc7","minikube.k8s.io/name":"multinode-776386","minikube.k8s.io/primary":"true","minikube.k8s.io/updated_at":"2023_07_31T10_52_35_0700","minikube.k8s.io/version":"v1.31.1","node-role.kubernetes.io/control-plane":"","node.kubernetes.io/exclude-from-external-load-balancers":""},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"unix:///var/run/crio/crio.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kubelet","operation":"Update","apiVe
rsion":"v1","time":"2023-07-31T10:52:31Z","fieldsType":"FieldsV1","fiel [truncated 6141 chars]
	I0731 10:53:03.734697   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-776386
	I0731 10:53:03.734724   96871 round_trippers.go:469] Request Headers:
	I0731 10:53:03.734732   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:53:03.734738   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:53:03.737140   96871 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
	I0731 10:53:03.737160   96871 round_trippers.go:577] Response Headers:
	I0731 10:53:03.737167   96871 round_trippers.go:580]     Audit-Id: 3535bc24-d8dc-4b70-a932-58ee02ed5c3e
	I0731 10:53:03.737173   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:53:03.737179   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:53:03.737184   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:53:03.737190   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:53:03.737195   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:53:03 GMT
	I0731 10:53:03.737328   96871 request.go:1188] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-776386","uid":"4de4b338-d1a0-4f0a-acb8-6058d42c7868","resourceVersion":"364","creationTimestamp":"2023-07-31T10:52:31Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-776386","kubernetes.io/os":"linux","minikube.k8s.io/commit":"a7848ba25aaaad8ebb50e721c0d343e471188fc7","minikube.k8s.io/name":"multinode-776386","minikube.k8s.io/primary":"true","minikube.k8s.io/updated_at":"2023_07_31T10_52_35_0700","minikube.k8s.io/version":"v1.31.1","node-role.kubernetes.io/control-plane":"","node.kubernetes.io/exclude-from-external-load-balancers":""},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"unix:///var/run/crio/crio.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kubelet","operation":"Update","apiVe
rsion":"v1","time":"2023-07-31T10:52:31Z","fieldsType":"FieldsV1","fiel [truncated 6141 chars]
	I0731 10:53:04.234686   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-776386
	I0731 10:53:04.234706   96871 round_trippers.go:469] Request Headers:
	I0731 10:53:04.234714   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:53:04.234725   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:53:04.236882   96871 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
	I0731 10:53:04.236898   96871 round_trippers.go:577] Response Headers:
	I0731 10:53:04.236904   96871 round_trippers.go:580]     Audit-Id: 596d7fcf-017b-4900-9703-b2de269c0084
	I0731 10:53:04.236910   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:53:04.236916   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:53:04.236921   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:53:04.236926   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:53:04.236933   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:53:04 GMT
	I0731 10:53:04.237045   96871 request.go:1188] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-776386","uid":"4de4b338-d1a0-4f0a-acb8-6058d42c7868","resourceVersion":"364","creationTimestamp":"2023-07-31T10:52:31Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-776386","kubernetes.io/os":"linux","minikube.k8s.io/commit":"a7848ba25aaaad8ebb50e721c0d343e471188fc7","minikube.k8s.io/name":"multinode-776386","minikube.k8s.io/primary":"true","minikube.k8s.io/updated_at":"2023_07_31T10_52_35_0700","minikube.k8s.io/version":"v1.31.1","node-role.kubernetes.io/control-plane":"","node.kubernetes.io/exclude-from-external-load-balancers":""},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"unix:///var/run/crio/crio.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kubelet","operation":"Update","apiVe
rsion":"v1","time":"2023-07-31T10:52:31Z","fieldsType":"FieldsV1","fiel [truncated 6141 chars]
	I0731 10:53:04.735396   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-776386
	I0731 10:53:04.735416   96871 round_trippers.go:469] Request Headers:
	I0731 10:53:04.735424   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:53:04.735431   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:53:04.737504   96871 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
	I0731 10:53:04.737525   96871 round_trippers.go:577] Response Headers:
	I0731 10:53:04.737535   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:53:04.737543   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:53:04 GMT
	I0731 10:53:04.737552   96871 round_trippers.go:580]     Audit-Id: bea184a7-4a2e-469d-97b6-c09dcc99e7b9
	I0731 10:53:04.737560   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:53:04.737568   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:53:04.737580   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:53:04.737710   96871 request.go:1188] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-776386","uid":"4de4b338-d1a0-4f0a-acb8-6058d42c7868","resourceVersion":"364","creationTimestamp":"2023-07-31T10:52:31Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-776386","kubernetes.io/os":"linux","minikube.k8s.io/commit":"a7848ba25aaaad8ebb50e721c0d343e471188fc7","minikube.k8s.io/name":"multinode-776386","minikube.k8s.io/primary":"true","minikube.k8s.io/updated_at":"2023_07_31T10_52_35_0700","minikube.k8s.io/version":"v1.31.1","node-role.kubernetes.io/control-plane":"","node.kubernetes.io/exclude-from-external-load-balancers":""},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"unix:///var/run/crio/crio.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kubelet","operation":"Update","apiVe
rsion":"v1","time":"2023-07-31T10:52:31Z","fieldsType":"FieldsV1","fiel [truncated 6141 chars]
	I0731 10:53:05.235332   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-776386
	I0731 10:53:05.235355   96871 round_trippers.go:469] Request Headers:
	I0731 10:53:05.235369   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:53:05.235377   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:53:05.237614   96871 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
	I0731 10:53:05.237631   96871 round_trippers.go:577] Response Headers:
	I0731 10:53:05.237638   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:53:05.237644   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:53:05.237649   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:53:05.237654   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:53:05.237660   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:53:05 GMT
	I0731 10:53:05.237667   96871 round_trippers.go:580]     Audit-Id: 025dc7fe-0ef5-4439-aeee-9304d6d8a420
	I0731 10:53:05.237808   96871 request.go:1188] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-776386","uid":"4de4b338-d1a0-4f0a-acb8-6058d42c7868","resourceVersion":"364","creationTimestamp":"2023-07-31T10:52:31Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-776386","kubernetes.io/os":"linux","minikube.k8s.io/commit":"a7848ba25aaaad8ebb50e721c0d343e471188fc7","minikube.k8s.io/name":"multinode-776386","minikube.k8s.io/primary":"true","minikube.k8s.io/updated_at":"2023_07_31T10_52_35_0700","minikube.k8s.io/version":"v1.31.1","node-role.kubernetes.io/control-plane":"","node.kubernetes.io/exclude-from-external-load-balancers":""},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"unix:///var/run/crio/crio.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kubelet","operation":"Update","apiVe
rsion":"v1","time":"2023-07-31T10:52:31Z","fieldsType":"FieldsV1","fiel [truncated 6141 chars]
	I0731 10:53:05.238101   96871 node_ready.go:58] node "multinode-776386" has status "Ready":"False"
	I0731 10:53:05.735407   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-776386
	I0731 10:53:05.735425   96871 round_trippers.go:469] Request Headers:
	I0731 10:53:05.735433   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:53:05.735439   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:53:05.737603   96871 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
	I0731 10:53:05.737624   96871 round_trippers.go:577] Response Headers:
	I0731 10:53:05.737636   96871 round_trippers.go:580]     Audit-Id: 7449da95-da56-4e85-b0d4-fbcbc033f279
	I0731 10:53:05.737645   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:53:05.737657   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:53:05.737669   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:53:05.737679   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:53:05.737684   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:53:05 GMT
	I0731 10:53:05.737830   96871 request.go:1188] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-776386","uid":"4de4b338-d1a0-4f0a-acb8-6058d42c7868","resourceVersion":"364","creationTimestamp":"2023-07-31T10:52:31Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-776386","kubernetes.io/os":"linux","minikube.k8s.io/commit":"a7848ba25aaaad8ebb50e721c0d343e471188fc7","minikube.k8s.io/name":"multinode-776386","minikube.k8s.io/primary":"true","minikube.k8s.io/updated_at":"2023_07_31T10_52_35_0700","minikube.k8s.io/version":"v1.31.1","node-role.kubernetes.io/control-plane":"","node.kubernetes.io/exclude-from-external-load-balancers":""},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"unix:///var/run/crio/crio.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kubelet","operation":"Update","apiVe
rsion":"v1","time":"2023-07-31T10:52:31Z","fieldsType":"FieldsV1","fiel [truncated 6141 chars]
	I0731 10:53:06.235410   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-776386
	I0731 10:53:06.235429   96871 round_trippers.go:469] Request Headers:
	I0731 10:53:06.235437   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:53:06.235443   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:53:06.237529   96871 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
	I0731 10:53:06.237546   96871 round_trippers.go:577] Response Headers:
	I0731 10:53:06.237552   96871 round_trippers.go:580]     Audit-Id: 5cc6b788-329d-4323-8627-46f99189da9a
	I0731 10:53:06.237561   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:53:06.237570   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:53:06.237583   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:53:06.237597   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:53:06.237609   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:53:06 GMT
	I0731 10:53:06.237705   96871 request.go:1188] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-776386","uid":"4de4b338-d1a0-4f0a-acb8-6058d42c7868","resourceVersion":"364","creationTimestamp":"2023-07-31T10:52:31Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-776386","kubernetes.io/os":"linux","minikube.k8s.io/commit":"a7848ba25aaaad8ebb50e721c0d343e471188fc7","minikube.k8s.io/name":"multinode-776386","minikube.k8s.io/primary":"true","minikube.k8s.io/updated_at":"2023_07_31T10_52_35_0700","minikube.k8s.io/version":"v1.31.1","node-role.kubernetes.io/control-plane":"","node.kubernetes.io/exclude-from-external-load-balancers":""},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"unix:///var/run/crio/crio.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kubelet","operation":"Update","apiVe
rsion":"v1","time":"2023-07-31T10:52:31Z","fieldsType":"FieldsV1","fiel [truncated 6141 chars]
	I0731 10:53:06.735400   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-776386
	I0731 10:53:06.735419   96871 round_trippers.go:469] Request Headers:
	I0731 10:53:06.735427   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:53:06.735433   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:53:06.737516   96871 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
	I0731 10:53:06.737538   96871 round_trippers.go:577] Response Headers:
	I0731 10:53:06.737547   96871 round_trippers.go:580]     Audit-Id: fef5e582-f2b0-4557-be86-581b0a76c7ec
	I0731 10:53:06.737556   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:53:06.737566   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:53:06.737579   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:53:06.737591   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:53:06.737599   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:53:06 GMT
	I0731 10:53:06.737713   96871 request.go:1188] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-776386","uid":"4de4b338-d1a0-4f0a-acb8-6058d42c7868","resourceVersion":"364","creationTimestamp":"2023-07-31T10:52:31Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-776386","kubernetes.io/os":"linux","minikube.k8s.io/commit":"a7848ba25aaaad8ebb50e721c0d343e471188fc7","minikube.k8s.io/name":"multinode-776386","minikube.k8s.io/primary":"true","minikube.k8s.io/updated_at":"2023_07_31T10_52_35_0700","minikube.k8s.io/version":"v1.31.1","node-role.kubernetes.io/control-plane":"","node.kubernetes.io/exclude-from-external-load-balancers":""},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"unix:///var/run/crio/crio.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kubelet","operation":"Update","apiVe
rsion":"v1","time":"2023-07-31T10:52:31Z","fieldsType":"FieldsV1","fiel [truncated 6141 chars]
	I0731 10:53:07.235346   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-776386
	I0731 10:53:07.235370   96871 round_trippers.go:469] Request Headers:
	I0731 10:53:07.235378   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:53:07.235384   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:53:07.237526   96871 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
	I0731 10:53:07.237548   96871 round_trippers.go:577] Response Headers:
	I0731 10:53:07.237558   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:53:07 GMT
	I0731 10:53:07.237567   96871 round_trippers.go:580]     Audit-Id: 91d16e0f-c011-46e4-b1de-e151e027cdb0
	I0731 10:53:07.237578   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:53:07.237587   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:53:07.237600   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:53:07.237612   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:53:07.237713   96871 request.go:1188] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-776386","uid":"4de4b338-d1a0-4f0a-acb8-6058d42c7868","resourceVersion":"364","creationTimestamp":"2023-07-31T10:52:31Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-776386","kubernetes.io/os":"linux","minikube.k8s.io/commit":"a7848ba25aaaad8ebb50e721c0d343e471188fc7","minikube.k8s.io/name":"multinode-776386","minikube.k8s.io/primary":"true","minikube.k8s.io/updated_at":"2023_07_31T10_52_35_0700","minikube.k8s.io/version":"v1.31.1","node-role.kubernetes.io/control-plane":"","node.kubernetes.io/exclude-from-external-load-balancers":""},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"unix:///var/run/crio/crio.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kubelet","operation":"Update","apiVe
rsion":"v1","time":"2023-07-31T10:52:31Z","fieldsType":"FieldsV1","fiel [truncated 6141 chars]
	I0731 10:53:07.735507   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-776386
	I0731 10:53:07.735525   96871 round_trippers.go:469] Request Headers:
	I0731 10:53:07.735533   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:53:07.735540   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:53:07.737879   96871 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
	I0731 10:53:07.737902   96871 round_trippers.go:577] Response Headers:
	I0731 10:53:07.737913   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:53:07.737921   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:53:07.737930   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:53:07.737942   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:53:07 GMT
	I0731 10:53:07.737953   96871 round_trippers.go:580]     Audit-Id: bbd7dfdd-9f10-401d-9bf3-d6150919388a
	I0731 10:53:07.737966   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:53:07.738077   96871 request.go:1188] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-776386","uid":"4de4b338-d1a0-4f0a-acb8-6058d42c7868","resourceVersion":"364","creationTimestamp":"2023-07-31T10:52:31Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-776386","kubernetes.io/os":"linux","minikube.k8s.io/commit":"a7848ba25aaaad8ebb50e721c0d343e471188fc7","minikube.k8s.io/name":"multinode-776386","minikube.k8s.io/primary":"true","minikube.k8s.io/updated_at":"2023_07_31T10_52_35_0700","minikube.k8s.io/version":"v1.31.1","node-role.kubernetes.io/control-plane":"","node.kubernetes.io/exclude-from-external-load-balancers":""},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"unix:///var/run/crio/crio.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kubelet","operation":"Update","apiVe
rsion":"v1","time":"2023-07-31T10:52:31Z","fieldsType":"FieldsV1","fiel [truncated 6141 chars]
	I0731 10:53:07.738374   96871 node_ready.go:58] node "multinode-776386" has status "Ready":"False"
	I0731 10:53:08.234608   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-776386
	I0731 10:53:08.234626   96871 round_trippers.go:469] Request Headers:
	I0731 10:53:08.234635   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:53:08.234641   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:53:08.236828   96871 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
	I0731 10:53:08.236851   96871 round_trippers.go:577] Response Headers:
	I0731 10:53:08.236860   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:53:08.236869   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:53:08.236877   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:53:08 GMT
	I0731 10:53:08.236885   96871 round_trippers.go:580]     Audit-Id: 8fc18809-76bf-4596-847e-b005dbf6629b
	I0731 10:53:08.236894   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:53:08.236907   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:53:08.237008   96871 request.go:1188] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-776386","uid":"4de4b338-d1a0-4f0a-acb8-6058d42c7868","resourceVersion":"364","creationTimestamp":"2023-07-31T10:52:31Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-776386","kubernetes.io/os":"linux","minikube.k8s.io/commit":"a7848ba25aaaad8ebb50e721c0d343e471188fc7","minikube.k8s.io/name":"multinode-776386","minikube.k8s.io/primary":"true","minikube.k8s.io/updated_at":"2023_07_31T10_52_35_0700","minikube.k8s.io/version":"v1.31.1","node-role.kubernetes.io/control-plane":"","node.kubernetes.io/exclude-from-external-load-balancers":""},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"unix:///var/run/crio/crio.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kubelet","operation":"Update","apiVe
rsion":"v1","time":"2023-07-31T10:52:31Z","fieldsType":"FieldsV1","fiel [truncated 6141 chars]
	I0731 10:53:08.734689   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-776386
	I0731 10:53:08.734709   96871 round_trippers.go:469] Request Headers:
	I0731 10:53:08.734716   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:53:08.734722   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:53:08.736901   96871 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
	I0731 10:53:08.736921   96871 round_trippers.go:577] Response Headers:
	I0731 10:53:08.736928   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:53:08.736934   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:53:08 GMT
	I0731 10:53:08.736943   96871 round_trippers.go:580]     Audit-Id: 096552b1-c620-4ce2-b972-0e38a3595532
	I0731 10:53:08.736952   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:53:08.736959   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:53:08.736969   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:53:08.737105   96871 request.go:1188] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-776386","uid":"4de4b338-d1a0-4f0a-acb8-6058d42c7868","resourceVersion":"364","creationTimestamp":"2023-07-31T10:52:31Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-776386","kubernetes.io/os":"linux","minikube.k8s.io/commit":"a7848ba25aaaad8ebb50e721c0d343e471188fc7","minikube.k8s.io/name":"multinode-776386","minikube.k8s.io/primary":"true","minikube.k8s.io/updated_at":"2023_07_31T10_52_35_0700","minikube.k8s.io/version":"v1.31.1","node-role.kubernetes.io/control-plane":"","node.kubernetes.io/exclude-from-external-load-balancers":""},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"unix:///var/run/crio/crio.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kubelet","operation":"Update","apiVe
rsion":"v1","time":"2023-07-31T10:52:31Z","fieldsType":"FieldsV1","fiel [truncated 6141 chars]
	I0731 10:53:09.235398   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-776386
	I0731 10:53:09.235416   96871 round_trippers.go:469] Request Headers:
	I0731 10:53:09.235424   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:53:09.235430   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:53:09.237587   96871 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
	I0731 10:53:09.237608   96871 round_trippers.go:577] Response Headers:
	I0731 10:53:09.237618   96871 round_trippers.go:580]     Audit-Id: ad80ef89-c624-4ed8-9efa-677b7c474ae6
	I0731 10:53:09.237626   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:53:09.237633   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:53:09.237642   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:53:09.237650   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:53:09.237660   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:53:09 GMT
	I0731 10:53:09.237774   96871 request.go:1188] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-776386","uid":"4de4b338-d1a0-4f0a-acb8-6058d42c7868","resourceVersion":"364","creationTimestamp":"2023-07-31T10:52:31Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-776386","kubernetes.io/os":"linux","minikube.k8s.io/commit":"a7848ba25aaaad8ebb50e721c0d343e471188fc7","minikube.k8s.io/name":"multinode-776386","minikube.k8s.io/primary":"true","minikube.k8s.io/updated_at":"2023_07_31T10_52_35_0700","minikube.k8s.io/version":"v1.31.1","node-role.kubernetes.io/control-plane":"","node.kubernetes.io/exclude-from-external-load-balancers":""},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"unix:///var/run/crio/crio.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kubelet","operation":"Update","apiVe
rsion":"v1","time":"2023-07-31T10:52:31Z","fieldsType":"FieldsV1","fiel [truncated 6141 chars]
	I0731 10:53:09.735391   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-776386
	I0731 10:53:09.735413   96871 round_trippers.go:469] Request Headers:
	I0731 10:53:09.735427   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:53:09.735437   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:53:09.737504   96871 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
	I0731 10:53:09.737521   96871 round_trippers.go:577] Response Headers:
	I0731 10:53:09.737528   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:53:09.737534   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:53:09 GMT
	I0731 10:53:09.737539   96871 round_trippers.go:580]     Audit-Id: c3cec3b6-3cf4-4c9a-9205-e6f5132201ad
	I0731 10:53:09.737549   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:53:09.737557   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:53:09.737565   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:53:09.737740   96871 request.go:1188] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-776386","uid":"4de4b338-d1a0-4f0a-acb8-6058d42c7868","resourceVersion":"364","creationTimestamp":"2023-07-31T10:52:31Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-776386","kubernetes.io/os":"linux","minikube.k8s.io/commit":"a7848ba25aaaad8ebb50e721c0d343e471188fc7","minikube.k8s.io/name":"multinode-776386","minikube.k8s.io/primary":"true","minikube.k8s.io/updated_at":"2023_07_31T10_52_35_0700","minikube.k8s.io/version":"v1.31.1","node-role.kubernetes.io/control-plane":"","node.kubernetes.io/exclude-from-external-load-balancers":""},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"unix:///var/run/crio/crio.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kubelet","operation":"Update","apiVe
rsion":"v1","time":"2023-07-31T10:52:31Z","fieldsType":"FieldsV1","fiel [truncated 6141 chars]
	I0731 10:53:10.235418   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-776386
	I0731 10:53:10.235438   96871 round_trippers.go:469] Request Headers:
	I0731 10:53:10.235446   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:53:10.235453   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:53:10.237656   96871 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
	I0731 10:53:10.237678   96871 round_trippers.go:577] Response Headers:
	I0731 10:53:10.237688   96871 round_trippers.go:580]     Audit-Id: 8f21849e-d397-4ca6-a34c-7f60a43705d2
	I0731 10:53:10.237695   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:53:10.237702   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:53:10.237713   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:53:10.237726   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:53:10.237738   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:53:10 GMT
	I0731 10:53:10.237855   96871 request.go:1188] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-776386","uid":"4de4b338-d1a0-4f0a-acb8-6058d42c7868","resourceVersion":"364","creationTimestamp":"2023-07-31T10:52:31Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-776386","kubernetes.io/os":"linux","minikube.k8s.io/commit":"a7848ba25aaaad8ebb50e721c0d343e471188fc7","minikube.k8s.io/name":"multinode-776386","minikube.k8s.io/primary":"true","minikube.k8s.io/updated_at":"2023_07_31T10_52_35_0700","minikube.k8s.io/version":"v1.31.1","node-role.kubernetes.io/control-plane":"","node.kubernetes.io/exclude-from-external-load-balancers":""},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"unix:///var/run/crio/crio.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kubelet","operation":"Update","apiVe
rsion":"v1","time":"2023-07-31T10:52:31Z","fieldsType":"FieldsV1","fiel [truncated 6141 chars]
	I0731 10:53:10.238287   96871 node_ready.go:58] node "multinode-776386" has status "Ready":"False"
	I0731 10:53:10.735418   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-776386
	I0731 10:53:10.735440   96871 round_trippers.go:469] Request Headers:
	I0731 10:53:10.735453   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:53:10.735463   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:53:10.737778   96871 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
	I0731 10:53:10.737801   96871 round_trippers.go:577] Response Headers:
	I0731 10:53:10.737811   96871 round_trippers.go:580]     Audit-Id: c92b1409-3547-4a36-98b7-b996b4fdd9bf
	I0731 10:53:10.737819   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:53:10.737828   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:53:10.737839   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:53:10.737850   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:53:10.737858   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:53:10 GMT
	I0731 10:53:10.737985   96871 request.go:1188] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-776386","uid":"4de4b338-d1a0-4f0a-acb8-6058d42c7868","resourceVersion":"364","creationTimestamp":"2023-07-31T10:52:31Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-776386","kubernetes.io/os":"linux","minikube.k8s.io/commit":"a7848ba25aaaad8ebb50e721c0d343e471188fc7","minikube.k8s.io/name":"multinode-776386","minikube.k8s.io/primary":"true","minikube.k8s.io/updated_at":"2023_07_31T10_52_35_0700","minikube.k8s.io/version":"v1.31.1","node-role.kubernetes.io/control-plane":"","node.kubernetes.io/exclude-from-external-load-balancers":""},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"unix:///var/run/crio/crio.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kubelet","operation":"Update","apiVe
rsion":"v1","time":"2023-07-31T10:52:31Z","fieldsType":"FieldsV1","fiel [truncated 6141 chars]
	I0731 10:53:11.235398   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-776386
	I0731 10:53:11.235421   96871 round_trippers.go:469] Request Headers:
	I0731 10:53:11.235434   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:53:11.235442   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:53:11.237777   96871 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
	I0731 10:53:11.237801   96871 round_trippers.go:577] Response Headers:
	I0731 10:53:11.237810   96871 round_trippers.go:580]     Audit-Id: 7e95122b-bd62-48ae-8606-7a30f75429b6
	I0731 10:53:11.237819   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:53:11.237828   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:53:11.237834   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:53:11.237844   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:53:11.237855   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:53:11 GMT
	I0731 10:53:11.238029   96871 request.go:1188] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-776386","uid":"4de4b338-d1a0-4f0a-acb8-6058d42c7868","resourceVersion":"364","creationTimestamp":"2023-07-31T10:52:31Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-776386","kubernetes.io/os":"linux","minikube.k8s.io/commit":"a7848ba25aaaad8ebb50e721c0d343e471188fc7","minikube.k8s.io/name":"multinode-776386","minikube.k8s.io/primary":"true","minikube.k8s.io/updated_at":"2023_07_31T10_52_35_0700","minikube.k8s.io/version":"v1.31.1","node-role.kubernetes.io/control-plane":"","node.kubernetes.io/exclude-from-external-load-balancers":""},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"unix:///var/run/crio/crio.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kubelet","operation":"Update","apiVe
rsion":"v1","time":"2023-07-31T10:52:31Z","fieldsType":"FieldsV1","fiel [truncated 6141 chars]
	I0731 10:53:11.735422   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-776386
	I0731 10:53:11.735442   96871 round_trippers.go:469] Request Headers:
	I0731 10:53:11.735450   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:53:11.735456   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:53:11.737586   96871 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
	I0731 10:53:11.737609   96871 round_trippers.go:577] Response Headers:
	I0731 10:53:11.737617   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:53:11 GMT
	I0731 10:53:11.737623   96871 round_trippers.go:580]     Audit-Id: 2ef16f51-df6f-4a96-bda4-0ea796bee393
	I0731 10:53:11.737629   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:53:11.737634   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:53:11.737639   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:53:11.737644   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:53:11.737771   96871 request.go:1188] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-776386","uid":"4de4b338-d1a0-4f0a-acb8-6058d42c7868","resourceVersion":"364","creationTimestamp":"2023-07-31T10:52:31Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-776386","kubernetes.io/os":"linux","minikube.k8s.io/commit":"a7848ba25aaaad8ebb50e721c0d343e471188fc7","minikube.k8s.io/name":"multinode-776386","minikube.k8s.io/primary":"true","minikube.k8s.io/updated_at":"2023_07_31T10_52_35_0700","minikube.k8s.io/version":"v1.31.1","node-role.kubernetes.io/control-plane":"","node.kubernetes.io/exclude-from-external-load-balancers":""},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"unix:///var/run/crio/crio.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kubelet","operation":"Update","apiVe
rsion":"v1","time":"2023-07-31T10:52:31Z","fieldsType":"FieldsV1","fiel [truncated 6141 chars]
	I0731 10:53:12.235381   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-776386
	I0731 10:53:12.235400   96871 round_trippers.go:469] Request Headers:
	I0731 10:53:12.235409   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:53:12.235415   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:53:12.237531   96871 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
	I0731 10:53:12.237551   96871 round_trippers.go:577] Response Headers:
	I0731 10:53:12.237561   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:53:12.237568   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:53:12 GMT
	I0731 10:53:12.237575   96871 round_trippers.go:580]     Audit-Id: 3c8210df-096b-472e-8bf6-fb214c42e3f9
	I0731 10:53:12.237583   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:53:12.237591   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:53:12.237603   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:53:12.237705   96871 request.go:1188] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-776386","uid":"4de4b338-d1a0-4f0a-acb8-6058d42c7868","resourceVersion":"364","creationTimestamp":"2023-07-31T10:52:31Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-776386","kubernetes.io/os":"linux","minikube.k8s.io/commit":"a7848ba25aaaad8ebb50e721c0d343e471188fc7","minikube.k8s.io/name":"multinode-776386","minikube.k8s.io/primary":"true","minikube.k8s.io/updated_at":"2023_07_31T10_52_35_0700","minikube.k8s.io/version":"v1.31.1","node-role.kubernetes.io/control-plane":"","node.kubernetes.io/exclude-from-external-load-balancers":""},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"unix:///var/run/crio/crio.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kubelet","operation":"Update","apiVe
rsion":"v1","time":"2023-07-31T10:52:31Z","fieldsType":"FieldsV1","fiel [truncated 6141 chars]
	I0731 10:53:12.735337   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-776386
	I0731 10:53:12.735355   96871 round_trippers.go:469] Request Headers:
	I0731 10:53:12.735363   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:53:12.735369   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:53:12.737461   96871 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
	I0731 10:53:12.737483   96871 round_trippers.go:577] Response Headers:
	I0731 10:53:12.737494   96871 round_trippers.go:580]     Audit-Id: 3448f48e-437e-490e-810b-1268a288a5ea
	I0731 10:53:12.737502   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:53:12.737511   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:53:12.737520   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:53:12.737531   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:53:12.737541   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:53:12 GMT
	I0731 10:53:12.737663   96871 request.go:1188] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-776386","uid":"4de4b338-d1a0-4f0a-acb8-6058d42c7868","resourceVersion":"364","creationTimestamp":"2023-07-31T10:52:31Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-776386","kubernetes.io/os":"linux","minikube.k8s.io/commit":"a7848ba25aaaad8ebb50e721c0d343e471188fc7","minikube.k8s.io/name":"multinode-776386","minikube.k8s.io/primary":"true","minikube.k8s.io/updated_at":"2023_07_31T10_52_35_0700","minikube.k8s.io/version":"v1.31.1","node-role.kubernetes.io/control-plane":"","node.kubernetes.io/exclude-from-external-load-balancers":""},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"unix:///var/run/crio/crio.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kubelet","operation":"Update","apiVe
rsion":"v1","time":"2023-07-31T10:52:31Z","fieldsType":"FieldsV1","fiel [truncated 6141 chars]
	I0731 10:53:12.737972   96871 node_ready.go:58] node "multinode-776386" has status "Ready":"False"
	I0731 10:53:13.235229   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-776386
	I0731 10:53:13.235249   96871 round_trippers.go:469] Request Headers:
	I0731 10:53:13.235257   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:53:13.235263   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:53:13.237477   96871 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
	I0731 10:53:13.237497   96871 round_trippers.go:577] Response Headers:
	I0731 10:53:13.237507   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:53:13.237515   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:53:13.237524   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:53:13.237537   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:53:13 GMT
	I0731 10:53:13.237551   96871 round_trippers.go:580]     Audit-Id: 6c2468d1-2e61-47e7-a8cf-2a848b79497e
	I0731 10:53:13.237563   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:53:13.237660   96871 request.go:1188] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-776386","uid":"4de4b338-d1a0-4f0a-acb8-6058d42c7868","resourceVersion":"364","creationTimestamp":"2023-07-31T10:52:31Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-776386","kubernetes.io/os":"linux","minikube.k8s.io/commit":"a7848ba25aaaad8ebb50e721c0d343e471188fc7","minikube.k8s.io/name":"multinode-776386","minikube.k8s.io/primary":"true","minikube.k8s.io/updated_at":"2023_07_31T10_52_35_0700","minikube.k8s.io/version":"v1.31.1","node-role.kubernetes.io/control-plane":"","node.kubernetes.io/exclude-from-external-load-balancers":""},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"unix:///var/run/crio/crio.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kubelet","operation":"Update","apiVe
rsion":"v1","time":"2023-07-31T10:52:31Z","fieldsType":"FieldsV1","fiel [truncated 6141 chars]
	I0731 10:53:13.735293   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-776386
	I0731 10:53:13.735314   96871 round_trippers.go:469] Request Headers:
	I0731 10:53:13.735322   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:53:13.735328   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:53:13.737546   96871 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
	I0731 10:53:13.737566   96871 round_trippers.go:577] Response Headers:
	I0731 10:53:13.737575   96871 round_trippers.go:580]     Audit-Id: 29b9b509-2511-4ed8-b807-f9d5cb674ac3
	I0731 10:53:13.737582   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:53:13.737590   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:53:13.737598   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:53:13.737608   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:53:13.737621   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:53:13 GMT
	I0731 10:53:13.737790   96871 request.go:1188] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-776386","uid":"4de4b338-d1a0-4f0a-acb8-6058d42c7868","resourceVersion":"364","creationTimestamp":"2023-07-31T10:52:31Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-776386","kubernetes.io/os":"linux","minikube.k8s.io/commit":"a7848ba25aaaad8ebb50e721c0d343e471188fc7","minikube.k8s.io/name":"multinode-776386","minikube.k8s.io/primary":"true","minikube.k8s.io/updated_at":"2023_07_31T10_52_35_0700","minikube.k8s.io/version":"v1.31.1","node-role.kubernetes.io/control-plane":"","node.kubernetes.io/exclude-from-external-load-balancers":""},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"unix:///var/run/crio/crio.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kubelet","operation":"Update","apiVe
rsion":"v1","time":"2023-07-31T10:52:31Z","fieldsType":"FieldsV1","fiel [truncated 6141 chars]
	I0731 10:53:14.235405   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-776386
	I0731 10:53:14.235425   96871 round_trippers.go:469] Request Headers:
	I0731 10:53:14.235434   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:53:14.235440   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:53:14.237900   96871 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
	I0731 10:53:14.237926   96871 round_trippers.go:577] Response Headers:
	I0731 10:53:14.237936   96871 round_trippers.go:580]     Audit-Id: 0b4687c8-b3db-42f5-b270-cd2c0c951314
	I0731 10:53:14.237944   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:53:14.237951   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:53:14.237959   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:53:14.237973   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:53:14.237990   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:53:14 GMT
	I0731 10:53:14.238100   96871 request.go:1188] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-776386","uid":"4de4b338-d1a0-4f0a-acb8-6058d42c7868","resourceVersion":"364","creationTimestamp":"2023-07-31T10:52:31Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-776386","kubernetes.io/os":"linux","minikube.k8s.io/commit":"a7848ba25aaaad8ebb50e721c0d343e471188fc7","minikube.k8s.io/name":"multinode-776386","minikube.k8s.io/primary":"true","minikube.k8s.io/updated_at":"2023_07_31T10_52_35_0700","minikube.k8s.io/version":"v1.31.1","node-role.kubernetes.io/control-plane":"","node.kubernetes.io/exclude-from-external-load-balancers":""},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"unix:///var/run/crio/crio.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kubelet","operation":"Update","apiVe
rsion":"v1","time":"2023-07-31T10:52:31Z","fieldsType":"FieldsV1","fiel [truncated 6141 chars]
	I0731 10:53:14.735436   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-776386
	I0731 10:53:14.735459   96871 round_trippers.go:469] Request Headers:
	I0731 10:53:14.735471   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:53:14.735483   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:53:14.737770   96871 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
	I0731 10:53:14.737786   96871 round_trippers.go:577] Response Headers:
	I0731 10:53:14.737793   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:53:14.737799   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:53:14.737804   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:53:14 GMT
	I0731 10:53:14.737809   96871 round_trippers.go:580]     Audit-Id: b2640b6e-18b7-4506-90b2-7e244814160f
	I0731 10:53:14.737815   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:53:14.737820   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:53:14.737970   96871 request.go:1188] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-776386","uid":"4de4b338-d1a0-4f0a-acb8-6058d42c7868","resourceVersion":"364","creationTimestamp":"2023-07-31T10:52:31Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-776386","kubernetes.io/os":"linux","minikube.k8s.io/commit":"a7848ba25aaaad8ebb50e721c0d343e471188fc7","minikube.k8s.io/name":"multinode-776386","minikube.k8s.io/primary":"true","minikube.k8s.io/updated_at":"2023_07_31T10_52_35_0700","minikube.k8s.io/version":"v1.31.1","node-role.kubernetes.io/control-plane":"","node.kubernetes.io/exclude-from-external-load-balancers":""},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"unix:///var/run/crio/crio.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kubelet","operation":"Update","apiVe
rsion":"v1","time":"2023-07-31T10:52:31Z","fieldsType":"FieldsV1","fiel [truncated 6141 chars]
	I0731 10:53:14.738326   96871 node_ready.go:58] node "multinode-776386" has status "Ready":"False"
	I0731 10:53:15.235665   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-776386
	I0731 10:53:15.235687   96871 round_trippers.go:469] Request Headers:
	I0731 10:53:15.235695   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:53:15.235701   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:53:15.238082   96871 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
	I0731 10:53:15.238105   96871 round_trippers.go:577] Response Headers:
	I0731 10:53:15.238118   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:53:15.238126   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:53:15.238136   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:53:15 GMT
	I0731 10:53:15.238175   96871 round_trippers.go:580]     Audit-Id: e4e57dc4-c9e0-45c3-b7ee-a314ba5dd3c5
	I0731 10:53:15.238202   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:53:15.238213   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:53:15.238318   96871 request.go:1188] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-776386","uid":"4de4b338-d1a0-4f0a-acb8-6058d42c7868","resourceVersion":"364","creationTimestamp":"2023-07-31T10:52:31Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-776386","kubernetes.io/os":"linux","minikube.k8s.io/commit":"a7848ba25aaaad8ebb50e721c0d343e471188fc7","minikube.k8s.io/name":"multinode-776386","minikube.k8s.io/primary":"true","minikube.k8s.io/updated_at":"2023_07_31T10_52_35_0700","minikube.k8s.io/version":"v1.31.1","node-role.kubernetes.io/control-plane":"","node.kubernetes.io/exclude-from-external-load-balancers":""},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"unix:///var/run/crio/crio.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kubelet","operation":"Update","apiVe
rsion":"v1","time":"2023-07-31T10:52:31Z","fieldsType":"FieldsV1","fiel [truncated 6141 chars]
	I0731 10:53:15.734845   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-776386
	I0731 10:53:15.734865   96871 round_trippers.go:469] Request Headers:
	I0731 10:53:15.734873   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:53:15.734880   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:53:15.737043   96871 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
	I0731 10:53:15.737068   96871 round_trippers.go:577] Response Headers:
	I0731 10:53:15.737079   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:53:15.737086   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:53:15.737092   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:53:15 GMT
	I0731 10:53:15.737098   96871 round_trippers.go:580]     Audit-Id: 5da87a35-fa68-426c-b6eb-43522822d696
	I0731 10:53:15.737103   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:53:15.737108   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:53:15.737297   96871 request.go:1188] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-776386","uid":"4de4b338-d1a0-4f0a-acb8-6058d42c7868","resourceVersion":"364","creationTimestamp":"2023-07-31T10:52:31Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-776386","kubernetes.io/os":"linux","minikube.k8s.io/commit":"a7848ba25aaaad8ebb50e721c0d343e471188fc7","minikube.k8s.io/name":"multinode-776386","minikube.k8s.io/primary":"true","minikube.k8s.io/updated_at":"2023_07_31T10_52_35_0700","minikube.k8s.io/version":"v1.31.1","node-role.kubernetes.io/control-plane":"","node.kubernetes.io/exclude-from-external-load-balancers":""},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"unix:///var/run/crio/crio.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kubelet","operation":"Update","apiVe
rsion":"v1","time":"2023-07-31T10:52:31Z","fieldsType":"FieldsV1","fiel [truncated 6141 chars]
	I0731 10:53:16.235430   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-776386
	I0731 10:53:16.235454   96871 round_trippers.go:469] Request Headers:
	I0731 10:53:16.235466   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:53:16.235477   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:53:16.237615   96871 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
	I0731 10:53:16.237635   96871 round_trippers.go:577] Response Headers:
	I0731 10:53:16.237645   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:53:16.237653   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:53:16.237660   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:53:16.237668   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:53:16 GMT
	I0731 10:53:16.237676   96871 round_trippers.go:580]     Audit-Id: 0dff5446-e02e-498a-b117-4e7aa734084b
	I0731 10:53:16.237689   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:53:16.237801   96871 request.go:1188] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-776386","uid":"4de4b338-d1a0-4f0a-acb8-6058d42c7868","resourceVersion":"364","creationTimestamp":"2023-07-31T10:52:31Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-776386","kubernetes.io/os":"linux","minikube.k8s.io/commit":"a7848ba25aaaad8ebb50e721c0d343e471188fc7","minikube.k8s.io/name":"multinode-776386","minikube.k8s.io/primary":"true","minikube.k8s.io/updated_at":"2023_07_31T10_52_35_0700","minikube.k8s.io/version":"v1.31.1","node-role.kubernetes.io/control-plane":"","node.kubernetes.io/exclude-from-external-load-balancers":""},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"unix:///var/run/crio/crio.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kubelet","operation":"Update","apiVe
rsion":"v1","time":"2023-07-31T10:52:31Z","fieldsType":"FieldsV1","fiel [truncated 6141 chars]
	I0731 10:53:16.734704   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-776386
	I0731 10:53:16.734724   96871 round_trippers.go:469] Request Headers:
	I0731 10:53:16.734732   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:53:16.734738   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:53:16.736934   96871 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
	I0731 10:53:16.736959   96871 round_trippers.go:577] Response Headers:
	I0731 10:53:16.736970   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:53:16.736979   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:53:16.736988   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:53:16 GMT
	I0731 10:53:16.736998   96871 round_trippers.go:580]     Audit-Id: d64cb1df-583b-42cb-9b10-f6cfdd967c56
	I0731 10:53:16.737006   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:53:16.737013   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:53:16.737144   96871 request.go:1188] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-776386","uid":"4de4b338-d1a0-4f0a-acb8-6058d42c7868","resourceVersion":"364","creationTimestamp":"2023-07-31T10:52:31Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-776386","kubernetes.io/os":"linux","minikube.k8s.io/commit":"a7848ba25aaaad8ebb50e721c0d343e471188fc7","minikube.k8s.io/name":"multinode-776386","minikube.k8s.io/primary":"true","minikube.k8s.io/updated_at":"2023_07_31T10_52_35_0700","minikube.k8s.io/version":"v1.31.1","node-role.kubernetes.io/control-plane":"","node.kubernetes.io/exclude-from-external-load-balancers":""},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"unix:///var/run/crio/crio.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kubelet","operation":"Update","apiVe
rsion":"v1","time":"2023-07-31T10:52:31Z","fieldsType":"FieldsV1","fiel [truncated 6141 chars]
	I0731 10:53:17.234684   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-776386
	I0731 10:53:17.234704   96871 round_trippers.go:469] Request Headers:
	I0731 10:53:17.234712   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:53:17.234718   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:53:17.236914   96871 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
	I0731 10:53:17.236938   96871 round_trippers.go:577] Response Headers:
	I0731 10:53:17.236947   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:53:17.236953   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:53:17 GMT
	I0731 10:53:17.236958   96871 round_trippers.go:580]     Audit-Id: 5b95b572-0e51-496f-b8fe-b63a50991419
	I0731 10:53:17.236963   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:53:17.236969   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:53:17.236974   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:53:17.237073   96871 request.go:1188] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-776386","uid":"4de4b338-d1a0-4f0a-acb8-6058d42c7868","resourceVersion":"364","creationTimestamp":"2023-07-31T10:52:31Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-776386","kubernetes.io/os":"linux","minikube.k8s.io/commit":"a7848ba25aaaad8ebb50e721c0d343e471188fc7","minikube.k8s.io/name":"multinode-776386","minikube.k8s.io/primary":"true","minikube.k8s.io/updated_at":"2023_07_31T10_52_35_0700","minikube.k8s.io/version":"v1.31.1","node-role.kubernetes.io/control-plane":"","node.kubernetes.io/exclude-from-external-load-balancers":""},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"unix:///var/run/crio/crio.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kubelet","operation":"Update","apiVe
rsion":"v1","time":"2023-07-31T10:52:31Z","fieldsType":"FieldsV1","fiel [truncated 6141 chars]
	I0731 10:53:17.237494   96871 node_ready.go:58] node "multinode-776386" has status "Ready":"False"
	I0731 10:53:17.735599   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-776386
	I0731 10:53:17.735624   96871 round_trippers.go:469] Request Headers:
	I0731 10:53:17.735637   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:53:17.735648   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:53:17.737853   96871 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
	I0731 10:53:17.737874   96871 round_trippers.go:577] Response Headers:
	I0731 10:53:17.737885   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:53:17.737896   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:53:17.737906   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:53:17 GMT
	I0731 10:53:17.737918   96871 round_trippers.go:580]     Audit-Id: 3b77b86d-9150-4193-9f1d-66c10b13d26a
	I0731 10:53:17.737930   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:53:17.737939   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:53:17.738070   96871 request.go:1188] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-776386","uid":"4de4b338-d1a0-4f0a-acb8-6058d42c7868","resourceVersion":"364","creationTimestamp":"2023-07-31T10:52:31Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-776386","kubernetes.io/os":"linux","minikube.k8s.io/commit":"a7848ba25aaaad8ebb50e721c0d343e471188fc7","minikube.k8s.io/name":"multinode-776386","minikube.k8s.io/primary":"true","minikube.k8s.io/updated_at":"2023_07_31T10_52_35_0700","minikube.k8s.io/version":"v1.31.1","node-role.kubernetes.io/control-plane":"","node.kubernetes.io/exclude-from-external-load-balancers":""},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"unix:///var/run/crio/crio.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kubelet","operation":"Update","apiVe
rsion":"v1","time":"2023-07-31T10:52:31Z","fieldsType":"FieldsV1","fiel [truncated 6141 chars]
	I0731 10:53:18.235457   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-776386
	I0731 10:53:18.235483   96871 round_trippers.go:469] Request Headers:
	I0731 10:53:18.235496   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:53:18.235506   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:53:18.237830   96871 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
	I0731 10:53:18.237854   96871 round_trippers.go:577] Response Headers:
	I0731 10:53:18.237865   96871 round_trippers.go:580]     Audit-Id: 72be3b5e-c29b-4c9b-b36a-921939ff8b76
	I0731 10:53:18.237874   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:53:18.237882   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:53:18.237891   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:53:18.237906   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:53:18.237916   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:53:18 GMT
	I0731 10:53:18.238029   96871 request.go:1188] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-776386","uid":"4de4b338-d1a0-4f0a-acb8-6058d42c7868","resourceVersion":"364","creationTimestamp":"2023-07-31T10:52:31Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-776386","kubernetes.io/os":"linux","minikube.k8s.io/commit":"a7848ba25aaaad8ebb50e721c0d343e471188fc7","minikube.k8s.io/name":"multinode-776386","minikube.k8s.io/primary":"true","minikube.k8s.io/updated_at":"2023_07_31T10_52_35_0700","minikube.k8s.io/version":"v1.31.1","node-role.kubernetes.io/control-plane":"","node.kubernetes.io/exclude-from-external-load-balancers":""},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"unix:///var/run/crio/crio.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kubelet","operation":"Update","apiVe
rsion":"v1","time":"2023-07-31T10:52:31Z","fieldsType":"FieldsV1","fiel [truncated 6141 chars]
	I0731 10:53:18.734917   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-776386
	I0731 10:53:18.734938   96871 round_trippers.go:469] Request Headers:
	I0731 10:53:18.734946   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:53:18.734953   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:53:18.737520   96871 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
	I0731 10:53:18.737544   96871 round_trippers.go:577] Response Headers:
	I0731 10:53:18.737552   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:53:18 GMT
	I0731 10:53:18.737558   96871 round_trippers.go:580]     Audit-Id: 2e33e751-edbc-4c94-8dbb-37ced1b020bd
	I0731 10:53:18.737563   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:53:18.737568   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:53:18.737574   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:53:18.737580   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:53:18.737701   96871 request.go:1188] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-776386","uid":"4de4b338-d1a0-4f0a-acb8-6058d42c7868","resourceVersion":"364","creationTimestamp":"2023-07-31T10:52:31Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-776386","kubernetes.io/os":"linux","minikube.k8s.io/commit":"a7848ba25aaaad8ebb50e721c0d343e471188fc7","minikube.k8s.io/name":"multinode-776386","minikube.k8s.io/primary":"true","minikube.k8s.io/updated_at":"2023_07_31T10_52_35_0700","minikube.k8s.io/version":"v1.31.1","node-role.kubernetes.io/control-plane":"","node.kubernetes.io/exclude-from-external-load-balancers":""},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"unix:///var/run/crio/crio.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kubelet","operation":"Update","apiVe
rsion":"v1","time":"2023-07-31T10:52:31Z","fieldsType":"FieldsV1","fiel [truncated 6141 chars]
	I0731 10:53:19.235284   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-776386
	I0731 10:53:19.235305   96871 round_trippers.go:469] Request Headers:
	I0731 10:53:19.235316   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:53:19.235325   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:53:19.237917   96871 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
	I0731 10:53:19.237935   96871 round_trippers.go:577] Response Headers:
	I0731 10:53:19.237941   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:53:19 GMT
	I0731 10:53:19.237947   96871 round_trippers.go:580]     Audit-Id: f0f74f30-b615-4434-9902-139d581b7352
	I0731 10:53:19.237953   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:53:19.237958   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:53:19.237966   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:53:19.237974   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:53:19.238087   96871 request.go:1188] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-776386","uid":"4de4b338-d1a0-4f0a-acb8-6058d42c7868","resourceVersion":"364","creationTimestamp":"2023-07-31T10:52:31Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-776386","kubernetes.io/os":"linux","minikube.k8s.io/commit":"a7848ba25aaaad8ebb50e721c0d343e471188fc7","minikube.k8s.io/name":"multinode-776386","minikube.k8s.io/primary":"true","minikube.k8s.io/updated_at":"2023_07_31T10_52_35_0700","minikube.k8s.io/version":"v1.31.1","node-role.kubernetes.io/control-plane":"","node.kubernetes.io/exclude-from-external-load-balancers":""},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"unix:///var/run/crio/crio.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kubelet","operation":"Update","apiVe
rsion":"v1","time":"2023-07-31T10:52:31Z","fieldsType":"FieldsV1","fiel [truncated 6141 chars]
	I0731 10:53:19.238426   96871 node_ready.go:58] node "multinode-776386" has status "Ready":"False"
	I0731 10:53:19.735411   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-776386
	I0731 10:53:19.735429   96871 round_trippers.go:469] Request Headers:
	I0731 10:53:19.735437   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:53:19.735443   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:53:19.737279   96871 round_trippers.go:574] Response Status: 200 OK in 1 milliseconds
	I0731 10:53:19.737301   96871 round_trippers.go:577] Response Headers:
	I0731 10:53:19.737311   96871 round_trippers.go:580]     Audit-Id: e737f136-bc61-4773-aa9b-eb34ecf0fce0
	I0731 10:53:19.737319   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:53:19.737329   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:53:19.737341   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:53:19.737351   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:53:19.737363   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:53:19 GMT
	I0731 10:53:19.737484   96871 request.go:1188] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-776386","uid":"4de4b338-d1a0-4f0a-acb8-6058d42c7868","resourceVersion":"428","creationTimestamp":"2023-07-31T10:52:31Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-776386","kubernetes.io/os":"linux","minikube.k8s.io/commit":"a7848ba25aaaad8ebb50e721c0d343e471188fc7","minikube.k8s.io/name":"multinode-776386","minikube.k8s.io/primary":"true","minikube.k8s.io/updated_at":"2023_07_31T10_52_35_0700","minikube.k8s.io/version":"v1.31.1","node-role.kubernetes.io/control-plane":"","node.kubernetes.io/exclude-from-external-load-balancers":""},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"unix:///var/run/crio/crio.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kubelet","operation":"Update","apiVe
rsion":"v1","time":"2023-07-31T10:52:31Z","fieldsType":"FieldsV1","fiel [truncated 5947 chars]
	I0731 10:53:19.737809   96871 node_ready.go:49] node "multinode-776386" has status "Ready":"True"
	I0731 10:53:19.737827   96871 node_ready.go:38] duration metric: took 31.015686626s waiting for node "multinode-776386" to be "Ready" ...
	I0731 10:53:19.737838   96871 pod_ready.go:35] extra waiting up to 6m0s for all system-critical pods including labels [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] to be "Ready" ...
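
The pod_ready.go:35 line above begins the next phase: for each system-critical label selector, list the matching kube-system pods and keep polling until every one reports the PodReady condition. A sketch of that phase, continuing the hypothetical readiness package from the node-ready sketch above (selector list copied from the log line; note this toy version treats an empty match as ready, which real code would likely guard against):

	// waitSystemPodsReady polls each selector's pods until all report Ready.
	func waitSystemPodsReady(cs kubernetes.Interface, timeout time.Duration) error {
		selectors := []string{
			"k8s-app=kube-dns", "component=etcd", "component=kube-apiserver",
			"component=kube-controller-manager", "k8s-app=kube-proxy",
			"component=kube-scheduler",
		}
		deadline := time.Now().Add(timeout)
		for _, sel := range selectors {
			for {
				pods, err := cs.CoreV1().Pods("kube-system").List(context.TODO(),
					metav1.ListOptions{LabelSelector: sel})
				if err == nil && allReady(pods.Items) {
					break
				}
				if time.Now().After(deadline) {
					return fmt.Errorf("pods matching %q not Ready within %v", sel, timeout)
				}
				time.Sleep(500 * time.Millisecond)
			}
		}
		return nil
	}

	// allReady reports whether every pod has the PodReady condition set to True.
	func allReady(pods []corev1.Pod) bool {
		for _, p := range pods {
			ready := false
			for _, c := range p.Status.Conditions {
				if c.Type == corev1.PodReady && c.Status == corev1.ConditionTrue {
					ready = true
				}
			}
			if !ready {
				return false
			}
		}
		return true
	}
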
	I0731 10:53:19.737894   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/namespaces/kube-system/pods
	I0731 10:53:19.737904   96871 round_trippers.go:469] Request Headers:
	I0731 10:53:19.737915   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:53:19.737926   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:53:19.740864   96871 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
	I0731 10:53:19.740898   96871 round_trippers.go:577] Response Headers:
	I0731 10:53:19.740909   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:53:19.740918   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:53:19.740926   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:53:19.740936   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:53:19 GMT
	I0731 10:53:19.740950   96871 round_trippers.go:580]     Audit-Id: 520b95c6-3008-4e58-b240-10b75a8958f6
	I0731 10:53:19.740956   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:53:19.741392   96871 request.go:1188] Response Body: {"kind":"PodList","apiVersion":"v1","metadata":{"resourceVersion":"428"},"items":[{"metadata":{"name":"coredns-5d78c9869d-w86c5","generateName":"coredns-5d78c9869d-","namespace":"kube-system","uid":"fcb57c8f-9276-4e70-a275-2865ac997394","resourceVersion":"387","creationTimestamp":"2023-07-31T10:52:48Z","labels":{"k8s-app":"kube-dns","pod-template-hash":"5d78c9869d"},"ownerReferences":[{"apiVersion":"apps/v1","kind":"ReplicaSet","name":"coredns-5d78c9869d","uid":"78328e85-a38b-4605-8363-2be69c87f749","controller":true,"blockOwnerDeletion":true}],"managedFields":[{"manager":"kube-controller-manager","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:52:48Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:k8s-app":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"78328e85-a38b-4605-8363-2be69c87f749\"}":{}}},"f:spec":{"f:affinity":{".":{},"f:podAntiAffinity":{".":{},"f
:preferredDuringSchedulingIgnoredDuringExecution":{}}},"f:containers":{ [truncated 52944 chars]
	I0731 10:53:19.745565   96871 pod_ready.go:78] waiting up to 6m0s for pod "coredns-5d78c9869d-w86c5" in "kube-system" namespace to be "Ready" ...
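
The same per-pod condition can be checked by hand with kubectl's built-in wait; the pod name, namespace, and timeout below come from the log line above (an equivalent manual command, not what the test itself runs):

	kubectl --context multinode-776386 -n kube-system wait \
	  --for=condition=Ready pod/coredns-5d78c9869d-w86c5 --timeout=6m
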
	I0731 10:53:19.745646   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/namespaces/kube-system/pods/coredns-5d78c9869d-w86c5
	I0731 10:53:19.745657   96871 round_trippers.go:469] Request Headers:
	I0731 10:53:19.745669   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:53:19.745682   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:53:19.747584   96871 round_trippers.go:574] Response Status: 200 OK in 1 milliseconds
	I0731 10:53:19.747603   96871 round_trippers.go:577] Response Headers:
	I0731 10:53:19.747614   96871 round_trippers.go:580]     Audit-Id: d97b1253-909a-44a3-b287-8cdaaa7664da
	I0731 10:53:19.747624   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:53:19.747633   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:53:19.747642   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:53:19.747651   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:53:19.747663   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:53:19 GMT
	I0731 10:53:19.747760   96871 request.go:1188] Response Body: {"kind":"Pod","apiVersion":"v1","metadata":{"name":"coredns-5d78c9869d-w86c5","generateName":"coredns-5d78c9869d-","namespace":"kube-system","uid":"fcb57c8f-9276-4e70-a275-2865ac997394","resourceVersion":"429","creationTimestamp":"2023-07-31T10:52:48Z","labels":{"k8s-app":"kube-dns","pod-template-hash":"5d78c9869d"},"ownerReferences":[{"apiVersion":"apps/v1","kind":"ReplicaSet","name":"coredns-5d78c9869d","uid":"78328e85-a38b-4605-8363-2be69c87f749","controller":true,"blockOwnerDeletion":true}],"managedFields":[{"manager":"kube-controller-manager","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:52:48Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:k8s-app":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"78328e85-a38b-4605-8363-2be69c87f749\"}":{}}},"f:spec":{"f:affinity":{".":{},"f:podAntiAffinity":{".":{},"f:preferredDuringSchedulingIgnoredDuringExecution":{
}}},"f:containers":{"k:{\"name\":\"coredns\"}":{".":{},"f:args":{},"f:i [truncated 4762 chars]
	I0731 10:53:19.748136   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-776386
	I0731 10:53:19.748149   96871 round_trippers.go:469] Request Headers:
	I0731 10:53:19.748156   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:53:19.748165   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:53:19.752258   96871 round_trippers.go:574] Response Status: 200 OK in 4 milliseconds
	I0731 10:53:19.752275   96871 round_trippers.go:577] Response Headers:
	I0731 10:53:19.752285   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:53:19.752292   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:53:19.752299   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:53:19 GMT
	I0731 10:53:19.752311   96871 round_trippers.go:580]     Audit-Id: 60011e2d-0312-4d8c-9232-2f99090936ef
	I0731 10:53:19.752320   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:53:19.752334   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:53:19.752440   96871 request.go:1188] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-776386","uid":"4de4b338-d1a0-4f0a-acb8-6058d42c7868","resourceVersion":"428","creationTimestamp":"2023-07-31T10:52:31Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-776386","kubernetes.io/os":"linux","minikube.k8s.io/commit":"a7848ba25aaaad8ebb50e721c0d343e471188fc7","minikube.k8s.io/name":"multinode-776386","minikube.k8s.io/primary":"true","minikube.k8s.io/updated_at":"2023_07_31T10_52_35_0700","minikube.k8s.io/version":"v1.31.1","node-role.kubernetes.io/control-plane":"","node.kubernetes.io/exclude-from-external-load-balancers":""},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"unix:///var/run/crio/crio.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kubelet","operation":"Update","apiVe
rsion":"v1","time":"2023-07-31T10:52:31Z","fieldsType":"FieldsV1","fiel [truncated 5947 chars]
	I0731 10:53:19.752888   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/namespaces/kube-system/pods/coredns-5d78c9869d-w86c5
	I0731 10:53:19.752901   96871 round_trippers.go:469] Request Headers:
	I0731 10:53:19.752909   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:53:19.752915   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:53:19.754803   96871 round_trippers.go:574] Response Status: 200 OK in 1 milliseconds
	I0731 10:53:19.754827   96871 round_trippers.go:577] Response Headers:
	I0731 10:53:19.754837   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:53:19.754847   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:53:19.754855   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:53:19 GMT
	I0731 10:53:19.754865   96871 round_trippers.go:580]     Audit-Id: 5b878eb7-8c9f-4067-97fb-5e0308e3c270
	I0731 10:53:19.754875   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:53:19.754888   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:53:19.755014   96871 request.go:1188] Response Body: {"kind":"Pod","apiVersion":"v1","metadata":{"name":"coredns-5d78c9869d-w86c5","generateName":"coredns-5d78c9869d-","namespace":"kube-system","uid":"fcb57c8f-9276-4e70-a275-2865ac997394","resourceVersion":"433","creationTimestamp":"2023-07-31T10:52:48Z","labels":{"k8s-app":"kube-dns","pod-template-hash":"5d78c9869d"},"ownerReferences":[{"apiVersion":"apps/v1","kind":"ReplicaSet","name":"coredns-5d78c9869d","uid":"78328e85-a38b-4605-8363-2be69c87f749","controller":true,"blockOwnerDeletion":true}],"managedFields":[{"manager":"kube-controller-manager","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:52:48Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:k8s-app":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"78328e85-a38b-4605-8363-2be69c87f749\"}":{}}},"f:spec":{"f:affinity":{".":{},"f:podAntiAffinity":{".":{},"f:preferredDuringSchedulingIgnoredDuringExecution":{
}}},"f:containers":{"k:{\"name\":\"coredns\"}":{".":{},"f:args":{},"f:i [truncated 6150 chars]
	I0731 10:53:19.755491   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-776386
	I0731 10:53:19.755506   96871 round_trippers.go:469] Request Headers:
	I0731 10:53:19.755516   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:53:19.755524   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:53:19.756963   96871 round_trippers.go:574] Response Status: 200 OK in 1 milliseconds
	I0731 10:53:19.756983   96871 round_trippers.go:577] Response Headers:
	I0731 10:53:19.756994   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:53:19 GMT
	I0731 10:53:19.757003   96871 round_trippers.go:580]     Audit-Id: 189f8914-6067-46f7-a517-0fa7138e57c7
	I0731 10:53:19.757012   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:53:19.757021   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:53:19.757031   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:53:19.757043   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:53:19.757155   96871 request.go:1188] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-776386","uid":"4de4b338-d1a0-4f0a-acb8-6058d42c7868","resourceVersion":"428","creationTimestamp":"2023-07-31T10:52:31Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-776386","kubernetes.io/os":"linux","minikube.k8s.io/commit":"a7848ba25aaaad8ebb50e721c0d343e471188fc7","minikube.k8s.io/name":"multinode-776386","minikube.k8s.io/primary":"true","minikube.k8s.io/updated_at":"2023_07_31T10_52_35_0700","minikube.k8s.io/version":"v1.31.1","node-role.kubernetes.io/control-plane":"","node.kubernetes.io/exclude-from-external-load-balancers":""},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"unix:///var/run/crio/crio.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kubelet","operation":"Update","apiVe
rsion":"v1","time":"2023-07-31T10:52:31Z","fieldsType":"FieldsV1","fiel [truncated 5947 chars]
	I0731 10:53:20.258297   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/namespaces/kube-system/pods/coredns-5d78c9869d-w86c5
	I0731 10:53:20.258317   96871 round_trippers.go:469] Request Headers:
	I0731 10:53:20.258325   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:53:20.258331   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:53:20.260522   96871 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
	I0731 10:53:20.260544   96871 round_trippers.go:577] Response Headers:
	I0731 10:53:20.260553   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:53:20.260562   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:53:20.260570   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:53:20.260594   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:53:20 GMT
	I0731 10:53:20.260602   96871 round_trippers.go:580]     Audit-Id: 26bc8829-85dc-45a5-a92e-b26b0f6149ce
	I0731 10:53:20.260609   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:53:20.260739   96871 request.go:1188] Response Body: {"kind":"Pod","apiVersion":"v1","metadata":{"name":"coredns-5d78c9869d-w86c5","generateName":"coredns-5d78c9869d-","namespace":"kube-system","uid":"fcb57c8f-9276-4e70-a275-2865ac997394","resourceVersion":"433","creationTimestamp":"2023-07-31T10:52:48Z","labels":{"k8s-app":"kube-dns","pod-template-hash":"5d78c9869d"},"ownerReferences":[{"apiVersion":"apps/v1","kind":"ReplicaSet","name":"coredns-5d78c9869d","uid":"78328e85-a38b-4605-8363-2be69c87f749","controller":true,"blockOwnerDeletion":true}],"managedFields":[{"manager":"kube-controller-manager","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:52:48Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:k8s-app":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"78328e85-a38b-4605-8363-2be69c87f749\"}":{}}},"f:spec":{"f:affinity":{".":{},"f:podAntiAffinity":{".":{},"f:preferredDuringSchedulingIgnoredDuringExecution":{
}}},"f:containers":{"k:{\"name\":\"coredns\"}":{".":{},"f:args":{},"f:i [truncated 6150 chars]
	I0731 10:53:20.261163   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-776386
	I0731 10:53:20.261174   96871 round_trippers.go:469] Request Headers:
	I0731 10:53:20.261182   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:53:20.261191   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:53:20.263338   96871 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
	I0731 10:53:20.263353   96871 round_trippers.go:577] Response Headers:
	I0731 10:53:20.263359   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:53:20 GMT
	I0731 10:53:20.263365   96871 round_trippers.go:580]     Audit-Id: 18756a90-03c7-49c9-ba28-87d66fd826b8
	I0731 10:53:20.263372   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:53:20.263381   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:53:20.263393   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:53:20.263403   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:53:20.263524   96871 request.go:1188] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-776386","uid":"4de4b338-d1a0-4f0a-acb8-6058d42c7868","resourceVersion":"428","creationTimestamp":"2023-07-31T10:52:31Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-776386","kubernetes.io/os":"linux","minikube.k8s.io/commit":"a7848ba25aaaad8ebb50e721c0d343e471188fc7","minikube.k8s.io/name":"multinode-776386","minikube.k8s.io/primary":"true","minikube.k8s.io/updated_at":"2023_07_31T10_52_35_0700","minikube.k8s.io/version":"v1.31.1","node-role.kubernetes.io/control-plane":"","node.kubernetes.io/exclude-from-external-load-balancers":""},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"unix:///var/run/crio/crio.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kubelet","operation":"Update","apiVe
rsion":"v1","time":"2023-07-31T10:52:31Z","fieldsType":"FieldsV1","fiel [truncated 5947 chars]
	I0731 10:53:20.758108   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/namespaces/kube-system/pods/coredns-5d78c9869d-w86c5
	I0731 10:53:20.758128   96871 round_trippers.go:469] Request Headers:
	I0731 10:53:20.758136   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:53:20.758142   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:53:20.760382   96871 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
	I0731 10:53:20.760407   96871 round_trippers.go:577] Response Headers:
	I0731 10:53:20.760416   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:53:20.760422   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:53:20.760427   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:53:20.760433   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:53:20.760438   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:53:20 GMT
	I0731 10:53:20.760443   96871 round_trippers.go:580]     Audit-Id: e3d13ecb-9273-4d09-b118-f79b6b7eebcc
	I0731 10:53:20.760564   96871 request.go:1188] Response Body: {"kind":"Pod","apiVersion":"v1","metadata":{"name":"coredns-5d78c9869d-w86c5","generateName":"coredns-5d78c9869d-","namespace":"kube-system","uid":"fcb57c8f-9276-4e70-a275-2865ac997394","resourceVersion":"444","creationTimestamp":"2023-07-31T10:52:48Z","labels":{"k8s-app":"kube-dns","pod-template-hash":"5d78c9869d"},"ownerReferences":[{"apiVersion":"apps/v1","kind":"ReplicaSet","name":"coredns-5d78c9869d","uid":"78328e85-a38b-4605-8363-2be69c87f749","controller":true,"blockOwnerDeletion":true}],"managedFields":[{"manager":"kube-controller-manager","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:52:48Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:k8s-app":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"78328e85-a38b-4605-8363-2be69c87f749\"}":{}}},"f:spec":{"f:affinity":{".":{},"f:podAntiAffinity":{".":{},"f:preferredDuringSchedulingIgnoredDuringExecution":{
}}},"f:containers":{"k:{\"name\":\"coredns\"}":{".":{},"f:args":{},"f:i [truncated 6263 chars]
	I0731 10:53:20.761021   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-776386
	I0731 10:53:20.761038   96871 round_trippers.go:469] Request Headers:
	I0731 10:53:20.761049   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:53:20.761057   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:53:20.763148   96871 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
	I0731 10:53:20.763168   96871 round_trippers.go:577] Response Headers:
	I0731 10:53:20.763178   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:53:20 GMT
	I0731 10:53:20.763186   96871 round_trippers.go:580]     Audit-Id: a2cef312-1278-4087-a53c-251a98b27bac
	I0731 10:53:20.763194   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:53:20.763202   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:53:20.763214   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:53:20.763232   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:53:20.763321   96871 request.go:1188] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-776386","uid":"4de4b338-d1a0-4f0a-acb8-6058d42c7868","resourceVersion":"428","creationTimestamp":"2023-07-31T10:52:31Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-776386","kubernetes.io/os":"linux","minikube.k8s.io/commit":"a7848ba25aaaad8ebb50e721c0d343e471188fc7","minikube.k8s.io/name":"multinode-776386","minikube.k8s.io/primary":"true","minikube.k8s.io/updated_at":"2023_07_31T10_52_35_0700","minikube.k8s.io/version":"v1.31.1","node-role.kubernetes.io/control-plane":"","node.kubernetes.io/exclude-from-external-load-balancers":""},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"unix:///var/run/crio/crio.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kubelet","operation":"Update","apiVe
rsion":"v1","time":"2023-07-31T10:52:31Z","fieldsType":"FieldsV1","fiel [truncated 5947 chars]
	I0731 10:53:20.763588   96871 pod_ready.go:92] pod "coredns-5d78c9869d-w86c5" in "kube-system" namespace has status "Ready":"True"
	I0731 10:53:20.763600   96871 pod_ready.go:81] duration metric: took 1.018011995s waiting for pod "coredns-5d78c9869d-w86c5" in "kube-system" namespace to be "Ready" ...
	I0731 10:53:20.763608   96871 pod_ready.go:78] waiting up to 6m0s for pod "etcd-multinode-776386" in "kube-system" namespace to be "Ready" ...
	I0731 10:53:20.763653   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/namespaces/kube-system/pods/etcd-multinode-776386
	I0731 10:53:20.763661   96871 round_trippers.go:469] Request Headers:
	I0731 10:53:20.763668   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:53:20.763674   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:53:20.767885   96871 round_trippers.go:574] Response Status: 200 OK in 4 milliseconds
	I0731 10:53:20.767904   96871 round_trippers.go:577] Response Headers:
	I0731 10:53:20.767913   96871 round_trippers.go:580]     Audit-Id: 6f8eefa3-f9ef-488d-8c20-05af1e1b266f
	I0731 10:53:20.767922   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:53:20.767929   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:53:20.767937   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:53:20.767950   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:53:20.767958   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:53:20 GMT
	I0731 10:53:20.768072   96871 request.go:1188] Response Body: {"kind":"Pod","apiVersion":"v1","metadata":{"name":"etcd-multinode-776386","namespace":"kube-system","uid":"09d3804e-7c35-4295-af6c-b2f481c4903d","resourceVersion":"324","creationTimestamp":"2023-07-31T10:52:34Z","labels":{"component":"etcd","tier":"control-plane"},"annotations":{"kubeadm.kubernetes.io/etcd.advertise-client-urls":"https://192.168.58.2:2379","kubernetes.io/config.hash":"8337fb7d52d96869ff08a0ef3d4aa6f6","kubernetes.io/config.mirror":"8337fb7d52d96869ff08a0ef3d4aa6f6","kubernetes.io/config.seen":"2023-07-31T10:52:34.447469250Z","kubernetes.io/config.source":"file"},"ownerReferences":[{"apiVersion":"v1","kind":"Node","name":"multinode-776386","uid":"4de4b338-d1a0-4f0a-acb8-6058d42c7868","controller":true}],"managedFields":[{"manager":"kubelet","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:52:34Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{".":{},"f:kubeadm.kubernetes.io/etcd.advertise-cl
ient-urls":{},"f:kubernetes.io/config.hash":{},"f:kubernetes.io/config. [truncated 5833 chars]
	I0731 10:53:20.768437   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-776386
	I0731 10:53:20.768449   96871 round_trippers.go:469] Request Headers:
	I0731 10:53:20.768456   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:53:20.768464   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:53:20.770423   96871 round_trippers.go:574] Response Status: 200 OK in 1 milliseconds
	I0731 10:53:20.770442   96871 round_trippers.go:577] Response Headers:
	I0731 10:53:20.770452   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:53:20.770461   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:53:20.770468   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:53:20.770479   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:53:20.770488   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:53:20 GMT
	I0731 10:53:20.770500   96871 round_trippers.go:580]     Audit-Id: f8ce4a2e-4c59-4d8a-97e4-ed747b0727ad
	I0731 10:53:20.770596   96871 request.go:1188] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-776386","uid":"4de4b338-d1a0-4f0a-acb8-6058d42c7868","resourceVersion":"428","creationTimestamp":"2023-07-31T10:52:31Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-776386","kubernetes.io/os":"linux","minikube.k8s.io/commit":"a7848ba25aaaad8ebb50e721c0d343e471188fc7","minikube.k8s.io/name":"multinode-776386","minikube.k8s.io/primary":"true","minikube.k8s.io/updated_at":"2023_07_31T10_52_35_0700","minikube.k8s.io/version":"v1.31.1","node-role.kubernetes.io/control-plane":"","node.kubernetes.io/exclude-from-external-load-balancers":""},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"unix:///var/run/crio/crio.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kubelet","operation":"Update","apiVe
rsion":"v1","time":"2023-07-31T10:52:31Z","fieldsType":"FieldsV1","fiel [truncated 5947 chars]
	I0731 10:53:20.770856   96871 pod_ready.go:92] pod "etcd-multinode-776386" in "kube-system" namespace has status "Ready":"True"
	I0731 10:53:20.770868   96871 pod_ready.go:81] duration metric: took 7.255565ms waiting for pod "etcd-multinode-776386" in "kube-system" namespace to be "Ready" ...
	I0731 10:53:20.770878   96871 pod_ready.go:78] waiting up to 6m0s for pod "kube-apiserver-multinode-776386" in "kube-system" namespace to be "Ready" ...
	I0731 10:53:20.770917   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/namespaces/kube-system/pods/kube-apiserver-multinode-776386
	I0731 10:53:20.770924   96871 round_trippers.go:469] Request Headers:
	I0731 10:53:20.770931   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:53:20.770937   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:53:20.772688   96871 round_trippers.go:574] Response Status: 200 OK in 1 milliseconds
	I0731 10:53:20.772707   96871 round_trippers.go:577] Response Headers:
	I0731 10:53:20.772717   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:53:20 GMT
	I0731 10:53:20.772725   96871 round_trippers.go:580]     Audit-Id: 9fe5e461-d01b-4f73-bf43-8da9f49c36d9
	I0731 10:53:20.772744   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:53:20.772760   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:53:20.772769   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:53:20.772779   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:53:20.772910   96871 request.go:1188] Response Body: {"kind":"Pod","apiVersion":"v1","metadata":{"name":"kube-apiserver-multinode-776386","namespace":"kube-system","uid":"cd8aaec2-83b4-4346-9ad9-9167afe8b68f","resourceVersion":"328","creationTimestamp":"2023-07-31T10:52:34Z","labels":{"component":"kube-apiserver","tier":"control-plane"},"annotations":{"kubeadm.kubernetes.io/kube-apiserver.advertise-address.endpoint":"192.168.58.2:8443","kubernetes.io/config.hash":"680a3d2f61a1a9343330816821eb0e8f","kubernetes.io/config.mirror":"680a3d2f61a1a9343330816821eb0e8f","kubernetes.io/config.seen":"2023-07-31T10:52:34.447473226Z","kubernetes.io/config.source":"file"},"ownerReferences":[{"apiVersion":"v1","kind":"Node","name":"multinode-776386","uid":"4de4b338-d1a0-4f0a-acb8-6058d42c7868","controller":true}],"managedFields":[{"manager":"kubelet","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:52:34Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{".":{},"f:kubeadm.kube
rnetes.io/kube-apiserver.advertise-address.endpoint":{},"f:kubernetes.i [truncated 8219 chars]
	I0731 10:53:20.773309   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-776386
	I0731 10:53:20.773322   96871 round_trippers.go:469] Request Headers:
	I0731 10:53:20.773329   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:53:20.773336   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:53:20.775164   96871 round_trippers.go:574] Response Status: 200 OK in 1 milliseconds
	I0731 10:53:20.775179   96871 round_trippers.go:577] Response Headers:
	I0731 10:53:20.775185   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:53:20.775190   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:53:20.775196   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:53:20 GMT
	I0731 10:53:20.775201   96871 round_trippers.go:580]     Audit-Id: 037d7a6f-7747-4a6d-9347-5064817aa69f
	I0731 10:53:20.775206   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:53:20.775211   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:53:20.775358   96871 request.go:1188] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-776386","uid":"4de4b338-d1a0-4f0a-acb8-6058d42c7868","resourceVersion":"428","creationTimestamp":"2023-07-31T10:52:31Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-776386","kubernetes.io/os":"linux","minikube.k8s.io/commit":"a7848ba25aaaad8ebb50e721c0d343e471188fc7","minikube.k8s.io/name":"multinode-776386","minikube.k8s.io/primary":"true","minikube.k8s.io/updated_at":"2023_07_31T10_52_35_0700","minikube.k8s.io/version":"v1.31.1","node-role.kubernetes.io/control-plane":"","node.kubernetes.io/exclude-from-external-load-balancers":""},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"unix:///var/run/crio/crio.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kubelet","operation":"Update","apiVe
rsion":"v1","time":"2023-07-31T10:52:31Z","fieldsType":"FieldsV1","fiel [truncated 5947 chars]
	I0731 10:53:20.775652   96871 pod_ready.go:92] pod "kube-apiserver-multinode-776386" in "kube-system" namespace has status "Ready":"True"
	I0731 10:53:20.775665   96871 pod_ready.go:81] duration metric: took 4.781979ms waiting for pod "kube-apiserver-multinode-776386" in "kube-system" namespace to be "Ready" ...
	I0731 10:53:20.775672   96871 pod_ready.go:78] waiting up to 6m0s for pod "kube-controller-manager-multinode-776386" in "kube-system" namespace to be "Ready" ...
	I0731 10:53:20.775726   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/namespaces/kube-system/pods/kube-controller-manager-multinode-776386
	I0731 10:53:20.775737   96871 round_trippers.go:469] Request Headers:
	I0731 10:53:20.775748   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:53:20.775760   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:53:20.777539   96871 round_trippers.go:574] Response Status: 200 OK in 1 milliseconds
	I0731 10:53:20.777554   96871 round_trippers.go:577] Response Headers:
	I0731 10:53:20.777564   96871 round_trippers.go:580]     Audit-Id: 6f5a0be5-c16a-4b44-b560-22d1f0242d76
	I0731 10:53:20.777572   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:53:20.777580   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:53:20.777588   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:53:20.777597   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:53:20.777613   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:53:20 GMT
	I0731 10:53:20.777739   96871 request.go:1188] Response Body: {"kind":"Pod","apiVersion":"v1","metadata":{"name":"kube-controller-manager-multinode-776386","namespace":"kube-system","uid":"297aa029-28d8-4509-bda1-56f44d45e10e","resourceVersion":"293","creationTimestamp":"2023-07-31T10:52:32Z","labels":{"component":"kube-controller-manager","tier":"control-plane"},"annotations":{"kubernetes.io/config.hash":"585dd60c757682ef83c5c82367a18a2f","kubernetes.io/config.mirror":"585dd60c757682ef83c5c82367a18a2f","kubernetes.io/config.seen":"2023-07-31T10:52:28.461388115Z","kubernetes.io/config.source":"file"},"ownerReferences":[{"apiVersion":"v1","kind":"Node","name":"multinode-776386","uid":"4de4b338-d1a0-4f0a-acb8-6058d42c7868","controller":true}],"managedFields":[{"manager":"kubelet","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:52:32Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{".":{},"f:kubernetes.io/config.hash":{},"f:kubernetes.io/config.mirror":{},"f:kubernetes.i
o/config.seen":{},"f:kubernetes.io/config.source":{}},"f:labels":{".":{ [truncated 7794 chars]
	I0731 10:53:20.936430   96871 request.go:628] Waited for 158.309445ms due to client-side throttling, not priority and fairness, request: GET:https://192.168.58.2:8443/api/v1/nodes/multinode-776386
	I0731 10:53:20.936509   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-776386
	I0731 10:53:20.936521   96871 round_trippers.go:469] Request Headers:
	I0731 10:53:20.936531   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:53:20.936544   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:53:20.938843   96871 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
	I0731 10:53:20.938866   96871 round_trippers.go:577] Response Headers:
	I0731 10:53:20.938877   96871 round_trippers.go:580]     Audit-Id: b1d5ca9f-a565-4a08-951f-b05ec789d132
	I0731 10:53:20.938886   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:53:20.938895   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:53:20.938904   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:53:20.938914   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:53:20.938923   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:53:20 GMT
	I0731 10:53:20.939010   96871 request.go:1188] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-776386","uid":"4de4b338-d1a0-4f0a-acb8-6058d42c7868","resourceVersion":"428","creationTimestamp":"2023-07-31T10:52:31Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-776386","kubernetes.io/os":"linux","minikube.k8s.io/commit":"a7848ba25aaaad8ebb50e721c0d343e471188fc7","minikube.k8s.io/name":"multinode-776386","minikube.k8s.io/primary":"true","minikube.k8s.io/updated_at":"2023_07_31T10_52_35_0700","minikube.k8s.io/version":"v1.31.1","node-role.kubernetes.io/control-plane":"","node.kubernetes.io/exclude-from-external-load-balancers":""},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"unix:///var/run/crio/crio.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kubelet","operation":"Update","apiVe
rsion":"v1","time":"2023-07-31T10:52:31Z","fieldsType":"FieldsV1","fiel [truncated 5947 chars]
	I0731 10:53:20.939350   96871 pod_ready.go:92] pod "kube-controller-manager-multinode-776386" in "kube-system" namespace has status "Ready":"True"
	I0731 10:53:20.939364   96871 pod_ready.go:81] duration metric: took 163.68551ms waiting for pod "kube-controller-manager-multinode-776386" in "kube-system" namespace to be "Ready" ...
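The "Waited for ... due to client-side throttling" lines come from client-go's default client rate limiter (QPS 5, burst 10); each readiness poll issues two GETs (pod, then node), so back-to-back checks queue up. A sketch of raising the limits on a rest.Config; the values are illustrative, not what minikube configures:

    // cfg is the *rest.Config returned by clientcmd.BuildConfigFromFlags.
    cfg.QPS = 50    // client-go default is 5 requests/second
    cfg.Burst = 100 // client-go default burst is 10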
	I0731 10:53:20.939374   96871 pod_ready.go:78] waiting up to 6m0s for pod "kube-proxy-59xqp" in "kube-system" namespace to be "Ready" ...
	I0731 10:53:21.135827   96871 request.go:628] Waited for 196.398502ms due to client-side throttling, not priority and fairness, request: GET:https://192.168.58.2:8443/api/v1/namespaces/kube-system/pods/kube-proxy-59xqp
	I0731 10:53:21.135914   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/namespaces/kube-system/pods/kube-proxy-59xqp
	I0731 10:53:21.135926   96871 round_trippers.go:469] Request Headers:
	I0731 10:53:21.135938   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:53:21.135946   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:53:21.138223   96871 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
	I0731 10:53:21.138248   96871 round_trippers.go:577] Response Headers:
	I0731 10:53:21.138258   96871 round_trippers.go:580]     Audit-Id: fe33d19d-03aa-407f-ae16-ff9ba0aa1139
	I0731 10:53:21.138266   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:53:21.138274   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:53:21.138283   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:53:21.138292   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:53:21.138305   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:53:21 GMT
	I0731 10:53:21.138470   96871 request.go:1188] Response Body: {"kind":"Pod","apiVersion":"v1","metadata":{"name":"kube-proxy-59xqp","generateName":"kube-proxy-","namespace":"kube-system","uid":"e086520b-af9b-4c2e-adc1-cecdf0026890","resourceVersion":"407","creationTimestamp":"2023-07-31T10:52:47Z","labels":{"controller-revision-hash":"56999f657b","k8s-app":"kube-proxy","pod-template-generation":"1"},"ownerReferences":[{"apiVersion":"apps/v1","kind":"DaemonSet","name":"kube-proxy","uid":"8c4cc344-625b-46a6-ad67-8a006a415327","controller":true,"blockOwnerDeletion":true}],"managedFields":[{"manager":"kube-controller-manager","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:52:47Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:controller-revision-hash":{},"f:k8s-app":{},"f:pod-template-generation":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"8c4cc344-625b-46a6-ad67-8a006a415327\"}":{}}},"f:spec":{"f:affinity":{".":{},"f:nodeAffinity":{".":{},"f:r
equiredDuringSchedulingIgnoredDuringExecution":{}}},"f:containers":{"k: [truncated 5510 chars]
	I0731 10:53:21.335778   96871 request.go:628] Waited for 196.873273ms due to client-side throttling, not priority and fairness, request: GET:https://192.168.58.2:8443/api/v1/nodes/multinode-776386
	I0731 10:53:21.335835   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-776386
	I0731 10:53:21.335842   96871 round_trippers.go:469] Request Headers:
	I0731 10:53:21.335853   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:53:21.335873   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:53:21.338176   96871 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
	I0731 10:53:21.338214   96871 round_trippers.go:577] Response Headers:
	I0731 10:53:21.338225   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:53:21.338234   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:53:21.338244   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:53:21.338254   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:53:21 GMT
	I0731 10:53:21.338267   96871 round_trippers.go:580]     Audit-Id: f61f0f50-c285-43f7-a28b-db6c4f4d7cda
	I0731 10:53:21.338281   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:53:21.338382   96871 request.go:1188] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-776386","uid":"4de4b338-d1a0-4f0a-acb8-6058d42c7868","resourceVersion":"428","creationTimestamp":"2023-07-31T10:52:31Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-776386","kubernetes.io/os":"linux","minikube.k8s.io/commit":"a7848ba25aaaad8ebb50e721c0d343e471188fc7","minikube.k8s.io/name":"multinode-776386","minikube.k8s.io/primary":"true","minikube.k8s.io/updated_at":"2023_07_31T10_52_35_0700","minikube.k8s.io/version":"v1.31.1","node-role.kubernetes.io/control-plane":"","node.kubernetes.io/exclude-from-external-load-balancers":""},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"unix:///var/run/crio/crio.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kubelet","operation":"Update","apiVe
rsion":"v1","time":"2023-07-31T10:52:31Z","fieldsType":"FieldsV1","fiel [truncated 5947 chars]
	I0731 10:53:21.338707   96871 pod_ready.go:92] pod "kube-proxy-59xqp" in "kube-system" namespace has status "Ready":"True"
	I0731 10:53:21.338722   96871 pod_ready.go:81] duration metric: took 399.342616ms waiting for pod "kube-proxy-59xqp" in "kube-system" namespace to be "Ready" ...
	I0731 10:53:21.338731   96871 pod_ready.go:78] waiting up to 6m0s for pod "kube-scheduler-multinode-776386" in "kube-system" namespace to be "Ready" ...
	I0731 10:53:21.536163   96871 request.go:628] Waited for 197.359452ms due to client-side throttling, not priority and fairness, request: GET:https://192.168.58.2:8443/api/v1/namespaces/kube-system/pods/kube-scheduler-multinode-776386
	I0731 10:53:21.536229   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/namespaces/kube-system/pods/kube-scheduler-multinode-776386
	I0731 10:53:21.536240   96871 round_trippers.go:469] Request Headers:
	I0731 10:53:21.536248   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:53:21.536254   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:53:21.538569   96871 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
	I0731 10:53:21.538589   96871 round_trippers.go:577] Response Headers:
	I0731 10:53:21.538596   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:53:21.538601   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:53:21.538607   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:53:21.538612   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:53:21.538618   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:53:21 GMT
	I0731 10:53:21.538623   96871 round_trippers.go:580]     Audit-Id: 091a9ed1-0820-4f6f-9e59-1174e34d4343
	I0731 10:53:21.538756   96871 request.go:1188] Response Body: {"kind":"Pod","apiVersion":"v1","metadata":{"name":"kube-scheduler-multinode-776386","namespace":"kube-system","uid":"0feb8916-44df-4b76-88e8-2856a50f34b7","resourceVersion":"291","creationTimestamp":"2023-07-31T10:52:34Z","labels":{"component":"kube-scheduler","tier":"control-plane"},"annotations":{"kubernetes.io/config.hash":"828045a76f6580abdd29c2ef20a02983","kubernetes.io/config.mirror":"828045a76f6580abdd29c2ef20a02983","kubernetes.io/config.seen":"2023-07-31T10:52:28.461389129Z","kubernetes.io/config.source":"file"},"ownerReferences":[{"apiVersion":"v1","kind":"Node","name":"multinode-776386","uid":"4de4b338-d1a0-4f0a-acb8-6058d42c7868","controller":true}],"managedFields":[{"manager":"kubelet","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:52:34Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{".":{},"f:kubernetes.io/config.hash":{},"f:kubernetes.io/config.mirror":{},"f:kubernetes.io/config.seen":{},
"f:kubernetes.io/config.source":{}},"f:labels":{".":{},"f:component":{} [truncated 4676 chars]
	I0731 10:53:21.735424   96871 request.go:628] Waited for 196.301638ms due to client-side throttling, not priority and fairness, request: GET:https://192.168.58.2:8443/api/v1/nodes/multinode-776386
	I0731 10:53:21.735482   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-776386
	I0731 10:53:21.735487   96871 round_trippers.go:469] Request Headers:
	I0731 10:53:21.735499   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:53:21.735509   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:53:21.737908   96871 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
	I0731 10:53:21.737928   96871 round_trippers.go:577] Response Headers:
	I0731 10:53:21.737938   96871 round_trippers.go:580]     Audit-Id: 49ab753a-a944-432e-985c-acee724c975c
	I0731 10:53:21.737946   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:53:21.737956   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:53:21.737967   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:53:21.737977   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:53:21.737985   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:53:21 GMT
	I0731 10:53:21.738123   96871 request.go:1188] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-776386","uid":"4de4b338-d1a0-4f0a-acb8-6058d42c7868","resourceVersion":"428","creationTimestamp":"2023-07-31T10:52:31Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-776386","kubernetes.io/os":"linux","minikube.k8s.io/commit":"a7848ba25aaaad8ebb50e721c0d343e471188fc7","minikube.k8s.io/name":"multinode-776386","minikube.k8s.io/primary":"true","minikube.k8s.io/updated_at":"2023_07_31T10_52_35_0700","minikube.k8s.io/version":"v1.31.1","node-role.kubernetes.io/control-plane":"","node.kubernetes.io/exclude-from-external-load-balancers":""},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"unix:///var/run/crio/crio.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kubelet","operation":"Update","apiVe
rsion":"v1","time":"2023-07-31T10:52:31Z","fieldsType":"FieldsV1","fiel [truncated 5947 chars]
	I0731 10:53:21.738487   96871 pod_ready.go:92] pod "kube-scheduler-multinode-776386" in "kube-system" namespace has status "Ready":"True"
	I0731 10:53:21.738503   96871 pod_ready.go:81] duration metric: took 399.758196ms waiting for pod "kube-scheduler-multinode-776386" in "kube-system" namespace to be "Ready" ...
	I0731 10:53:21.738517   96871 pod_ready.go:38] duration metric: took 2.000664934s for extra waiting for all system-critical and pods with labels [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] to be "Ready" ...
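Each wait above follows the same pattern: fetch the pod, test its Ready condition, re-fetch the node, sleep, repeat. A sketch of that loop with client-go; the 500ms interval matches the spacing of the logged requests, but the names are illustrative and this is not minikube's pod_ready.go verbatim:

    import (
        "context"
        "time"

        corev1 "k8s.io/api/core/v1"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/apimachinery/pkg/util/wait"
        "k8s.io/client-go/kubernetes"
    )

    // waitPodReady polls until the pod's Ready condition is True, i.e. the
    // check behind the `has status "Ready":"True"` lines above.
    func waitPodReady(cs kubernetes.Interface, ns, name string) error {
        return wait.PollImmediate(500*time.Millisecond, 6*time.Minute, func() (bool, error) {
            pod, err := cs.CoreV1().Pods(ns).Get(context.TODO(), name, metav1.GetOptions{})
            if err != nil {
                return false, err
            }
            for _, c := range pod.Status.Conditions {
                if c.Type == corev1.PodReady {
                    return c.Status == corev1.ConditionTrue, nil
                }
            }
            return false, nil
        })
    }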
	I0731 10:53:21.738534   96871 api_server.go:52] waiting for apiserver process to appear ...
	I0731 10:53:21.738599   96871 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
	I0731 10:53:21.748154   96871 command_runner.go:130] > 1453
	I0731 10:53:21.748882   96871 api_server.go:72] duration metric: took 33.137618199s to wait for apiserver process to appear ...
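The process check shells out over SSH to pgrep; the "1453" above is the matched PID. A hedged Go sketch of the same call run locally (pgrep exits non-zero when nothing matches; -x exact, -n newest, -f full command line):

    import (
        "os/exec"
        "strings"
    )

    // apiserverPID mirrors the logged `sudo pgrep -xnf kube-apiserver.*minikube.*`.
    func apiserverPID() (string, error) {
        out, err := exec.Command("sudo", "pgrep", "-xnf", "kube-apiserver.*minikube.*").Output()
        if err != nil {
            return "", err // exit status 1 means no matching process yet
        }
        return strings.TrimSpace(string(out)), nil // "1453" in this run
    }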
	I0731 10:53:21.748900   96871 api_server.go:88] waiting for apiserver healthz status ...
	I0731 10:53:21.748916   96871 api_server.go:253] Checking apiserver healthz at https://192.168.58.2:8443/healthz ...
	I0731 10:53:21.753759   96871 api_server.go:279] https://192.168.58.2:8443/healthz returned 200:
	ok
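A self-contained sketch of the healthz probe above. The real client trusts the cluster CA from the minikube certs directory; skipping TLS verification here is purely to keep the example short:

    package main

    import (
        "crypto/tls"
        "fmt"
        "io"
        "net/http"
    )

    func main() {
        c := &http.Client{Transport: &http.Transport{
            TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, // demo only
        }}
        resp, err := c.Get("https://192.168.58.2:8443/healthz")
        if err != nil {
            panic(err)
        }
        defer resp.Body.Close()
        body, _ := io.ReadAll(resp.Body)
        fmt.Println(resp.StatusCode, string(body)) // 200 ok, matching the log
    }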
	I0731 10:53:21.753816   96871 round_trippers.go:463] GET https://192.168.58.2:8443/version
	I0731 10:53:21.753823   96871 round_trippers.go:469] Request Headers:
	I0731 10:53:21.753831   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:53:21.753839   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:53:21.754699   96871 round_trippers.go:574] Response Status: 200 OK in 0 milliseconds
	I0731 10:53:21.754713   96871 round_trippers.go:577] Response Headers:
	I0731 10:53:21.754720   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:53:21 GMT
	I0731 10:53:21.754725   96871 round_trippers.go:580]     Audit-Id: 6c892165-5634-43e8-bf75-acaa1ad1be32
	I0731 10:53:21.754731   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:53:21.754737   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:53:21.754744   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:53:21.754750   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:53:21.754758   96871 round_trippers.go:580]     Content-Length: 263
	I0731 10:53:21.754772   96871 request.go:1188] Response Body: {
	  "major": "1",
	  "minor": "27",
	  "gitVersion": "v1.27.3",
	  "gitCommit": "25b4e43193bcda6c7328a6d147b1fb73a33f1598",
	  "gitTreeState": "clean",
	  "buildDate": "2023-06-14T09:47:40Z",
	  "goVersion": "go1.20.5",
	  "compiler": "gc",
	  "platform": "linux/amd64"
	}
	I0731 10:53:21.754841   96871 api_server.go:141] control plane version: v1.27.3
	I0731 10:53:21.754855   96871 api_server.go:131] duration metric: took 5.949604ms to wait for apiserver health ...
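The /version payload above corresponds to apimachinery's version.Info struct, which has the same field names. Decoding it by hand looks like this (a sketch; resp is an *http.Response from a GET on /version, as in the previous example):

    // versionInfo mirrors the JSON fields printed in the log.
    type versionInfo struct {
        Major      string `json:"major"`
        Minor      string `json:"minor"`
        GitVersion string `json:"gitVersion"`
        Platform   string `json:"platform"`
    }

    var v versionInfo
    if err := json.NewDecoder(resp.Body).Decode(&v); err != nil {
        panic(err)
    }
    fmt.Println(v.GitVersion) // v1.27.3, the control plane version logged above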
	I0731 10:53:21.754862   96871 system_pods.go:43] waiting for kube-system pods to appear ...
	I0731 10:53:21.936283   96871 request.go:628] Waited for 181.343424ms due to client-side throttling, not priority and fairness, request: GET:https://192.168.58.2:8443/api/v1/namespaces/kube-system/pods
	I0731 10:53:21.936331   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/namespaces/kube-system/pods
	I0731 10:53:21.936335   96871 round_trippers.go:469] Request Headers:
	I0731 10:53:21.936344   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:53:21.936350   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:53:21.941541   96871 round_trippers.go:574] Response Status: 200 OK in 5 milliseconds
	I0731 10:53:21.941565   96871 round_trippers.go:577] Response Headers:
	I0731 10:53:21.941575   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:53:21 GMT
	I0731 10:53:21.941585   96871 round_trippers.go:580]     Audit-Id: 433cd67c-3441-4da8-b30e-30528909f8ca
	I0731 10:53:21.941594   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:53:21.941603   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:53:21.941615   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:53:21.941631   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:53:21.943011   96871 request.go:1188] Response Body: {"kind":"PodList","apiVersion":"v1","metadata":{"resourceVersion":"448"},"items":[{"metadata":{"name":"coredns-5d78c9869d-w86c5","generateName":"coredns-5d78c9869d-","namespace":"kube-system","uid":"fcb57c8f-9276-4e70-a275-2865ac997394","resourceVersion":"444","creationTimestamp":"2023-07-31T10:52:48Z","labels":{"k8s-app":"kube-dns","pod-template-hash":"5d78c9869d"},"ownerReferences":[{"apiVersion":"apps/v1","kind":"ReplicaSet","name":"coredns-5d78c9869d","uid":"78328e85-a38b-4605-8363-2be69c87f749","controller":true,"blockOwnerDeletion":true}],"managedFields":[{"manager":"kube-controller-manager","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:52:48Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:k8s-app":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"78328e85-a38b-4605-8363-2be69c87f749\"}":{}}},"f:spec":{"f:affinity":{".":{},"f:podAntiAffinity":{".":{},"f
:preferredDuringSchedulingIgnoredDuringExecution":{}}},"f:containers":{ [truncated 55613 chars]
	I0731 10:53:21.945737   96871 system_pods.go:59] 8 kube-system pods found
	I0731 10:53:21.945760   96871 system_pods.go:61] "coredns-5d78c9869d-w86c5" [fcb57c8f-9276-4e70-a275-2865ac997394] Running
	I0731 10:53:21.945765   96871 system_pods.go:61] "etcd-multinode-776386" [09d3804e-7c35-4295-af6c-b2f481c4903d] Running
	I0731 10:53:21.945769   96871 system_pods.go:61] "kindnet-zrs4n" [f8716d13-387d-4ea9-a4f0-db398d7e89d8] Running
	I0731 10:53:21.945773   96871 system_pods.go:61] "kube-apiserver-multinode-776386" [cd8aaec2-83b4-4346-9ad9-9167afe8b68f] Running
	I0731 10:53:21.945778   96871 system_pods.go:61] "kube-controller-manager-multinode-776386" [297aa029-28d8-4509-bda1-56f44d45e10e] Running
	I0731 10:53:21.945781   96871 system_pods.go:61] "kube-proxy-59xqp" [e086520b-af9b-4c2e-adc1-cecdf0026890] Running
	I0731 10:53:21.945785   96871 system_pods.go:61] "kube-scheduler-multinode-776386" [0feb8916-44df-4b76-88e8-2856a50f34b7] Running
	I0731 10:53:21.945789   96871 system_pods.go:61] "storage-provisioner" [c0c13dc5-a5eb-4156-af22-1a95ae2eedd9] Running
	I0731 10:53:21.945794   96871 system_pods.go:74] duration metric: took 190.928329ms to wait for pod list to return data ...
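The eight-pod summary above comes from a single List call over the kube-system namespace rather than per-pod GETs. A sketch, with cs built as in the earlier example:

    pods, err := cs.CoreV1().Pods("kube-system").List(context.TODO(), metav1.ListOptions{})
    if err != nil {
        panic(err)
    }
    for _, p := range pods.Items {
        // Matches the `"<name>" [<uid>] Running` lines in the log.
        fmt.Printf("%q [%s] %s\n", p.Name, p.UID, p.Status.Phase)
    }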
	I0731 10:53:21.945803   96871 default_sa.go:34] waiting for default service account to be created ...
	I0731 10:53:22.136279   96871 request.go:628] Waited for 190.40056ms due to client-side throttling, not priority and fairness, request: GET:https://192.168.58.2:8443/api/v1/namespaces/default/serviceaccounts
	I0731 10:53:22.136335   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/namespaces/default/serviceaccounts
	I0731 10:53:22.136358   96871 round_trippers.go:469] Request Headers:
	I0731 10:53:22.136370   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:53:22.136377   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:53:22.138676   96871 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
	I0731 10:53:22.138697   96871 round_trippers.go:577] Response Headers:
	I0731 10:53:22.138704   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:53:22.138709   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:53:22.138715   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:53:22.138722   96871 round_trippers.go:580]     Content-Length: 261
	I0731 10:53:22.138727   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:53:22 GMT
	I0731 10:53:22.138732   96871 round_trippers.go:580]     Audit-Id: 840c8939-9cde-462b-863b-f181c80cbac8
	I0731 10:53:22.138738   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:53:22.138762   96871 request.go:1188] Response Body: {"kind":"ServiceAccountList","apiVersion":"v1","metadata":{"resourceVersion":"448"},"items":[{"metadata":{"name":"default","namespace":"default","uid":"518a5c4d-a2bb-4d6b-be7d-ad1454b68675","resourceVersion":"355","creationTimestamp":"2023-07-31T10:52:48Z"}}]}
	I0731 10:53:22.138964   96871 default_sa.go:45] found service account: "default"
	I0731 10:53:22.138977   96871 default_sa.go:55] duration metric: took 193.168955ms for default service account to be created ...
	I0731 10:53:22.138985   96871 system_pods.go:116] waiting for k8s-apps to be running ...
	I0731 10:53:22.336436   96871 request.go:628] Waited for 197.36953ms due to client-side throttling, not priority and fairness, request: GET:https://192.168.58.2:8443/api/v1/namespaces/kube-system/pods
	I0731 10:53:22.336487   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/namespaces/kube-system/pods
	I0731 10:53:22.336492   96871 round_trippers.go:469] Request Headers:
	I0731 10:53:22.336499   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:53:22.336505   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:53:22.339657   96871 round_trippers.go:574] Response Status: 200 OK in 3 milliseconds
	I0731 10:53:22.339686   96871 round_trippers.go:577] Response Headers:
	I0731 10:53:22.339696   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:53:22 GMT
	I0731 10:53:22.339705   96871 round_trippers.go:580]     Audit-Id: 554629cf-a205-497b-b6d4-f205dbb4e170
	I0731 10:53:22.339714   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:53:22.339722   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:53:22.339731   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:53:22.339738   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:53:22.340108   96871 request.go:1188] Response Body: {"kind":"PodList","apiVersion":"v1","metadata":{"resourceVersion":"449"},"items":[{"metadata":{"name":"coredns-5d78c9869d-w86c5","generateName":"coredns-5d78c9869d-","namespace":"kube-system","uid":"fcb57c8f-9276-4e70-a275-2865ac997394","resourceVersion":"444","creationTimestamp":"2023-07-31T10:52:48Z","labels":{"k8s-app":"kube-dns","pod-template-hash":"5d78c9869d"},"ownerReferences":[{"apiVersion":"apps/v1","kind":"ReplicaSet","name":"coredns-5d78c9869d","uid":"78328e85-a38b-4605-8363-2be69c87f749","controller":true,"blockOwnerDeletion":true}],"managedFields":[{"manager":"kube-controller-manager","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:52:48Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:k8s-app":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"78328e85-a38b-4605-8363-2be69c87f749\"}":{}}},"f:spec":{"f:affinity":{".":{},"f:podAntiAffinity":{".":{},"f
:preferredDuringSchedulingIgnoredDuringExecution":{}}},"f:containers":{ [truncated 55613 chars]
	I0731 10:53:22.341776   96871 system_pods.go:86] 8 kube-system pods found
	I0731 10:53:22.341793   96871 system_pods.go:89] "coredns-5d78c9869d-w86c5" [fcb57c8f-9276-4e70-a275-2865ac997394] Running
	I0731 10:53:22.341798   96871 system_pods.go:89] "etcd-multinode-776386" [09d3804e-7c35-4295-af6c-b2f481c4903d] Running
	I0731 10:53:22.341802   96871 system_pods.go:89] "kindnet-zrs4n" [f8716d13-387d-4ea9-a4f0-db398d7e89d8] Running
	I0731 10:53:22.341806   96871 system_pods.go:89] "kube-apiserver-multinode-776386" [cd8aaec2-83b4-4346-9ad9-9167afe8b68f] Running
	I0731 10:53:22.341811   96871 system_pods.go:89] "kube-controller-manager-multinode-776386" [297aa029-28d8-4509-bda1-56f44d45e10e] Running
	I0731 10:53:22.341815   96871 system_pods.go:89] "kube-proxy-59xqp" [e086520b-af9b-4c2e-adc1-cecdf0026890] Running
	I0731 10:53:22.341819   96871 system_pods.go:89] "kube-scheduler-multinode-776386" [0feb8916-44df-4b76-88e8-2856a50f34b7] Running
	I0731 10:53:22.341823   96871 system_pods.go:89] "storage-provisioner" [c0c13dc5-a5eb-4156-af22-1a95ae2eedd9] Running
	I0731 10:53:22.341828   96871 system_pods.go:126] duration metric: took 202.840228ms to wait for k8s-apps to be running ...
	I0731 10:53:22.341837   96871 system_svc.go:44] waiting for kubelet service to be running ....
	I0731 10:53:22.341882   96871 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet
	I0731 10:53:22.352411   96871 system_svc.go:56] duration metric: took 10.565583ms WaitForService to wait for kubelet.
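The kubelet check only cares about the exit status of systemctl is-active (0 means active; --quiet suppresses output). A sketch of the logged invocation via os/exec:

    // err is nil iff the unit is active; the exit code is the whole answer.
    err := exec.Command("sudo", "systemctl", "is-active", "--quiet", "service", "kubelet").Run()
    kubeletRunning := err == nil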
	I0731 10:53:22.352440   96871 kubeadm.go:581] duration metric: took 33.741178315s to wait for : map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] ...
	I0731 10:53:22.352459   96871 node_conditions.go:102] verifying NodePressure condition ...
	I0731 10:53:22.535863   96871 request.go:628] Waited for 183.336343ms due to client-side throttling, not priority and fairness, request: GET:https://192.168.58.2:8443/api/v1/nodes
	I0731 10:53:22.535919   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes
	I0731 10:53:22.535923   96871 round_trippers.go:469] Request Headers:
	I0731 10:53:22.535931   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:53:22.535938   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:53:22.538301   96871 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
	I0731 10:53:22.538325   96871 round_trippers.go:577] Response Headers:
	I0731 10:53:22.538338   96871 round_trippers.go:580]     Audit-Id: 02cae324-418e-4f94-b2d9-544d6d82e180
	I0731 10:53:22.538346   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:53:22.538353   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:53:22.538361   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:53:22.538373   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:53:22.538386   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:53:22 GMT
	I0731 10:53:22.538500   96871 request.go:1188] Response Body: {"kind":"NodeList","apiVersion":"v1","metadata":{"resourceVersion":"449"},"items":[{"metadata":{"name":"multinode-776386","uid":"4de4b338-d1a0-4f0a-acb8-6058d42c7868","resourceVersion":"428","creationTimestamp":"2023-07-31T10:52:31Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-776386","kubernetes.io/os":"linux","minikube.k8s.io/commit":"a7848ba25aaaad8ebb50e721c0d343e471188fc7","minikube.k8s.io/name":"multinode-776386","minikube.k8s.io/primary":"true","minikube.k8s.io/updated_at":"2023_07_31T10_52_35_0700","minikube.k8s.io/version":"v1.31.1","node-role.kubernetes.io/control-plane":"","node.kubernetes.io/exclude-from-external-load-balancers":""},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"unix:///var/run/crio/crio.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields
":[{"manager":"kubelet","operation":"Update","apiVersion":"v1","time":" [truncated 6000 chars]
	I0731 10:53:22.538895   96871 node_conditions.go:122] node storage ephemeral capacity is 304681132Ki
	I0731 10:53:22.538915   96871 node_conditions.go:123] node cpu capacity is 8
	I0731 10:53:22.538924   96871 node_conditions.go:105] duration metric: took 186.461752ms to run NodePressure ...
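The NodePressure step reads per-node capacity straight off the NodeList shown above. A sketch of extracting the two values the log reports:

    nodes, err := cs.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
    if err != nil {
        panic(err)
    }
    for _, n := range nodes.Items {
        cpu := n.Status.Capacity[corev1.ResourceCPU]
        eph := n.Status.Capacity[corev1.ResourceEphemeralStorage]
        // Prints "multinode-776386 cpu: 8 ephemeral: 304681132Ki" for this run.
        fmt.Println(n.Name, "cpu:", cpu.String(), "ephemeral:", eph.String())
    }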
	I0731 10:53:22.538934   96871 start.go:228] waiting for startup goroutines ...
	I0731 10:53:22.538940   96871 start.go:233] waiting for cluster config update ...
	I0731 10:53:22.538949   96871 start.go:242] writing updated cluster config ...
	I0731 10:53:22.541340   96871 out.go:177] 
	I0731 10:53:22.543067   96871 config.go:182] Loaded profile config "multinode-776386": Driver=docker, ContainerRuntime=crio, KubernetesVersion=v1.27.3
	I0731 10:53:22.543140   96871 profile.go:148] Saving config to /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/multinode-776386/config.json ...
	I0731 10:53:22.545094   96871 out.go:177] * Starting worker node multinode-776386-m02 in cluster multinode-776386
	I0731 10:53:22.546617   96871 cache.go:122] Beginning downloading kic base image for docker with crio
	I0731 10:53:22.548095   96871 out.go:177] * Pulling base image ...
	I0731 10:53:22.549874   96871 preload.go:132] Checking if preload exists for k8s version v1.27.3 and runtime crio
	I0731 10:53:22.549888   96871 cache.go:57] Caching tarball of preloaded images
	I0731 10:53:22.549952   96871 image.go:79] Checking for gcr.io/k8s-minikube/kicbase:v0.0.40@sha256:8cadf23777709e43eca447c47a45f5a4635615129267ce025193040ec92a1631 in local docker daemon
	I0731 10:53:22.549962   96871 preload.go:174] Found /home/jenkins/minikube-integration/16969-5799/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.27.3-cri-o-overlay-amd64.tar.lz4 in cache, skipping download
	I0731 10:53:22.550067   96871 cache.go:60] Finished verifying existence of preloaded tar for  v1.27.3 on crio
	I0731 10:53:22.550147   96871 profile.go:148] Saving config to /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/multinode-776386/config.json ...
	I0731 10:53:22.565668   96871 image.go:83] Found gcr.io/k8s-minikube/kicbase:v0.0.40@sha256:8cadf23777709e43eca447c47a45f5a4635615129267ce025193040ec92a1631 in local docker daemon, skipping pull
	I0731 10:53:22.565688   96871 cache.go:145] gcr.io/k8s-minikube/kicbase:v0.0.40@sha256:8cadf23777709e43eca447c47a45f5a4635615129267ce025193040ec92a1631 exists in daemon, skipping load
	I0731 10:53:22.565700   96871 cache.go:195] Successfully downloaded all kic artifacts
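Finding the kic base image in the local daemon is what lets the pull be skipped. One way to express that existence check (an assumption for illustration; minikube's image.go may inspect the daemon differently):

    // `docker image inspect` exits non-zero when the reference is absent.
    // Digest-only reference, matching the image logged above.
    ref := "gcr.io/k8s-minikube/kicbase@sha256:8cadf23777709e43eca447c47a45f5a4635615129267ce025193040ec92a1631"
    inDaemon := exec.Command("docker", "image", "inspect", ref).Run() == nil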
	I0731 10:53:22.565725   96871 start.go:365] acquiring machines lock for multinode-776386-m02: {Name:mk3746ef97c1d6b5e2375958680d06fac4fe84fe Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
	I0731 10:53:22.565832   96871 start.go:369] acquired machines lock for "multinode-776386-m02" in 85.769µs
	I0731 10:53:22.565859   96871 start.go:93] Provisioning new machine with config: &{Name:multinode-776386 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase:v0.0.40@sha256:8cadf23777709e43eca447c47a45f5a4635615129267ce025193040ec92a1631 Memory:2200 CPUs:2 DiskSize:20000 VMDriver: Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:0 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.27.3 ClusterName:multinode-776386 Namespace:default APIServerName:minikubeCA APIServerNames:[] A
PIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:crio CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI: NodeIP: NodePort:8443 NodeName:} Nodes:[{Name: IP:192.168.58.2 Port:8443 KubernetesVersion:v1.27.3 ContainerRuntime:crio ControlPlane:true Worker:true} {Name:m02 IP: Port:0 KubernetesVersion:v1.27.3 ContainerRuntime:crio ControlPlane:false Worker:true}] Addons:map[default-storageclass:true storage-provisioner:true] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:true ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L Mou
ntGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0} &{Name:m02 IP: Port:0 KubernetesVersion:v1.27.3 ContainerRuntime:crio ControlPlane:false Worker:true}
	I0731 10:53:22.565946   96871 start.go:125] createHost starting for "m02" (driver="docker")
	I0731 10:53:22.568184   96871 out.go:204] * Creating docker container (CPUs=2, Memory=2200MB) ...
	I0731 10:53:22.568294   96871 start.go:159] libmachine.API.Create for "multinode-776386" (driver="docker")
	I0731 10:53:22.568323   96871 client.go:168] LocalClient.Create starting
	I0731 10:53:22.568405   96871 main.go:141] libmachine: Reading certificate data from /home/jenkins/minikube-integration/16969-5799/.minikube/certs/ca.pem
	I0731 10:53:22.568439   96871 main.go:141] libmachine: Decoding PEM data...
	I0731 10:53:22.568462   96871 main.go:141] libmachine: Parsing certificate...
	I0731 10:53:22.568530   96871 main.go:141] libmachine: Reading certificate data from /home/jenkins/minikube-integration/16969-5799/.minikube/certs/cert.pem
	I0731 10:53:22.568561   96871 main.go:141] libmachine: Decoding PEM data...
	I0731 10:53:22.568577   96871 main.go:141] libmachine: Parsing certificate...
	I0731 10:53:22.568779   96871 cli_runner.go:164] Run: docker network inspect multinode-776386 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
	I0731 10:53:22.583728   96871 network_create.go:76] Found existing network {name:multinode-776386 subnet:0xc00051cd20 gateway:[0 0 0 0 0 0 0 0 0 0 255 255 192 168 58 1] mtu:1500}
	I0731 10:53:22.583759   96871 kic.go:117] calculated static IP "192.168.58.3" for the "multinode-776386-m02" container
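
The "calculated static IP" step assigns each node the network gateway address plus a per-node offset, so m02 on the 192.168.58.1/24 network lands on 192.168.58.3. A rough Go sketch of that arithmetic (the /24 assumption and index offset are inferred from the log, not minikube's exact code):

    package main

    import (
    	"fmt"
    	"net"
    )

    // nextNodeIP offsets the last octet of the gateway by the node index.
    func nextNodeIP(gateway string, nodeIndex int) (string, error) {
    	ip := net.ParseIP(gateway).To4()
    	if ip == nil {
    		return "", fmt.Errorf("not an IPv4 address: %s", gateway)
    	}
    	out := make(net.IP, len(ip))
    	copy(out, ip)
    	out[3] += byte(nodeIndex) // gateway .1 + index 2 => .3 for m02
    	return out.String(), nil
    }

    func main() {
    	ip, err := nextNodeIP("192.168.58.1", 2)
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println(ip) // 192.168.58.3
    }
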
	I0731 10:53:22.583816   96871 cli_runner.go:164] Run: docker ps -a --format {{.Names}}
	I0731 10:53:22.599453   96871 cli_runner.go:164] Run: docker volume create multinode-776386-m02 --label name.minikube.sigs.k8s.io=multinode-776386-m02 --label created_by.minikube.sigs.k8s.io=true
	I0731 10:53:22.617007   96871 oci.go:103] Successfully created a docker volume multinode-776386-m02
	I0731 10:53:22.617079   96871 cli_runner.go:164] Run: docker run --rm --name multinode-776386-m02-preload-sidecar --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=multinode-776386-m02 --entrypoint /usr/bin/test -v multinode-776386-m02:/var gcr.io/k8s-minikube/kicbase:v0.0.40@sha256:8cadf23777709e43eca447c47a45f5a4635615129267ce025193040ec92a1631 -d /var/lib
	I0731 10:53:23.120845   96871 oci.go:107] Successfully prepared a docker volume multinode-776386-m02
	I0731 10:53:23.120901   96871 preload.go:132] Checking if preload exists for k8s version v1.27.3 and runtime crio
	I0731 10:53:23.120921   96871 kic.go:190] Starting extracting preloaded images to volume ...
	I0731 10:53:23.120974   96871 cli_runner.go:164] Run: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/16969-5799/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.27.3-cri-o-overlay-amd64.tar.lz4:/preloaded.tar:ro -v multinode-776386-m02:/extractDir gcr.io/k8s-minikube/kicbase:v0.0.40@sha256:8cadf23777709e43eca447c47a45f5a4635615129267ce025193040ec92a1631 -I lz4 -xf /preloaded.tar -C /extractDir
	I0731 10:53:27.920312   96871 cli_runner.go:217] Completed: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/16969-5799/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.27.3-cri-o-overlay-amd64.tar.lz4:/preloaded.tar:ro -v multinode-776386-m02:/extractDir gcr.io/k8s-minikube/kicbase:v0.0.40@sha256:8cadf23777709e43eca447c47a45f5a4635615129267ce025193040ec92a1631 -I lz4 -xf /preloaded.tar -C /extractDir: (4.799294337s)
	I0731 10:53:27.920352   96871 kic.go:199] duration metric: took 4.799427 seconds to extract preloaded images to volume
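
The preload step mounts the lz4 tarball read-only and untars it into the node's volume inside a throwaway container. An equivalent Go sketch shelling out to docker (paths and image digest taken from the log lines above; adjust locally):

    package main

    import (
    	"fmt"
    	"os/exec"
    	"time"
    )

    func main() {
    	tarball := "/home/jenkins/minikube-integration/16969-5799/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.27.3-cri-o-overlay-amd64.tar.lz4"
    	image := "gcr.io/k8s-minikube/kicbase:v0.0.40"
    	// Mount the tarball read-only, the node volume at /extractDir, and untar.
    	cmd := exec.Command("docker", "run", "--rm",
    		"--entrypoint", "/usr/bin/tar",
    		"-v", tarball+":/preloaded.tar:ro",
    		"-v", "multinode-776386-m02:/extractDir",
    		image, "-I", "lz4", "-xf", "/preloaded.tar", "-C", "/extractDir")
    	start := time.Now()
    	if out, err := cmd.CombinedOutput(); err != nil {
    		panic(fmt.Sprintf("extract failed: %v\n%s", err, out))
    	}
    	fmt.Printf("extracted preload in %s\n", time.Since(start)) // ~4.8s in this run
    }
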
	W0731 10:53:27.920488   96871 cgroups_linux.go:77] Your kernel does not support swap limit capabilities or the cgroup is not mounted.
	I0731 10:53:27.920598   96871 cli_runner.go:164] Run: docker info --format "'{{json .SecurityOptions}}'"
	I0731 10:53:27.969814   96871 cli_runner.go:164] Run: docker run -d -t --privileged --security-opt seccomp=unconfined --tmpfs /tmp --tmpfs /run -v /lib/modules:/lib/modules:ro --hostname multinode-776386-m02 --name multinode-776386-m02 --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=multinode-776386-m02 --label role.minikube.sigs.k8s.io= --label mode.minikube.sigs.k8s.io=multinode-776386-m02 --network multinode-776386 --ip 192.168.58.3 --volume multinode-776386-m02:/var --security-opt apparmor=unconfined --memory=2200mb --cpus=2 -e container=docker --expose 8443 --publish=127.0.0.1::8443 --publish=127.0.0.1::22 --publish=127.0.0.1::2376 --publish=127.0.0.1::5000 --publish=127.0.0.1::32443 gcr.io/k8s-minikube/kicbase:v0.0.40@sha256:8cadf23777709e43eca447c47a45f5a4635615129267ce025193040ec92a1631
	I0731 10:53:28.247406   96871 cli_runner.go:164] Run: docker container inspect multinode-776386-m02 --format={{.State.Running}}
	I0731 10:53:28.264426   96871 cli_runner.go:164] Run: docker container inspect multinode-776386-m02 --format={{.State.Status}}
	I0731 10:53:28.281002   96871 cli_runner.go:164] Run: docker exec multinode-776386-m02 stat /var/lib/dpkg/alternatives/iptables
	I0731 10:53:28.326829   96871 oci.go:144] the created container "multinode-776386-m02" has a running status.
	I0731 10:53:28.326868   96871 kic.go:221] Creating ssh key for kic: /home/jenkins/minikube-integration/16969-5799/.minikube/machines/multinode-776386-m02/id_rsa...
	I0731 10:53:28.451924   96871 vm_assets.go:163] NewFileAsset: /home/jenkins/minikube-integration/16969-5799/.minikube/machines/multinode-776386-m02/id_rsa.pub -> /home/docker/.ssh/authorized_keys
	I0731 10:53:28.451980   96871 kic_runner.go:191] docker (temp): /home/jenkins/minikube-integration/16969-5799/.minikube/machines/multinode-776386-m02/id_rsa.pub --> /home/docker/.ssh/authorized_keys (381 bytes)
	I0731 10:53:28.471513   96871 cli_runner.go:164] Run: docker container inspect multinode-776386-m02 --format={{.State.Status}}
	I0731 10:53:28.486405   96871 kic_runner.go:93] Run: chown docker:docker /home/docker/.ssh/authorized_keys
	I0731 10:53:28.486424   96871 kic_runner.go:114] Args: [docker exec --privileged multinode-776386-m02 chown docker:docker /home/docker/.ssh/authorized_keys]
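
The SSH setup generates a fresh RSA keypair on the host, then copies the public half into the node's /home/docker/.ssh/authorized_keys (the ~381-byte payload in the log). A self-contained sketch using the standard library plus golang.org/x/crypto/ssh; the private-key path is shortened here:

    package main

    import (
    	"crypto/rand"
    	"crypto/rsa"
    	"crypto/x509"
    	"encoding/pem"
    	"fmt"
    	"os"

    	"golang.org/x/crypto/ssh"
    )

    func main() {
    	key, err := rsa.GenerateKey(rand.Reader, 2048)
    	if err != nil {
    		panic(err)
    	}
    	// Private key, PEM-encoded, mode 0600 (the log writes this as id_rsa).
    	pemBytes := pem.EncodeToMemory(&pem.Block{
    		Type:  "RSA PRIVATE KEY",
    		Bytes: x509.MarshalPKCS1PrivateKey(key),
    	})
    	if err := os.WriteFile("id_rsa", pemBytes, 0o600); err != nil {
    		panic(err)
    	}
    	pub, err := ssh.NewPublicKey(&key.PublicKey)
    	if err != nil {
    		panic(err)
    	}
    	// This is the authorized_keys line copied into the node container.
    	fmt.Printf("%s", ssh.MarshalAuthorizedKey(pub))
    }
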
	I0731 10:53:28.550441   96871 cli_runner.go:164] Run: docker container inspect multinode-776386-m02 --format={{.State.Status}}
	I0731 10:53:28.566013   96871 machine.go:88] provisioning docker machine ...
	I0731 10:53:28.566053   96871 ubuntu.go:169] provisioning hostname "multinode-776386-m02"
	I0731 10:53:28.566110   96871 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" multinode-776386-m02
	I0731 10:53:28.583333   96871 main.go:141] libmachine: Using SSH client type: native
	I0731 10:53:28.583797   96871 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x80eb00] 0x811ba0 <nil>  [] 0s} 127.0.0.1 32852 <nil> <nil>}
	I0731 10:53:28.583814   96871 main.go:141] libmachine: About to run SSH command:
	sudo hostname multinode-776386-m02 && echo "multinode-776386-m02" | sudo tee /etc/hostname
	I0731 10:53:28.584475   96871 main.go:141] libmachine: Error dialing TCP: ssh: handshake failed: read tcp 127.0.0.1:38312->127.0.0.1:32852: read: connection reset by peer
	I0731 10:53:31.723719   96871 main.go:141] libmachine: SSH cmd err, output: <nil>: multinode-776386-m02
	
	I0731 10:53:31.723787   96871 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" multinode-776386-m02
	I0731 10:53:31.740439   96871 main.go:141] libmachine: Using SSH client type: native
	I0731 10:53:31.740842   96871 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x80eb00] 0x811ba0 <nil>  [] 0s} 127.0.0.1 32852 <nil> <nil>}
	I0731 10:53:31.740862   96871 main.go:141] libmachine: About to run SSH command:
	
			if ! grep -xq '.*\smultinode-776386-m02' /etc/hosts; then
				if grep -xq '127.0.1.1\s.*' /etc/hosts; then
					sudo sed -i 's/^127.0.1.1\s.*/127.0.1.1 multinode-776386-m02/g' /etc/hosts;
				else 
					echo '127.0.1.1 multinode-776386-m02' | sudo tee -a /etc/hosts; 
				fi
			fi
	I0731 10:53:31.861789   96871 main.go:141] libmachine: SSH cmd err, output: <nil>: 
	I0731 10:53:31.861818   96871 ubuntu.go:175] set auth options {CertDir:/home/jenkins/minikube-integration/16969-5799/.minikube CaCertPath:/home/jenkins/minikube-integration/16969-5799/.minikube/certs/ca.pem CaPrivateKeyPath:/home/jenkins/minikube-integration/16969-5799/.minikube/certs/ca-key.pem CaCertRemotePath:/etc/docker/ca.pem ServerCertPath:/home/jenkins/minikube-integration/16969-5799/.minikube/machines/server.pem ServerKeyPath:/home/jenkins/minikube-integration/16969-5799/.minikube/machines/server-key.pem ClientKeyPath:/home/jenkins/minikube-integration/16969-5799/.minikube/certs/key.pem ServerCertRemotePath:/etc/docker/server.pem ServerKeyRemotePath:/etc/docker/server-key.pem ClientCertPath:/home/jenkins/minikube-integration/16969-5799/.minikube/certs/cert.pem ServerCertSANs:[] StorePath:/home/jenkins/minikube-integration/16969-5799/.minikube}
	I0731 10:53:31.861851   96871 ubuntu.go:177] setting up certificates
	I0731 10:53:31.861861   96871 provision.go:83] configureAuth start
	I0731 10:53:31.861918   96871 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" multinode-776386-m02
	I0731 10:53:31.877678   96871 provision.go:138] copyHostCerts
	I0731 10:53:31.877711   96871 vm_assets.go:163] NewFileAsset: /home/jenkins/minikube-integration/16969-5799/.minikube/certs/key.pem -> /home/jenkins/minikube-integration/16969-5799/.minikube/key.pem
	I0731 10:53:31.877734   96871 exec_runner.go:144] found /home/jenkins/minikube-integration/16969-5799/.minikube/key.pem, removing ...
	I0731 10:53:31.877742   96871 exec_runner.go:203] rm: /home/jenkins/minikube-integration/16969-5799/.minikube/key.pem
	I0731 10:53:31.877805   96871 exec_runner.go:151] cp: /home/jenkins/minikube-integration/16969-5799/.minikube/certs/key.pem --> /home/jenkins/minikube-integration/16969-5799/.minikube/key.pem (1675 bytes)
	I0731 10:53:31.877878   96871 vm_assets.go:163] NewFileAsset: /home/jenkins/minikube-integration/16969-5799/.minikube/certs/ca.pem -> /home/jenkins/minikube-integration/16969-5799/.minikube/ca.pem
	I0731 10:53:31.877898   96871 exec_runner.go:144] found /home/jenkins/minikube-integration/16969-5799/.minikube/ca.pem, removing ...
	I0731 10:53:31.877903   96871 exec_runner.go:203] rm: /home/jenkins/minikube-integration/16969-5799/.minikube/ca.pem
	I0731 10:53:31.877929   96871 exec_runner.go:151] cp: /home/jenkins/minikube-integration/16969-5799/.minikube/certs/ca.pem --> /home/jenkins/minikube-integration/16969-5799/.minikube/ca.pem (1082 bytes)
	I0731 10:53:31.877970   96871 vm_assets.go:163] NewFileAsset: /home/jenkins/minikube-integration/16969-5799/.minikube/certs/cert.pem -> /home/jenkins/minikube-integration/16969-5799/.minikube/cert.pem
	I0731 10:53:31.877984   96871 exec_runner.go:144] found /home/jenkins/minikube-integration/16969-5799/.minikube/cert.pem, removing ...
	I0731 10:53:31.877988   96871 exec_runner.go:203] rm: /home/jenkins/minikube-integration/16969-5799/.minikube/cert.pem
	I0731 10:53:31.878009   96871 exec_runner.go:151] cp: /home/jenkins/minikube-integration/16969-5799/.minikube/certs/cert.pem --> /home/jenkins/minikube-integration/16969-5799/.minikube/cert.pem (1123 bytes)
	I0731 10:53:31.878053   96871 provision.go:112] generating server cert: /home/jenkins/minikube-integration/16969-5799/.minikube/machines/server.pem ca-key=/home/jenkins/minikube-integration/16969-5799/.minikube/certs/ca.pem private-key=/home/jenkins/minikube-integration/16969-5799/.minikube/certs/ca-key.pem org=jenkins.multinode-776386-m02 san=[192.168.58.3 127.0.0.1 localhost 127.0.0.1 minikube multinode-776386-m02]
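
The server certificate is signed by the minikube CA with the node IP, loopback, and hostnames as SANs, and an org of jenkins.multinode-776386-m02. A sketch with crypto/x509; for self-containment it generates a throwaway CA instead of loading ca.pem/ca-key.pem from the .minikube/certs directory, and error handling is elided for brevity:

    package main

    import (
    	"crypto/rand"
    	"crypto/rsa"
    	"crypto/x509"
    	"crypto/x509/pkix"
    	"encoding/pem"
    	"math/big"
    	"net"
    	"os"
    	"time"
    )

    func main() {
    	// Throwaway CA (the real run loads the existing minikubeCA key material).
    	caKey, _ := rsa.GenerateKey(rand.Reader, 2048)
    	caTmpl := &x509.Certificate{
    		SerialNumber:          big.NewInt(1),
    		Subject:               pkix.Name{CommonName: "minikubeCA"},
    		NotBefore:             time.Now(),
    		NotAfter:              time.Now().Add(26280 * time.Hour), // matches CertExpiration above
    		IsCA:                  true,
    		KeyUsage:              x509.KeyUsageCertSign,
    		BasicConstraintsValid: true,
    	}
    	caDER, _ := x509.CreateCertificate(rand.Reader, caTmpl, caTmpl, &caKey.PublicKey, caKey)
    	caCert, _ := x509.ParseCertificate(caDER)

    	srvKey, _ := rsa.GenerateKey(rand.Reader, 2048)
    	srvTmpl := &x509.Certificate{
    		SerialNumber: big.NewInt(2),
    		Subject:      pkix.Name{Organization: []string{"jenkins.multinode-776386-m02"}},
    		NotBefore:    time.Now(),
    		NotAfter:     time.Now().Add(26280 * time.Hour),
    		KeyUsage:     x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment,
    		ExtKeyUsage:  []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
    		// SANs match the log line: node IP, loopback, and hostnames.
    		IPAddresses: []net.IP{net.ParseIP("192.168.58.3"), net.ParseIP("127.0.0.1")},
    		DNSNames:    []string{"localhost", "minikube", "multinode-776386-m02"},
    	}
    	srvDER, _ := x509.CreateCertificate(rand.Reader, srvTmpl, caCert, &srvKey.PublicKey, caKey)
    	pem.Encode(os.Stdout, &pem.Block{Type: "CERTIFICATE", Bytes: srvDER})
    }
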
	I0731 10:53:31.957361   96871 provision.go:172] copyRemoteCerts
	I0731 10:53:31.957435   96871 ssh_runner.go:195] Run: sudo mkdir -p /etc/docker /etc/docker /etc/docker
	I0731 10:53:31.957472   96871 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" multinode-776386-m02
	I0731 10:53:31.973348   96871 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32852 SSHKeyPath:/home/jenkins/minikube-integration/16969-5799/.minikube/machines/multinode-776386-m02/id_rsa Username:docker}
	I0731 10:53:32.061866   96871 vm_assets.go:163] NewFileAsset: /home/jenkins/minikube-integration/16969-5799/.minikube/certs/ca.pem -> /etc/docker/ca.pem
	I0731 10:53:32.061919   96871 ssh_runner.go:362] scp /home/jenkins/minikube-integration/16969-5799/.minikube/certs/ca.pem --> /etc/docker/ca.pem (1082 bytes)
	I0731 10:53:32.081742   96871 vm_assets.go:163] NewFileAsset: /home/jenkins/minikube-integration/16969-5799/.minikube/machines/server.pem -> /etc/docker/server.pem
	I0731 10:53:32.081798   96871 ssh_runner.go:362] scp /home/jenkins/minikube-integration/16969-5799/.minikube/machines/server.pem --> /etc/docker/server.pem (1237 bytes)
	I0731 10:53:32.101463   96871 vm_assets.go:163] NewFileAsset: /home/jenkins/minikube-integration/16969-5799/.minikube/machines/server-key.pem -> /etc/docker/server-key.pem
	I0731 10:53:32.101505   96871 ssh_runner.go:362] scp /home/jenkins/minikube-integration/16969-5799/.minikube/machines/server-key.pem --> /etc/docker/server-key.pem (1675 bytes)
	I0731 10:53:32.121720   96871 provision.go:86] duration metric: configureAuth took 259.846002ms
	I0731 10:53:32.121743   96871 ubuntu.go:193] setting minikube options for container-runtime
	I0731 10:53:32.121908   96871 config.go:182] Loaded profile config "multinode-776386": Driver=docker, ContainerRuntime=crio, KubernetesVersion=v1.27.3
	I0731 10:53:32.121996   96871 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" multinode-776386-m02
	I0731 10:53:32.137719   96871 main.go:141] libmachine: Using SSH client type: native
	I0731 10:53:32.138104   96871 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x80eb00] 0x811ba0 <nil>  [] 0s} 127.0.0.1 32852 <nil> <nil>}
	I0731 10:53:32.138120   96871 main.go:141] libmachine: About to run SSH command:
	sudo mkdir -p /etc/sysconfig && printf %!s(MISSING) "
	CRIO_MINIKUBE_OPTIONS='--insecure-registry 10.96.0.0/12 '
	" | sudo tee /etc/sysconfig/crio.minikube && sudo systemctl restart crio
	I0731 10:53:32.345408   96871 main.go:141] libmachine: SSH cmd err, output: <nil>: 
	CRIO_MINIKUBE_OPTIONS='--insecure-registry 10.96.0.0/12 '
	
	I0731 10:53:32.345438   96871 machine.go:91] provisioned docker machine in 3.779403997s
	I0731 10:53:32.345450   96871 client.go:171] LocalClient.Create took 9.777116916s
	I0731 10:53:32.345470   96871 start.go:167] duration metric: libmachine.API.Create for "multinode-776386" took 9.77717826s
	I0731 10:53:32.345483   96871 start.go:300] post-start starting for "multinode-776386-m02" (driver="docker")
	I0731 10:53:32.345490   96871 start.go:329] creating required directories: [/etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs]
	I0731 10:53:32.345539   96871 ssh_runner.go:195] Run: sudo mkdir -p /etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs
	I0731 10:53:32.345571   96871 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" multinode-776386-m02
	I0731 10:53:32.361326   96871 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32852 SSHKeyPath:/home/jenkins/minikube-integration/16969-5799/.minikube/machines/multinode-776386-m02/id_rsa Username:docker}
	I0731 10:53:32.450355   96871 ssh_runner.go:195] Run: cat /etc/os-release
	I0731 10:53:32.453048   96871 command_runner.go:130] > PRETTY_NAME="Ubuntu 22.04.2 LTS"
	I0731 10:53:32.453064   96871 command_runner.go:130] > NAME="Ubuntu"
	I0731 10:53:32.453072   96871 command_runner.go:130] > VERSION_ID="22.04"
	I0731 10:53:32.453077   96871 command_runner.go:130] > VERSION="22.04.2 LTS (Jammy Jellyfish)"
	I0731 10:53:32.453082   96871 command_runner.go:130] > VERSION_CODENAME=jammy
	I0731 10:53:32.453085   96871 command_runner.go:130] > ID=ubuntu
	I0731 10:53:32.453092   96871 command_runner.go:130] > ID_LIKE=debian
	I0731 10:53:32.453100   96871 command_runner.go:130] > HOME_URL="https://www.ubuntu.com/"
	I0731 10:53:32.453108   96871 command_runner.go:130] > SUPPORT_URL="https://help.ubuntu.com/"
	I0731 10:53:32.453125   96871 command_runner.go:130] > BUG_REPORT_URL="https://bugs.launchpad.net/ubuntu/"
	I0731 10:53:32.453137   96871 command_runner.go:130] > PRIVACY_POLICY_URL="https://www.ubuntu.com/legal/terms-and-policies/privacy-policy"
	I0731 10:53:32.453147   96871 command_runner.go:130] > UBUNTU_CODENAME=jammy
	I0731 10:53:32.453192   96871 main.go:141] libmachine: Couldn't set key VERSION_CODENAME, no corresponding struct field found
	I0731 10:53:32.453231   96871 main.go:141] libmachine: Couldn't set key PRIVACY_POLICY_URL, no corresponding struct field found
	I0731 10:53:32.453247   96871 main.go:141] libmachine: Couldn't set key UBUNTU_CODENAME, no corresponding struct field found
	I0731 10:53:32.453255   96871 info.go:137] Remote host: Ubuntu 22.04.2 LTS
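
The os-release probe parses /etc/os-release as KEY="value" pairs; keys with no matching struct field are exactly what triggers the "Couldn't set key ..." warnings above. A minimal parser sketch:

    package main

    import (
    	"bufio"
    	"fmt"
    	"os"
    	"strings"
    )

    func main() {
    	f, err := os.Open("/etc/os-release")
    	if err != nil {
    		panic(err)
    	}
    	defer f.Close()
    	info := map[string]string{}
    	sc := bufio.NewScanner(f)
    	for sc.Scan() {
    		// Each line is KEY=value, with the value optionally double-quoted.
    		k, v, ok := strings.Cut(sc.Text(), "=")
    		if !ok {
    			continue
    		}
    		info[k] = strings.Trim(v, `"`)
    	}
    	fmt.Printf("%s %s\n", info["NAME"], info["VERSION"]) // e.g. Ubuntu 22.04.2 LTS (Jammy Jellyfish)
    }
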
	I0731 10:53:32.453263   96871 filesync.go:126] Scanning /home/jenkins/minikube-integration/16969-5799/.minikube/addons for local assets ...
	I0731 10:53:32.453314   96871 filesync.go:126] Scanning /home/jenkins/minikube-integration/16969-5799/.minikube/files for local assets ...
	I0731 10:53:32.453405   96871 filesync.go:149] local asset: /home/jenkins/minikube-integration/16969-5799/.minikube/files/etc/ssl/certs/125372.pem -> 125372.pem in /etc/ssl/certs
	I0731 10:53:32.453418   96871 vm_assets.go:163] NewFileAsset: /home/jenkins/minikube-integration/16969-5799/.minikube/files/etc/ssl/certs/125372.pem -> /etc/ssl/certs/125372.pem
	I0731 10:53:32.453528   96871 ssh_runner.go:195] Run: sudo mkdir -p /etc/ssl/certs
	I0731 10:53:32.460742   96871 ssh_runner.go:362] scp /home/jenkins/minikube-integration/16969-5799/.minikube/files/etc/ssl/certs/125372.pem --> /etc/ssl/certs/125372.pem (1708 bytes)
	I0731 10:53:32.480150   96871 start.go:303] post-start completed in 134.657027ms
	I0731 10:53:32.480447   96871 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" multinode-776386-m02
	I0731 10:53:32.496626   96871 profile.go:148] Saving config to /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/multinode-776386/config.json ...
	I0731 10:53:32.496838   96871 ssh_runner.go:195] Run: sh -c "df -h /var | awk 'NR==2{print $5}'"
	I0731 10:53:32.496880   96871 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" multinode-776386-m02
	I0731 10:53:32.513041   96871 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32852 SSHKeyPath:/home/jenkins/minikube-integration/16969-5799/.minikube/machines/multinode-776386-m02/id_rsa Username:docker}
	I0731 10:53:32.598798   96871 command_runner.go:130] > 18%!
	(MISSING)I0731 10:53:32.598874   96871 ssh_runner.go:195] Run: sh -c "df -BG /var | awk 'NR==2{print $4}'"
	I0731 10:53:32.602836   96871 command_runner.go:130] > 241G
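
The two df probes check usage and free space on /var; NR==2 in the awk one-liners skips the header row. The same free-space check as a Go sketch:

    package main

    import (
    	"fmt"
    	"os/exec"
    	"strings"
    )

    func main() {
    	// `df -BG /var` column 4 is free space in GiB; NR==2 drops the header.
    	out, err := exec.Command("sh", "-c", "df -BG /var | awk 'NR==2{print $4}'").Output()
    	if err != nil {
    		panic(err)
    	}
    	fmt.Printf("free on /var: %s\n", strings.TrimSpace(string(out))) // e.g. 241G
    }
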
	I0731 10:53:32.602870   96871 start.go:128] duration metric: createHost completed in 10.036915092s
	I0731 10:53:32.602882   96871 start.go:83] releasing machines lock for "multinode-776386-m02", held for 10.037037911s
	I0731 10:53:32.602933   96871 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" multinode-776386-m02
	I0731 10:53:32.621115   96871 out.go:177] * Found network options:
	I0731 10:53:32.622700   96871 out.go:177]   - NO_PROXY=192.168.58.2
	W0731 10:53:32.624234   96871 proxy.go:119] fail to check proxy env: Error ip not in block
	W0731 10:53:32.624268   96871 proxy.go:119] fail to check proxy env: Error ip not in block
	I0731 10:53:32.624321   96871 ssh_runner.go:195] Run: sudo sh -c "podman version >/dev/null"
	I0731 10:53:32.624391   96871 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" multinode-776386-m02
	I0731 10:53:32.624402   96871 ssh_runner.go:195] Run: curl -sS -m 2 https://registry.k8s.io/
	I0731 10:53:32.624447   96871 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" multinode-776386-m02
	I0731 10:53:32.640774   96871 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32852 SSHKeyPath:/home/jenkins/minikube-integration/16969-5799/.minikube/machines/multinode-776386-m02/id_rsa Username:docker}
	I0731 10:53:32.641770   96871 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32852 SSHKeyPath:/home/jenkins/minikube-integration/16969-5799/.minikube/machines/multinode-776386-m02/id_rsa Username:docker}
	I0731 10:53:32.861129   96871 command_runner.go:130] > <a href="https://github.com/kubernetes/registry.k8s.io">Temporary Redirect</a>.
	I0731 10:53:32.861212   96871 ssh_runner.go:195] Run: sh -c "stat /etc/cni/net.d/*loopback.conf*"
	I0731 10:53:32.865240   96871 command_runner.go:130] >   File: /etc/cni/net.d/200-loopback.conf
	I0731 10:53:32.865271   96871 command_runner.go:130] >   Size: 54        	Blocks: 8          IO Block: 4096   regular file
	I0731 10:53:32.865283   96871 command_runner.go:130] > Device: b0h/176d	Inode: 800494      Links: 1
	I0731 10:53:32.865295   96871 command_runner.go:130] > Access: (0644/-rw-r--r--)  Uid: (    0/    root)   Gid: (    0/    root)
	I0731 10:53:32.865314   96871 command_runner.go:130] > Access: 2023-06-14 14:44:50.000000000 +0000
	I0731 10:53:32.865323   96871 command_runner.go:130] > Modify: 2023-06-14 14:44:50.000000000 +0000
	I0731 10:53:32.865334   96871 command_runner.go:130] > Change: 2023-07-31 10:33:55.863843355 +0000
	I0731 10:53:32.865343   96871 command_runner.go:130] >  Birth: 2023-07-31 10:33:55.863843355 +0000
	I0731 10:53:32.865466   96871 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f -name *loopback.conf* -not -name *.mk_disabled -exec sh -c "sudo mv {} {}.mk_disabled" ;
	I0731 10:53:32.881545   96871 cni.go:221] loopback cni configuration disabled: "/etc/cni/net.d/*loopback.conf*" found
	I0731 10:53:32.881611   96871 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f ( ( -name *bridge* -or -name *podman* ) -and -not -name *.mk_disabled ) -printf "%!p(MISSING), " -exec sh -c "sudo mv {} {}.mk_disabled" ;
	I0731 10:53:32.906750   96871 command_runner.go:139] > /etc/cni/net.d/87-podman-bridge.conflist, /etc/cni/net.d/100-crio-bridge.conf, 
	I0731 10:53:32.906789   96871 cni.go:262] disabled [/etc/cni/net.d/87-podman-bridge.conflist, /etc/cni/net.d/100-crio-bridge.conf] bridge cni config(s)
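
Rather than deleting CNI configs, minikube sidelines them with a .mk_disabled suffix so CRI-O won't load a conflicting loopback/bridge/podman config before the cluster's own CNI is installed. A sketch of the rename pass (run as root, as with the sudo find ... -exec mv above):

    package main

    import (
    	"fmt"
    	"os"
    	"path/filepath"
    )

    func main() {
    	for _, pattern := range []string{"*loopback.conf*", "*bridge*", "*podman*"} {
    		matches, err := filepath.Glob(filepath.Join("/etc/cni/net.d", pattern))
    		if err != nil {
    			panic(err)
    		}
    		for _, m := range matches {
    			if filepath.Ext(m) == ".mk_disabled" {
    				continue // already sidelined on a previous pass
    			}
    			if err := os.Rename(m, m+".mk_disabled"); err != nil {
    				panic(err)
    			}
    			fmt.Println("disabled", m)
    		}
    	}
    }
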
	I0731 10:53:32.906798   96871 start.go:466] detecting cgroup driver to use...
	I0731 10:53:32.906829   96871 detect.go:196] detected "cgroupfs" cgroup driver on host os
	I0731 10:53:32.906873   96871 ssh_runner.go:195] Run: sudo systemctl stop -f containerd
	I0731 10:53:32.919642   96871 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service containerd
	I0731 10:53:32.928837   96871 docker.go:196] disabling cri-docker service (if available) ...
	I0731 10:53:32.928883   96871 ssh_runner.go:195] Run: sudo systemctl stop -f cri-docker.socket
	I0731 10:53:32.940239   96871 ssh_runner.go:195] Run: sudo systemctl stop -f cri-docker.service
	I0731 10:53:32.951816   96871 ssh_runner.go:195] Run: sudo systemctl disable cri-docker.socket
	I0731 10:53:33.029913   96871 ssh_runner.go:195] Run: sudo systemctl mask cri-docker.service
	I0731 10:53:33.110556   96871 command_runner.go:130] ! Created symlink /etc/systemd/system/cri-docker.service → /dev/null.
	I0731 10:53:33.110601   96871 docker.go:212] disabling docker service ...
	I0731 10:53:33.110639   96871 ssh_runner.go:195] Run: sudo systemctl stop -f docker.socket
	I0731 10:53:33.126781   96871 ssh_runner.go:195] Run: sudo systemctl stop -f docker.service
	I0731 10:53:33.136441   96871 ssh_runner.go:195] Run: sudo systemctl disable docker.socket
	I0731 10:53:33.147019   96871 command_runner.go:130] ! Removed /etc/systemd/system/sockets.target.wants/docker.socket.
	I0731 10:53:33.218030   96871 ssh_runner.go:195] Run: sudo systemctl mask docker.service
	I0731 10:53:33.227912   96871 command_runner.go:130] ! Created symlink /etc/systemd/system/docker.service → /dev/null.
	I0731 10:53:33.294804   96871 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service docker
	I0731 10:53:33.304193   96871 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %!s(MISSING) "runtime-endpoint: unix:///var/run/crio/crio.sock
	" | sudo tee /etc/crictl.yaml"
	I0731 10:53:33.316998   96871 command_runner.go:130] > runtime-endpoint: unix:///var/run/crio/crio.sock
	I0731 10:53:33.317672   96871 crio.go:59] configure cri-o to use "registry.k8s.io/pause:3.9" pause image...
	I0731 10:53:33.317723   96871 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|^.*pause_image = .*$|pause_image = "registry.k8s.io/pause:3.9"|' /etc/crio/crio.conf.d/02-crio.conf"
	I0731 10:53:33.325766   96871 crio.go:70] configuring cri-o to use "cgroupfs" as cgroup driver...
	I0731 10:53:33.325840   96871 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|^.*cgroup_manager = .*$|cgroup_manager = "cgroupfs"|' /etc/crio/crio.conf.d/02-crio.conf"
	I0731 10:53:33.333880   96871 ssh_runner.go:195] Run: sh -c "sudo sed -i '/conmon_cgroup = .*/d' /etc/crio/crio.conf.d/02-crio.conf"
	I0731 10:53:33.341667   96871 ssh_runner.go:195] Run: sh -c "sudo sed -i '/cgroup_manager = .*/a conmon_cgroup = "pod"' /etc/crio/crio.conf.d/02-crio.conf"
	I0731 10:53:33.349661   96871 ssh_runner.go:195] Run: sh -c "sudo rm -rf /etc/cni/net.mk"
	I0731 10:53:33.357265   96871 ssh_runner.go:195] Run: sudo sysctl net.bridge.bridge-nf-call-iptables
	I0731 10:53:33.364237   96871 command_runner.go:130] > net.bridge.bridge-nf-call-iptables = 1
	I0731 10:53:33.364289   96871 ssh_runner.go:195] Run: sudo sh -c "echo 1 > /proc/sys/net/ipv4/ip_forward"
	I0731 10:53:33.371464   96871 ssh_runner.go:195] Run: sudo systemctl daemon-reload
	I0731 10:53:33.446545   96871 ssh_runner.go:195] Run: sudo systemctl restart crio
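
The CRI-O reconfiguration is a pair of in-place edits to /etc/crio/crio.conf.d/02-crio.conf followed by a daemon restart: set the pause image to registry.k8s.io/pause:3.9 and the cgroup manager to cgroupfs. A Go sketch of the same rewrites (the log does this with sudo sed -i):

    package main

    import (
    	"os"
    	"regexp"
    )

    func main() {
    	path := "/etc/crio/crio.conf.d/02-crio.conf"
    	data, err := os.ReadFile(path)
    	if err != nil {
    		panic(err)
    	}
    	// Replace whole lines, commented or not, just as the sed patterns do.
    	data = regexp.MustCompile(`(?m)^.*pause_image = .*$`).
    		ReplaceAll(data, []byte(`pause_image = "registry.k8s.io/pause:3.9"`))
    	data = regexp.MustCompile(`(?m)^.*cgroup_manager = .*$`).
    		ReplaceAll(data, []byte(`cgroup_manager = "cgroupfs"`))
    	if err := os.WriteFile(path, data, 0o644); err != nil {
    		panic(err)
    	}
    	// A `systemctl daemon-reload` and `systemctl restart crio` follow.
    }
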
	I0731 10:53:33.550266   96871 start.go:513] Will wait 60s for socket path /var/run/crio/crio.sock
	I0731 10:53:33.550327   96871 ssh_runner.go:195] Run: stat /var/run/crio/crio.sock
	I0731 10:53:33.553361   96871 command_runner.go:130] >   File: /var/run/crio/crio.sock
	I0731 10:53:33.553379   96871 command_runner.go:130] >   Size: 0         	Blocks: 0          IO Block: 4096   socket
	I0731 10:53:33.553386   96871 command_runner.go:130] > Device: b9h/185d	Inode: 186         Links: 1
	I0731 10:53:33.553392   96871 command_runner.go:130] > Access: (0660/srw-rw----)  Uid: (    0/    root)   Gid: (    0/    root)
	I0731 10:53:33.553397   96871 command_runner.go:130] > Access: 2023-07-31 10:53:33.536841294 +0000
	I0731 10:53:33.553403   96871 command_runner.go:130] > Modify: 2023-07-31 10:53:33.536841294 +0000
	I0731 10:53:33.553410   96871 command_runner.go:130] > Change: 2023-07-31 10:53:33.536841294 +0000
	I0731 10:53:33.553416   96871 command_runner.go:130] >  Birth: -
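
The "Will wait 60s for socket path" step is a simple poll: stat the CRI-O socket until it exists or the deadline passes, before anything talks to crictl. A sketch:

    package main

    import (
    	"fmt"
    	"os"
    	"time"
    )

    func main() {
    	const sock = "/var/run/crio/crio.sock"
    	deadline := time.Now().Add(60 * time.Second)
    	for {
    		if _, err := os.Stat(sock); err == nil {
    			fmt.Println("socket ready:", sock)
    			return
    		}
    		if time.Now().After(deadline) {
    			panic("timed out waiting for " + sock)
    		}
    		time.Sleep(500 * time.Millisecond)
    	}
    }
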
	I0731 10:53:33.553429   96871 start.go:534] Will wait 60s for crictl version
	I0731 10:53:33.553460   96871 ssh_runner.go:195] Run: which crictl
	I0731 10:53:33.556361   96871 command_runner.go:130] > /usr/bin/crictl
	I0731 10:53:33.556425   96871 ssh_runner.go:195] Run: sudo /usr/bin/crictl version
	I0731 10:53:33.588720   96871 command_runner.go:130] > Version:  0.1.0
	I0731 10:53:33.588743   96871 command_runner.go:130] > RuntimeName:  cri-o
	I0731 10:53:33.588748   96871 command_runner.go:130] > RuntimeVersion:  1.24.6
	I0731 10:53:33.588753   96871 command_runner.go:130] > RuntimeApiVersion:  v1
	I0731 10:53:33.588768   96871 start.go:550] Version:  0.1.0
	RuntimeName:  cri-o
	RuntimeVersion:  1.24.6
	RuntimeApiVersion:  v1
	I0731 10:53:33.588838   96871 ssh_runner.go:195] Run: crio --version
	I0731 10:53:33.619292   96871 command_runner.go:130] > crio version 1.24.6
	I0731 10:53:33.619309   96871 command_runner.go:130] > Version:          1.24.6
	I0731 10:53:33.619316   96871 command_runner.go:130] > GitCommit:        4bfe15a9feb74ffc95e66a21c04b15fa7bbc2b90
	I0731 10:53:33.619321   96871 command_runner.go:130] > GitTreeState:     clean
	I0731 10:53:33.619329   96871 command_runner.go:130] > BuildDate:        2023-06-14T14:44:50Z
	I0731 10:53:33.619334   96871 command_runner.go:130] > GoVersion:        go1.18.2
	I0731 10:53:33.619338   96871 command_runner.go:130] > Compiler:         gc
	I0731 10:53:33.619342   96871 command_runner.go:130] > Platform:         linux/amd64
	I0731 10:53:33.619347   96871 command_runner.go:130] > Linkmode:         dynamic
	I0731 10:53:33.619354   96871 command_runner.go:130] > BuildTags:        apparmor, exclude_graphdriver_devicemapper, containers_image_ostree_stub, seccomp
	I0731 10:53:33.619358   96871 command_runner.go:130] > SeccompEnabled:   true
	I0731 10:53:33.619362   96871 command_runner.go:130] > AppArmorEnabled:  false
	I0731 10:53:33.620696   96871 ssh_runner.go:195] Run: crio --version
	I0731 10:53:33.650852   96871 command_runner.go:130] > crio version 1.24.6
	I0731 10:53:33.650874   96871 command_runner.go:130] > Version:          1.24.6
	I0731 10:53:33.650882   96871 command_runner.go:130] > GitCommit:        4bfe15a9feb74ffc95e66a21c04b15fa7bbc2b90
	I0731 10:53:33.650886   96871 command_runner.go:130] > GitTreeState:     clean
	I0731 10:53:33.650892   96871 command_runner.go:130] > BuildDate:        2023-06-14T14:44:50Z
	I0731 10:53:33.650898   96871 command_runner.go:130] > GoVersion:        go1.18.2
	I0731 10:53:33.650905   96871 command_runner.go:130] > Compiler:         gc
	I0731 10:53:33.650913   96871 command_runner.go:130] > Platform:         linux/amd64
	I0731 10:53:33.650923   96871 command_runner.go:130] > Linkmode:         dynamic
	I0731 10:53:33.650941   96871 command_runner.go:130] > BuildTags:        apparmor, exclude_graphdriver_devicemapper, containers_image_ostree_stub, seccomp
	I0731 10:53:33.650953   96871 command_runner.go:130] > SeccompEnabled:   true
	I0731 10:53:33.650965   96871 command_runner.go:130] > AppArmorEnabled:  false
	I0731 10:53:33.653115   96871 out.go:177] * Preparing Kubernetes v1.27.3 on CRI-O 1.24.6 ...
	I0731 10:53:33.654724   96871 out.go:177]   - env NO_PROXY=192.168.58.2
	I0731 10:53:33.656202   96871 cli_runner.go:164] Run: docker network inspect multinode-776386 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
	I0731 10:53:33.673233   96871 ssh_runner.go:195] Run: grep 192.168.58.1	host.minikube.internal$ /etc/hosts
	I0731 10:53:33.676752   96871 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\thost.minikube.internal$' "/etc/hosts"; echo "192.168.58.1	host.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
	I0731 10:53:33.686879   96871 certs.go:56] Setting up /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/multinode-776386 for IP: 192.168.58.3
	I0731 10:53:33.686906   96871 certs.go:190] acquiring lock for shared ca certs: {Name:mke1f008d411b97835fe7ef4c9ac6bdba0705009 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
	I0731 10:53:33.687036   96871 certs.go:199] skipping minikubeCA CA generation: /home/jenkins/minikube-integration/16969-5799/.minikube/ca.key
	I0731 10:53:33.687073   96871 certs.go:199] skipping proxyClientCA CA generation: /home/jenkins/minikube-integration/16969-5799/.minikube/proxy-client-ca.key
	I0731 10:53:33.687086   96871 vm_assets.go:163] NewFileAsset: /home/jenkins/minikube-integration/16969-5799/.minikube/ca.crt -> /var/lib/minikube/certs/ca.crt
	I0731 10:53:33.687098   96871 vm_assets.go:163] NewFileAsset: /home/jenkins/minikube-integration/16969-5799/.minikube/ca.key -> /var/lib/minikube/certs/ca.key
	I0731 10:53:33.687110   96871 vm_assets.go:163] NewFileAsset: /home/jenkins/minikube-integration/16969-5799/.minikube/proxy-client-ca.crt -> /var/lib/minikube/certs/proxy-client-ca.crt
	I0731 10:53:33.687121   96871 vm_assets.go:163] NewFileAsset: /home/jenkins/minikube-integration/16969-5799/.minikube/proxy-client-ca.key -> /var/lib/minikube/certs/proxy-client-ca.key
	I0731 10:53:33.687165   96871 certs.go:437] found cert: /home/jenkins/minikube-integration/16969-5799/.minikube/certs/home/jenkins/minikube-integration/16969-5799/.minikube/certs/12537.pem (1338 bytes)
	W0731 10:53:33.687194   96871 certs.go:433] ignoring /home/jenkins/minikube-integration/16969-5799/.minikube/certs/home/jenkins/minikube-integration/16969-5799/.minikube/certs/12537_empty.pem, impossibly tiny 0 bytes
	I0731 10:53:33.687204   96871 certs.go:437] found cert: /home/jenkins/minikube-integration/16969-5799/.minikube/certs/home/jenkins/minikube-integration/16969-5799/.minikube/certs/ca-key.pem (1675 bytes)
	I0731 10:53:33.687228   96871 certs.go:437] found cert: /home/jenkins/minikube-integration/16969-5799/.minikube/certs/home/jenkins/minikube-integration/16969-5799/.minikube/certs/ca.pem (1082 bytes)
	I0731 10:53:33.687249   96871 certs.go:437] found cert: /home/jenkins/minikube-integration/16969-5799/.minikube/certs/home/jenkins/minikube-integration/16969-5799/.minikube/certs/cert.pem (1123 bytes)
	I0731 10:53:33.687270   96871 certs.go:437] found cert: /home/jenkins/minikube-integration/16969-5799/.minikube/certs/home/jenkins/minikube-integration/16969-5799/.minikube/certs/key.pem (1675 bytes)
	I0731 10:53:33.687308   96871 certs.go:437] found cert: /home/jenkins/minikube-integration/16969-5799/.minikube/files/etc/ssl/certs/home/jenkins/minikube-integration/16969-5799/.minikube/files/etc/ssl/certs/125372.pem (1708 bytes)
	I0731 10:53:33.687333   96871 vm_assets.go:163] NewFileAsset: /home/jenkins/minikube-integration/16969-5799/.minikube/ca.crt -> /usr/share/ca-certificates/minikubeCA.pem
	I0731 10:53:33.687345   96871 vm_assets.go:163] NewFileAsset: /home/jenkins/minikube-integration/16969-5799/.minikube/certs/12537.pem -> /usr/share/ca-certificates/12537.pem
	I0731 10:53:33.687357   96871 vm_assets.go:163] NewFileAsset: /home/jenkins/minikube-integration/16969-5799/.minikube/files/etc/ssl/certs/125372.pem -> /usr/share/ca-certificates/125372.pem
	I0731 10:53:33.687681   96871 ssh_runner.go:362] scp /home/jenkins/minikube-integration/16969-5799/.minikube/ca.crt --> /var/lib/minikube/certs/ca.crt (1111 bytes)
	I0731 10:53:33.708774   96871 ssh_runner.go:362] scp /home/jenkins/minikube-integration/16969-5799/.minikube/ca.key --> /var/lib/minikube/certs/ca.key (1675 bytes)
	I0731 10:53:33.729349   96871 ssh_runner.go:362] scp /home/jenkins/minikube-integration/16969-5799/.minikube/proxy-client-ca.crt --> /var/lib/minikube/certs/proxy-client-ca.crt (1119 bytes)
	I0731 10:53:33.749400   96871 ssh_runner.go:362] scp /home/jenkins/minikube-integration/16969-5799/.minikube/proxy-client-ca.key --> /var/lib/minikube/certs/proxy-client-ca.key (1679 bytes)
	I0731 10:53:33.770114   96871 ssh_runner.go:362] scp /home/jenkins/minikube-integration/16969-5799/.minikube/ca.crt --> /usr/share/ca-certificates/minikubeCA.pem (1111 bytes)
	I0731 10:53:33.790266   96871 ssh_runner.go:362] scp /home/jenkins/minikube-integration/16969-5799/.minikube/certs/12537.pem --> /usr/share/ca-certificates/12537.pem (1338 bytes)
	I0731 10:53:33.811013   96871 ssh_runner.go:362] scp /home/jenkins/minikube-integration/16969-5799/.minikube/files/etc/ssl/certs/125372.pem --> /usr/share/ca-certificates/125372.pem (1708 bytes)
	I0731 10:53:33.830945   96871 ssh_runner.go:195] Run: openssl version
	I0731 10:53:33.835435   96871 command_runner.go:130] > OpenSSL 3.0.2 15 Mar 2022 (Library: OpenSSL 3.0.2 15 Mar 2022)
	I0731 10:53:33.835654   96871 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/minikubeCA.pem && ln -fs /usr/share/ca-certificates/minikubeCA.pem /etc/ssl/certs/minikubeCA.pem"
	I0731 10:53:33.843846   96871 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/minikubeCA.pem
	I0731 10:53:33.846788   96871 command_runner.go:130] > -rw-r--r-- 1 root root 1111 Jul 31 10:34 /usr/share/ca-certificates/minikubeCA.pem
	I0731 10:53:33.846810   96871 certs.go:480] hashing: -rw-r--r-- 1 root root 1111 Jul 31 10:34 /usr/share/ca-certificates/minikubeCA.pem
	I0731 10:53:33.846842   96871 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem
	I0731 10:53:33.852438   96871 command_runner.go:130] > b5213941
	I0731 10:53:33.852638   96871 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/b5213941.0 || ln -fs /etc/ssl/certs/minikubeCA.pem /etc/ssl/certs/b5213941.0"
	I0731 10:53:33.860268   96871 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/12537.pem && ln -fs /usr/share/ca-certificates/12537.pem /etc/ssl/certs/12537.pem"
	I0731 10:53:33.867845   96871 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/12537.pem
	I0731 10:53:33.870668   96871 command_runner.go:130] > -rw-r--r-- 1 root root 1338 Jul 31 10:39 /usr/share/ca-certificates/12537.pem
	I0731 10:53:33.870687   96871 certs.go:480] hashing: -rw-r--r-- 1 root root 1338 Jul 31 10:39 /usr/share/ca-certificates/12537.pem
	I0731 10:53:33.870717   96871 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/12537.pem
	I0731 10:53:33.876404   96871 command_runner.go:130] > 51391683
	I0731 10:53:33.876626   96871 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/51391683.0 || ln -fs /etc/ssl/certs/12537.pem /etc/ssl/certs/51391683.0"
	I0731 10:53:33.884183   96871 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/125372.pem && ln -fs /usr/share/ca-certificates/125372.pem /etc/ssl/certs/125372.pem"
	I0731 10:53:33.892087   96871 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/125372.pem
	I0731 10:53:33.894904   96871 command_runner.go:130] > -rw-r--r-- 1 root root 1708 Jul 31 10:39 /usr/share/ca-certificates/125372.pem
	I0731 10:53:33.894946   96871 certs.go:480] hashing: -rw-r--r-- 1 root root 1708 Jul 31 10:39 /usr/share/ca-certificates/125372.pem
	I0731 10:53:33.894978   96871 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/125372.pem
	I0731 10:53:33.900448   96871 command_runner.go:130] > 3ec20f2e
	I0731 10:53:33.900682   96871 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/3ec20f2e.0 || ln -fs /etc/ssl/certs/125372.pem /etc/ssl/certs/3ec20f2e.0"
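
Each CA installed under /usr/share/ca-certificates also gets a /etc/ssl/certs/<subject-hash>.0 symlink, which is how OpenSSL locates trust anchors; the hashes in the log (b5213941, 51391683, 3ec20f2e) come from openssl x509 -hash. A sketch of one round of that wiring (run as root):

    package main

    import (
    	"fmt"
    	"os"
    	"os/exec"
    	"strings"
    )

    func main() {
    	pem := "/usr/share/ca-certificates/minikubeCA.pem"
    	// Subject hash used for the /etc/ssl/certs lookup scheme.
    	out, err := exec.Command("openssl", "x509", "-hash", "-noout", "-in", pem).Output()
    	if err != nil {
    		panic(err)
    	}
    	hash := strings.TrimSpace(string(out)) // e.g. b5213941
    	link := fmt.Sprintf("/etc/ssl/certs/%s.0", hash)
    	if err := os.Symlink(pem, link); err != nil && !os.IsExist(err) {
    		panic(err)
    	}
    	fmt.Println("linked", link)
    }
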
	I0731 10:53:33.908606   96871 ssh_runner.go:195] Run: ls /var/lib/minikube/certs/etcd
	I0731 10:53:33.911333   96871 command_runner.go:130] ! ls: cannot access '/var/lib/minikube/certs/etcd': No such file or directory
	I0731 10:53:33.911368   96871 certs.go:353] certs directory doesn't exist, likely first start: ls /var/lib/minikube/certs/etcd: Process exited with status 2
	stdout:
	
	stderr:
	ls: cannot access '/var/lib/minikube/certs/etcd': No such file or directory
	I0731 10:53:33.911451   96871 ssh_runner.go:195] Run: crio config
	I0731 10:53:33.944656   96871 command_runner.go:130] ! time="2023-07-31 10:53:33.944309315Z" level=info msg="Starting CRI-O, version: 1.24.6, git: 4bfe15a9feb74ffc95e66a21c04b15fa7bbc2b90(clean)"
	I0731 10:53:33.944692   96871 command_runner.go:130] ! level=info msg="Using default capabilities: CAP_CHOWN, CAP_DAC_OVERRIDE, CAP_FSETID, CAP_FOWNER, CAP_SETGID, CAP_SETUID, CAP_SETPCAP, CAP_NET_BIND_SERVICE, CAP_KILL"
	I0731 10:53:33.949695   96871 command_runner.go:130] > # The CRI-O configuration file specifies all of the available configuration
	I0731 10:53:33.949724   96871 command_runner.go:130] > # options and command-line flags for the crio(8) OCI Kubernetes Container Runtime
	I0731 10:53:33.949734   96871 command_runner.go:130] > # daemon, but in a TOML format that can be more easily modified and versioned.
	I0731 10:53:33.949740   96871 command_runner.go:130] > #
	I0731 10:53:33.949755   96871 command_runner.go:130] > # Please refer to crio.conf(5) for details of all configuration options.
	I0731 10:53:33.949769   96871 command_runner.go:130] > # CRI-O supports partial configuration reload during runtime, which can be
	I0731 10:53:33.949784   96871 command_runner.go:130] > # done by sending SIGHUP to the running process. Currently supported options
	I0731 10:53:33.949805   96871 command_runner.go:130] > # are explicitly mentioned with: 'This option supports live configuration
	I0731 10:53:33.949814   96871 command_runner.go:130] > # reload'.
	I0731 10:53:33.949827   96871 command_runner.go:130] > # CRI-O reads its storage defaults from the containers-storage.conf(5) file
	I0731 10:53:33.949840   96871 command_runner.go:130] > # located at /etc/containers/storage.conf. Modify this storage configuration if
	I0731 10:53:33.949854   96871 command_runner.go:130] > # you want to change the system's defaults. If you want to modify storage just
	I0731 10:53:33.949868   96871 command_runner.go:130] > # for CRI-O, you can change the storage configuration options here.
	I0731 10:53:33.949878   96871 command_runner.go:130] > [crio]
	I0731 10:53:33.949891   96871 command_runner.go:130] > # Path to the "root directory". CRI-O stores all of its data, including
	I0731 10:53:33.949903   96871 command_runner.go:130] > # containers images, in this directory.
	I0731 10:53:33.949916   96871 command_runner.go:130] > # root = "/home/docker/.local/share/containers/storage"
	I0731 10:53:33.949925   96871 command_runner.go:130] > # Path to the "run directory". CRI-O stores all of its state in this directory.
	I0731 10:53:33.949937   96871 command_runner.go:130] > # runroot = "/tmp/containers-user-1000/containers"
	I0731 10:53:33.949952   96871 command_runner.go:130] > # Storage driver used to manage the storage of images and containers. Please
	I0731 10:53:33.949967   96871 command_runner.go:130] > # refer to containers-storage.conf(5) to see all available storage drivers.
	I0731 10:53:33.949978   96871 command_runner.go:130] > # storage_driver = "vfs"
	I0731 10:53:33.949991   96871 command_runner.go:130] > # List to pass options to the storage driver. Please refer to
	I0731 10:53:33.950003   96871 command_runner.go:130] > # containers-storage.conf(5) to see all available storage options.
	I0731 10:53:33.950013   96871 command_runner.go:130] > # storage_option = [
	I0731 10:53:33.950024   96871 command_runner.go:130] > # ]
	I0731 10:53:33.950039   96871 command_runner.go:130] > # The default log directory where all logs will go unless directly specified by
	I0731 10:53:33.950053   96871 command_runner.go:130] > # the kubelet. The log directory specified must be an absolute directory.
	I0731 10:53:33.950065   96871 command_runner.go:130] > # log_dir = "/var/log/crio/pods"
	I0731 10:53:33.950078   96871 command_runner.go:130] > # Location for CRI-O to lay down the temporary version file.
	I0731 10:53:33.950091   96871 command_runner.go:130] > # It is used to check if crio wipe should wipe containers, which should
	I0731 10:53:33.950102   96871 command_runner.go:130] > # always happen on a node reboot
	I0731 10:53:33.950113   96871 command_runner.go:130] > # version_file = "/var/run/crio/version"
	I0731 10:53:33.950122   96871 command_runner.go:130] > # Location for CRI-O to lay down the persistent version file.
	I0731 10:53:33.950134   96871 command_runner.go:130] > # It is used to check if crio wipe should wipe images, which should
	I0731 10:53:33.950154   96871 command_runner.go:130] > # only happen when CRI-O has been upgraded
	I0731 10:53:33.950166   96871 command_runner.go:130] > # version_file_persist = "/var/lib/crio/version"
	I0731 10:53:33.950181   96871 command_runner.go:130] > # InternalWipe is whether CRI-O should wipe containers and images after a reboot when the server starts.
	I0731 10:53:33.950207   96871 command_runner.go:130] > # If set to false, one must use the external command 'crio wipe' to wipe the containers and images in these situations.
	I0731 10:53:33.950214   96871 command_runner.go:130] > # internal_wipe = true
	I0731 10:53:33.950223   96871 command_runner.go:130] > # Location for CRI-O to lay down the clean shutdown file.
	I0731 10:53:33.950238   96871 command_runner.go:130] > # It is used to check whether crio had time to sync before shutting down.
	I0731 10:53:33.950250   96871 command_runner.go:130] > # If not found, crio wipe will clear the storage directory.
	I0731 10:53:33.950265   96871 command_runner.go:130] > # clean_shutdown_file = "/var/lib/crio/clean.shutdown"
	I0731 10:53:33.950279   96871 command_runner.go:130] > # The crio.api table contains settings for the kubelet/gRPC interface.
	I0731 10:53:33.950288   96871 command_runner.go:130] > [crio.api]
	I0731 10:53:33.950293   96871 command_runner.go:130] > # Path to AF_LOCAL socket on which CRI-O will listen.
	I0731 10:53:33.950303   96871 command_runner.go:130] > # listen = "/var/run/crio/crio.sock"
	I0731 10:53:33.950316   96871 command_runner.go:130] > # IP address on which the stream server will listen.
	I0731 10:53:33.950326   96871 command_runner.go:130] > # stream_address = "127.0.0.1"
	I0731 10:53:33.950340   96871 command_runner.go:130] > # The port on which the stream server will listen. If the port is set to "0", then
	I0731 10:53:33.950352   96871 command_runner.go:130] > # CRI-O will allocate a random free port number.
	I0731 10:53:33.950362   96871 command_runner.go:130] > # stream_port = "0"
	I0731 10:53:33.950373   96871 command_runner.go:130] > # Enable encrypted TLS transport of the stream server.
	I0731 10:53:33.950381   96871 command_runner.go:130] > # stream_enable_tls = false
	I0731 10:53:33.950390   96871 command_runner.go:130] > # Length of time until open streams terminate due to lack of activity
	I0731 10:53:33.950401   96871 command_runner.go:130] > # stream_idle_timeout = ""
	I0731 10:53:33.950412   96871 command_runner.go:130] > # Path to the x509 certificate file used to serve the encrypted stream. This
	I0731 10:53:33.950426   96871 command_runner.go:130] > # file can change, and CRI-O will automatically pick up the changes within 5
	I0731 10:53:33.950435   96871 command_runner.go:130] > # minutes.
	I0731 10:53:33.950445   96871 command_runner.go:130] > # stream_tls_cert = ""
	I0731 10:53:33.950462   96871 command_runner.go:130] > # Path to the key file used to serve the encrypted stream. This file can
	I0731 10:53:33.950473   96871 command_runner.go:130] > # change and CRI-O will automatically pick up the changes within 5 minutes.
	I0731 10:53:33.950481   96871 command_runner.go:130] > # stream_tls_key = ""
	I0731 10:53:33.950500   96871 command_runner.go:130] > # Path to the x509 CA(s) file used to verify and authenticate client
	I0731 10:53:33.950517   96871 command_runner.go:130] > # communication with the encrypted stream. This file can change and CRI-O will
	I0731 10:53:33.950529   96871 command_runner.go:130] > # automatically pick up the changes within 5 minutes.
	I0731 10:53:33.950539   96871 command_runner.go:130] > # stream_tls_ca = ""
	I0731 10:53:33.950554   96871 command_runner.go:130] > # Maximum grpc send message size in bytes. If not set or <=0, then CRI-O will default to 16 * 1024 * 1024.
	I0731 10:53:33.950563   96871 command_runner.go:130] > # grpc_max_send_msg_size = 83886080
	I0731 10:53:33.950571   96871 command_runner.go:130] > # Maximum grpc receive message size. If not set or <= 0, then CRI-O will default to 16 * 1024 * 1024.
	I0731 10:53:33.950582   96871 command_runner.go:130] > # grpc_max_recv_msg_size = 83886080
	I0731 10:53:33.950622   96871 command_runner.go:130] > # The crio.runtime table contains settings pertaining to the OCI runtime used
	I0731 10:53:33.950636   96871 command_runner.go:130] > # and options for how to set up and manage the OCI runtime.
	I0731 10:53:33.950643   96871 command_runner.go:130] > [crio.runtime]
	I0731 10:53:33.950655   96871 command_runner.go:130] > # A list of ulimits to be set in containers by default, specified as
	I0731 10:53:33.950664   96871 command_runner.go:130] > # "<ulimit name>=<soft limit>:<hard limit>", for example:
	I0731 10:53:33.950673   96871 command_runner.go:130] > # "nofile=1024:2048"
	I0731 10:53:33.950687   96871 command_runner.go:130] > # If nothing is set here, settings will be inherited from the CRI-O daemon
	I0731 10:53:33.950700   96871 command_runner.go:130] > # default_ulimits = [
	I0731 10:53:33.950710   96871 command_runner.go:130] > # ]
	I0731 10:53:33.950724   96871 command_runner.go:130] > # If true, the runtime will not use pivot_root, but instead use MS_MOVE.
	I0731 10:53:33.950733   96871 command_runner.go:130] > # no_pivot = false
	I0731 10:53:33.950746   96871 command_runner.go:130] > # decryption_keys_path is the path where the keys required for
	I0731 10:53:33.950760   96871 command_runner.go:130] > # image decryption are stored. This option supports live configuration reload.
	I0731 10:53:33.950768   96871 command_runner.go:130] > # decryption_keys_path = "/etc/crio/keys/"
	I0731 10:53:33.950780   96871 command_runner.go:130] > # Path to the conmon binary, used for monitoring the OCI runtime.
	I0731 10:53:33.950791   96871 command_runner.go:130] > # Will be searched for using $PATH if empty.
	I0731 10:53:33.950806   96871 command_runner.go:130] > # This option is currently deprecated, and will be replaced with RuntimeHandler.MonitorEnv.
	I0731 10:53:33.950816   96871 command_runner.go:130] > # conmon = ""
	I0731 10:53:33.950826   96871 command_runner.go:130] > # Cgroup setting for conmon
	I0731 10:53:33.950841   96871 command_runner.go:130] > # This option is currently deprecated, and will be replaced with RuntimeHandler.MonitorCgroup.
	I0731 10:53:33.950850   96871 command_runner.go:130] > conmon_cgroup = "pod"
	I0731 10:53:33.950860   96871 command_runner.go:130] > # Environment variable list for the conmon process, used for passing necessary
	I0731 10:53:33.950872   96871 command_runner.go:130] > # environment variables to conmon or the runtime.
	I0731 10:53:33.950888   96871 command_runner.go:130] > # This option is currently deprecated, and will be replaced with RuntimeHandler.MonitorEnv.
	I0731 10:53:33.950898   96871 command_runner.go:130] > # conmon_env = [
	I0731 10:53:33.950911   96871 command_runner.go:130] > # ]
	I0731 10:53:33.950923   96871 command_runner.go:130] > # Additional environment variables to set for all the
	I0731 10:53:33.950935   96871 command_runner.go:130] > # containers. These are overridden if set in the
	I0731 10:53:33.950947   96871 command_runner.go:130] > # container image spec or in the container runtime configuration.
	I0731 10:53:33.950954   96871 command_runner.go:130] > # default_env = [
	I0731 10:53:33.950959   96871 command_runner.go:130] > # ]
	I0731 10:53:33.950972   96871 command_runner.go:130] > # If true, SELinux will be used for pod separation on the host.
	I0731 10:53:33.950984   96871 command_runner.go:130] > # selinux = false
	I0731 10:53:33.950998   96871 command_runner.go:130] > # Path to the seccomp.json profile which is used as the default seccomp profile
	I0731 10:53:33.951012   96871 command_runner.go:130] > # for the runtime. If not specified, then the internal default seccomp profile
	I0731 10:53:33.951025   96871 command_runner.go:130] > # will be used. This option supports live configuration reload.
	I0731 10:53:33.951035   96871 command_runner.go:130] > # seccomp_profile = ""
	I0731 10:53:33.951047   96871 command_runner.go:130] > # Changes the meaning of an empty seccomp profile. By default
	I0731 10:53:33.951057   96871 command_runner.go:130] > # (and according to CRI spec), an empty profile means unconfined.
	I0731 10:53:33.951069   96871 command_runner.go:130] > # This option tells CRI-O to treat an empty profile as the default profile,
	I0731 10:53:33.951079   96871 command_runner.go:130] > # which might increase security.
	I0731 10:53:33.951091   96871 command_runner.go:130] > # seccomp_use_default_when_empty = true
	I0731 10:53:33.951107   96871 command_runner.go:130] > # Used to change the name of the default AppArmor profile of CRI-O. The default
	I0731 10:53:33.951125   96871 command_runner.go:130] > # profile name is "crio-default". This profile only takes effect if the user
	I0731 10:53:33.951139   96871 command_runner.go:130] > # does not specify a profile via the Kubernetes Pod's metadata annotation. If
	I0731 10:53:33.951152   96871 command_runner.go:130] > # the profile is set to "unconfined", then this equals to disabling AppArmor.
	I0731 10:53:33.951162   96871 command_runner.go:130] > # This option supports live configuration reload.
	I0731 10:53:33.951172   96871 command_runner.go:130] > # apparmor_profile = "crio-default"
	I0731 10:53:33.951185   96871 command_runner.go:130] > # Path to the blockio class configuration file for configuring
	I0731 10:53:33.951195   96871 command_runner.go:130] > # the cgroup blockio controller.
	I0731 10:53:33.951203   96871 command_runner.go:130] > # blockio_config_file = ""
	I0731 10:53:33.951218   96871 command_runner.go:130] > # Used to change irqbalance service config file path which is used for configuring
	I0731 10:53:33.951228   96871 command_runner.go:130] > # irqbalance daemon.
	I0731 10:53:33.951241   96871 command_runner.go:130] > # irqbalance_config_file = "/etc/sysconfig/irqbalance"
	I0731 10:53:33.951255   96871 command_runner.go:130] > # Path to the RDT configuration file for configuring the resctrl pseudo-filesystem.
	I0731 10:53:33.951264   96871 command_runner.go:130] > # This option supports live configuration reload.
	I0731 10:53:33.951271   96871 command_runner.go:130] > # rdt_config_file = ""
	I0731 10:53:33.951280   96871 command_runner.go:130] > # Cgroup management implementation used for the runtime.
	I0731 10:53:33.951290   96871 command_runner.go:130] > cgroup_manager = "cgroupfs"
	I0731 10:53:33.951301   96871 command_runner.go:130] > # Specify whether the image pull must be performed in a separate cgroup.
	I0731 10:53:33.951311   96871 command_runner.go:130] > # separate_pull_cgroup = ""
	I0731 10:53:33.951330   96871 command_runner.go:130] > # List of default capabilities for containers. If it is empty or commented out,
	I0731 10:53:33.951343   96871 command_runner.go:130] > # only the capabilities defined in the containers json file by the user/kube
	I0731 10:53:33.951353   96871 command_runner.go:130] > # will be added.
	I0731 10:53:33.951361   96871 command_runner.go:130] > # default_capabilities = [
	I0731 10:53:33.951368   96871 command_runner.go:130] > # 	"CHOWN",
	I0731 10:53:33.951374   96871 command_runner.go:130] > # 	"DAC_OVERRIDE",
	I0731 10:53:33.951385   96871 command_runner.go:130] > # 	"FSETID",
	I0731 10:53:33.951395   96871 command_runner.go:130] > # 	"FOWNER",
	I0731 10:53:33.951402   96871 command_runner.go:130] > # 	"SETGID",
	I0731 10:53:33.951411   96871 command_runner.go:130] > # 	"SETUID",
	I0731 10:53:33.951420   96871 command_runner.go:130] > # 	"SETPCAP",
	I0731 10:53:33.951430   96871 command_runner.go:130] > # 	"NET_BIND_SERVICE",
	I0731 10:53:33.951438   96871 command_runner.go:130] > # 	"KILL",
	I0731 10:53:33.951446   96871 command_runner.go:130] > # ]
	I0731 10:53:33.951454   96871 command_runner.go:130] > # Add capabilities to the inheritable set, as well as the default group of permitted, bounding and effective.
	I0731 10:53:33.951468   96871 command_runner.go:130] > # If capabilities are expected to work for non-root users, this option should be set.
	I0731 10:53:33.951486   96871 command_runner.go:130] > # add_inheritable_capabilities = true
	I0731 10:53:33.951501   96871 command_runner.go:130] > # List of default sysctls. If it is empty or commented out, only the sysctls
	I0731 10:53:33.951517   96871 command_runner.go:130] > # defined in the container json file by the user/kube will be added.
	I0731 10:53:33.951527   96871 command_runner.go:130] > # default_sysctls = [
	I0731 10:53:33.951535   96871 command_runner.go:130] > # ]
	I0731 10:53:33.951545   96871 command_runner.go:130] > # List of devices on the host that a
	I0731 10:53:33.951554   96871 command_runner.go:130] > # user can specify with the "io.kubernetes.cri-o.Devices" allowed annotation.
	I0731 10:53:33.951563   96871 command_runner.go:130] > # allowed_devices = [
	I0731 10:53:33.951573   96871 command_runner.go:130] > # 	"/dev/fuse",
	I0731 10:53:33.951583   96871 command_runner.go:130] > # ]
	I0731 10:53:33.951594   96871 command_runner.go:130] > # List of additional devices. specified as
	I0731 10:53:33.951636   96871 command_runner.go:130] > # "<device-on-host>:<device-on-container>:<permissions>", for example: "--device=/dev/sdc:/dev/xvdc:rwm".
	I0731 10:53:33.951645   96871 command_runner.go:130] > # If it is empty or commented out, only the devices
	I0731 10:53:33.951658   96871 command_runner.go:130] > # defined in the container json file by the user/kube will be added.
	I0731 10:53:33.951669   96871 command_runner.go:130] > # additional_devices = [
	I0731 10:53:33.951675   96871 command_runner.go:130] > # ]
	I0731 10:53:33.951687   96871 command_runner.go:130] > # List of directories to scan for CDI Spec files.
	I0731 10:53:33.951697   96871 command_runner.go:130] > # cdi_spec_dirs = [
	I0731 10:53:33.951706   96871 command_runner.go:130] > # 	"/etc/cdi",
	I0731 10:53:33.951716   96871 command_runner.go:130] > # 	"/var/run/cdi",
	I0731 10:53:33.951728   96871 command_runner.go:130] > # ]
	I0731 10:53:33.951739   96871 command_runner.go:130] > # Change the default behavior of setting container devices uid/gid from CRI's
	I0731 10:53:33.951750   96871 command_runner.go:130] > # SecurityContext (RunAsUser/RunAsGroup) instead of taking host's uid/gid.
	I0731 10:53:33.951760   96871 command_runner.go:130] > # Defaults to false.
	I0731 10:53:33.951773   96871 command_runner.go:130] > # device_ownership_from_security_context = false
	I0731 10:53:33.951786   96871 command_runner.go:130] > # Path to OCI hooks directories for automatically executed hooks. If one of the
	I0731 10:53:33.951800   96871 command_runner.go:130] > # directories does not exist, then CRI-O will automatically skip them.
	I0731 10:53:33.951809   96871 command_runner.go:130] > # hooks_dir = [
	I0731 10:53:33.951820   96871 command_runner.go:130] > # 	"/usr/share/containers/oci/hooks.d",
	I0731 10:53:33.951826   96871 command_runner.go:130] > # ]
	I0731 10:53:33.951833   96871 command_runner.go:130] > # Path to the file specifying the defaults mounts for each container. The
	I0731 10:53:33.951847   96871 command_runner.go:130] > # format of the config is /SRC:/DST, one mount per line. Notice that CRI-O reads
	I0731 10:53:33.951860   96871 command_runner.go:130] > # its default mounts from the following two files:
	I0731 10:53:33.951869   96871 command_runner.go:130] > #
	I0731 10:53:33.951883   96871 command_runner.go:130] > #   1) /etc/containers/mounts.conf (i.e., default_mounts_file): This is the
	I0731 10:53:33.951897   96871 command_runner.go:130] > #      override file, where users can either add in their own default mounts, or
	I0731 10:53:33.951910   96871 command_runner.go:130] > #      override the default mounts shipped with the package.
	I0731 10:53:33.951918   96871 command_runner.go:130] > #
	I0731 10:53:33.951932   96871 command_runner.go:130] > #   2) /usr/share/containers/mounts.conf: This is the default file read for
	I0731 10:53:33.951946   96871 command_runner.go:130] > #      mounts. If you want CRI-O to read from a different, specific mounts file,
	I0731 10:53:33.951961   96871 command_runner.go:130] > #      you can change the default_mounts_file. Note, if this is done, CRI-O will
	I0731 10:53:33.951973   96871 command_runner.go:130] > #      only add mounts it finds in this file.
	I0731 10:53:33.951982   96871 command_runner.go:130] > #
	I0731 10:53:33.951992   96871 command_runner.go:130] > # default_mounts_file = ""
	I0731 10:53:33.952004   96871 command_runner.go:130] > # Maximum number of processes allowed in a container.
	I0731 10:53:33.952018   96871 command_runner.go:130] > # This option is deprecated. The Kubelet flag '--pod-pids-limit' should be used instead.
	I0731 10:53:33.952026   96871 command_runner.go:130] > # pids_limit = 0
	I0731 10:53:33.952033   96871 command_runner.go:130] > # Maximum size allowed for the container log file. Negative numbers indicate
	I0731 10:53:33.952046   96871 command_runner.go:130] > # that no size limit is imposed. If it is positive, it must be >= 8192 to
	I0731 10:53:33.952060   96871 command_runner.go:130] > # match/exceed conmon's read buffer. The file is truncated and re-opened so the
	I0731 10:53:33.952082   96871 command_runner.go:130] > # limit is never exceeded. This option is deprecated. The Kubelet flag '--container-log-max-size' should be used instead.
	I0731 10:53:33.952092   96871 command_runner.go:130] > # log_size_max = -1
	I0731 10:53:33.952106   96871 command_runner.go:130] > # Whether container output should be logged to journald in addition to the kubernetes log file
	I0731 10:53:33.952117   96871 command_runner.go:130] > # log_to_journald = false
	I0731 10:53:33.952127   96871 command_runner.go:130] > # Path to directory in which container exit files are written to by conmon.
	I0731 10:53:33.952137   96871 command_runner.go:130] > # container_exits_dir = "/var/run/crio/exits"
	I0731 10:53:33.952153   96871 command_runner.go:130] > # Path to directory for container attach sockets.
	I0731 10:53:33.952166   96871 command_runner.go:130] > # container_attach_socket_dir = "/var/run/crio"
	I0731 10:53:33.952179   96871 command_runner.go:130] > # The prefix to use for the source of the bind mounts.
	I0731 10:53:33.952189   96871 command_runner.go:130] > # bind_mount_prefix = ""
	I0731 10:53:33.952201   96871 command_runner.go:130] > # If set to true, all containers will run in read-only mode.
	I0731 10:53:33.952211   96871 command_runner.go:130] > # read_only = false
	I0731 10:53:33.952224   96871 command_runner.go:130] > # Changes the verbosity of the logs based on the level it is set to. Options
	I0731 10:53:33.952234   96871 command_runner.go:130] > # are fatal, panic, error, warn, info, debug and trace. This option supports
	I0731 10:53:33.952243   96871 command_runner.go:130] > # live configuration reload.
	I0731 10:53:33.952254   96871 command_runner.go:130] > # log_level = "info"
	I0731 10:53:33.952266   96871 command_runner.go:130] > # Filter the log messages by the provided regular expression.
	I0731 10:53:33.952279   96871 command_runner.go:130] > # This option supports live configuration reload.
	I0731 10:53:33.952288   96871 command_runner.go:130] > # log_filter = ""
	I0731 10:53:33.952302   96871 command_runner.go:130] > # The UID mappings for the user namespace of each container. A range is
	I0731 10:53:33.952320   96871 command_runner.go:130] > # specified in the form containerUID:HostUID:Size. Multiple ranges must be
	I0731 10:53:33.952330   96871 command_runner.go:130] > # separated by commas.
	I0731 10:53:33.952336   96871 command_runner.go:130] > # uid_mappings = ""
	I0731 10:53:33.952346   96871 command_runner.go:130] > # The GID mappings for the user namespace of each container. A range is
	I0731 10:53:33.952363   96871 command_runner.go:130] > # specified in the form containerGID:HostGID:Size. Multiple ranges must be
	I0731 10:53:33.952376   96871 command_runner.go:130] > # separated by commas.
	I0731 10:53:33.952385   96871 command_runner.go:130] > # gid_mappings = ""
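
	As a sketch of the containerUID:HostUID:Size form described above (the host-side range below is illustrative, not taken from this run):

		[crio.runtime]
		uid_mappings = "0:100000:65536"
		gid_mappings = "0:100000:65536"
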
	I0731 10:53:33.952399   96871 command_runner.go:130] > # If set, CRI-O will reject any attempt to map host UIDs below this value
	I0731 10:53:33.952412   96871 command_runner.go:130] > # into user namespaces.  A negative value indicates that no minimum is set,
	I0731 10:53:33.952423   96871 command_runner.go:130] > # so specifying mappings will only be allowed for pods that run as UID 0.
	I0731 10:53:33.952432   96871 command_runner.go:130] > # minimum_mappable_uid = -1
	I0731 10:53:33.952447   96871 command_runner.go:130] > # If set, CRI-O will reject any attempt to map host GIDs below this value
	I0731 10:53:33.952461   96871 command_runner.go:130] > # into user namespaces.  A negative value indicates that no minimum is set,
	I0731 10:53:33.952474   96871 command_runner.go:130] > # so specifying mappings will only be allowed for pods that run as UID 0.
	I0731 10:53:33.952488   96871 command_runner.go:130] > # minimum_mappable_gid = -1
	I0731 10:53:33.952501   96871 command_runner.go:130] > # The minimal amount of time in seconds to wait before issuing a timeout
	I0731 10:53:33.952511   96871 command_runner.go:130] > # regarding the proper termination of the container. The lowest possible
	I0731 10:53:33.952522   96871 command_runner.go:130] > # value is 30s; lower values are ignored by CRI-O.
	I0731 10:53:33.952532   96871 command_runner.go:130] > # ctr_stop_timeout = 30
	I0731 10:53:33.952543   96871 command_runner.go:130] > # drop_infra_ctr determines whether CRI-O drops the infra container
	I0731 10:53:33.952560   96871 command_runner.go:130] > # when a pod does not have a private PID namespace, and does not use
	I0731 10:53:33.952571   96871 command_runner.go:130] > # a kernel separating runtime (like kata).
	I0731 10:53:33.952586   96871 command_runner.go:130] > # It requires manage_ns_lifecycle to be true.
	I0731 10:53:33.952596   96871 command_runner.go:130] > # drop_infra_ctr = true
	I0731 10:53:33.952606   96871 command_runner.go:130] > # infra_ctr_cpuset determines what CPUs will be used to run infra containers.
	I0731 10:53:33.952617   96871 command_runner.go:130] > # You can use linux CPU list format to specify desired CPUs.
	I0731 10:53:33.952634   96871 command_runner.go:130] > # To get better isolation for guaranteed pods, set this parameter to be equal to kubelet reserved-cpus.
	I0731 10:53:33.952645   96871 command_runner.go:130] > # infra_ctr_cpuset = ""
	I0731 10:53:33.952658   96871 command_runner.go:130] > # The directory where the state of the managed namespaces gets tracked.
	I0731 10:53:33.952669   96871 command_runner.go:130] > # Only used when manage_ns_lifecycle is true.
	I0731 10:53:33.952680   96871 command_runner.go:130] > # namespaces_dir = "/var/run"
	I0731 10:53:33.952691   96871 command_runner.go:130] > # pinns_path is the path to find the pinns binary, which is needed to manage namespace lifecycle
	I0731 10:53:33.952700   96871 command_runner.go:130] > # pinns_path = ""
	I0731 10:53:33.952715   96871 command_runner.go:130] > # default_runtime is the _name_ of the OCI runtime to be used as the default.
	I0731 10:53:33.952728   96871 command_runner.go:130] > # The name is matched against the runtimes map below. If this value is changed,
	I0731 10:53:33.952743   96871 command_runner.go:130] > # the corresponding existing entry from the runtimes map below will be ignored.
	I0731 10:53:33.952753   96871 command_runner.go:130] > # default_runtime = "runc"
	I0731 10:53:33.952764   96871 command_runner.go:130] > # A list of paths that, when absent from the host,
	I0731 10:53:33.952780   96871 command_runner.go:130] > # will cause container creation to fail (as opposed to the current behavior of creating the path as a directory).
	I0731 10:53:33.952795   96871 command_runner.go:130] > # This option is to protect from source locations whose existence as a directory could jeopardize the health of the node, and whose
	I0731 10:53:33.952807   96871 command_runner.go:130] > # creation as a file is not desired either.
	I0731 10:53:33.952825   96871 command_runner.go:130] > # An example is /etc/hostname, which will cause failures on reboot if it's created as a directory, but often doesn't exist because
	I0731 10:53:33.952837   96871 command_runner.go:130] > # the hostname is being managed dynamically.
	I0731 10:53:33.952847   96871 command_runner.go:130] > # absent_mount_sources_to_reject = [
	I0731 10:53:33.952856   96871 command_runner.go:130] > # ]
	I0731 10:53:33.952869   96871 command_runner.go:130] > # The "crio.runtime.runtimes" table defines a list of OCI compatible runtimes.
	I0731 10:53:33.952881   96871 command_runner.go:130] > # The runtime to use is picked based on the runtime handler provided by the CRI.
	I0731 10:53:33.952891   96871 command_runner.go:130] > # If no runtime handler is provided, the runtime will be picked based on the level
	I0731 10:53:33.952906   96871 command_runner.go:130] > # of trust of the workload. Each entry in the table should follow the format:
	I0731 10:53:33.952915   96871 command_runner.go:130] > #
	I0731 10:53:33.952923   96871 command_runner.go:130] > #[crio.runtime.runtimes.runtime-handler]
	I0731 10:53:33.952934   96871 command_runner.go:130] > #  runtime_path = "/path/to/the/executable"
	I0731 10:53:33.952944   96871 command_runner.go:130] > #  runtime_type = "oci"
	I0731 10:53:33.952955   96871 command_runner.go:130] > #  runtime_root = "/path/to/the/root"
	I0731 10:53:33.952966   96871 command_runner.go:130] > #  privileged_without_host_devices = false
	I0731 10:53:33.952976   96871 command_runner.go:130] > #  allowed_annotations = []
	I0731 10:53:33.952984   96871 command_runner.go:130] > # Where:
	I0731 10:53:33.952993   96871 command_runner.go:130] > # - runtime-handler: name used to identify the runtime
	I0731 10:53:33.953011   96871 command_runner.go:130] > # - runtime_path (optional, string): absolute path to the runtime executable in
	I0731 10:53:33.953025   96871 command_runner.go:130] > #   the host filesystem. If omitted, the runtime-handler identifier should match
	I0731 10:53:33.953040   96871 command_runner.go:130] > #   the runtime executable name, and the runtime executable should be placed
	I0731 10:53:33.953049   96871 command_runner.go:130] > #   in $PATH.
	I0731 10:53:33.953062   96871 command_runner.go:130] > # - runtime_type (optional, string): type of runtime, one of: "oci", "vm". If
	I0731 10:53:33.953073   96871 command_runner.go:130] > #   omitted, an "oci" runtime is assumed.
	I0731 10:53:33.953086   96871 command_runner.go:130] > # - runtime_root (optional, string): root directory for storage of containers
	I0731 10:53:33.953093   96871 command_runner.go:130] > #   state.
	I0731 10:53:33.953102   96871 command_runner.go:130] > # - runtime_config_path (optional, string): the path for the runtime configuration
	I0731 10:53:33.953116   96871 command_runner.go:130] > #   file. This can only be used when using the VM runtime_type.
	I0731 10:53:33.953130   96871 command_runner.go:130] > # - privileged_without_host_devices (optional, bool): an option for restricting
	I0731 10:53:33.953143   96871 command_runner.go:130] > #   host devices from being passed to privileged containers.
	I0731 10:53:33.953156   96871 command_runner.go:130] > # - allowed_annotations (optional, array of strings): an option for specifying
	I0731 10:53:33.953170   96871 command_runner.go:130] > #   a list of experimental annotations that this runtime handler is allowed to process.
	I0731 10:53:33.953178   96871 command_runner.go:130] > #   The currently recognized values are:
	I0731 10:53:33.953185   96871 command_runner.go:130] > #   "io.kubernetes.cri-o.userns-mode" for configuring a user namespace for the pod.
	I0731 10:53:33.953201   96871 command_runner.go:130] > #   "io.kubernetes.cri-o.cgroup2-mount-hierarchy-rw" for mounting cgroups writably when set to "true".
	I0731 10:53:33.953215   96871 command_runner.go:130] > #   "io.kubernetes.cri-o.Devices" for configuring devices for the pod.
	I0731 10:53:33.953231   96871 command_runner.go:130] > #   "io.kubernetes.cri-o.ShmSize" for configuring the size of /dev/shm.
	I0731 10:53:33.953247   96871 command_runner.go:130] > #   "io.kubernetes.cri-o.UnifiedCgroup.$CTR_NAME" for configuring the cgroup v2 unified block for a container.
	I0731 10:53:33.953261   96871 command_runner.go:130] > #   "io.containers.trace-syscall" for tracing syscalls via the OCI seccomp BPF hook.
	I0731 10:53:33.953274   96871 command_runner.go:130] > #   "io.kubernetes.cri.rdt-class" for setting the RDT class of a container
	I0731 10:53:33.953285   96871 command_runner.go:130] > # - monitor_exec_cgroup (optional, string): if set to "container", indicates exec probes
	I0731 10:53:33.953295   96871 command_runner.go:130] > #   should be moved to the container's cgroup
	I0731 10:53:33.953306   96871 command_runner.go:130] > [crio.runtime.runtimes.runc]
	I0731 10:53:33.953319   96871 command_runner.go:130] > runtime_path = "/usr/lib/cri-o-runc/sbin/runc"
	I0731 10:53:33.953329   96871 command_runner.go:130] > runtime_type = "oci"
	I0731 10:53:33.953339   96871 command_runner.go:130] > runtime_root = "/run/runc"
	I0731 10:53:33.953349   96871 command_runner.go:130] > runtime_config_path = ""
	I0731 10:53:33.953358   96871 command_runner.go:130] > monitor_path = ""
	I0731 10:53:33.953367   96871 command_runner.go:130] > monitor_cgroup = ""
	I0731 10:53:33.953375   96871 command_runner.go:130] > monitor_exec_cgroup = ""
	I0731 10:53:33.953449   96871 command_runner.go:130] > # crun is a fast and lightweight fully featured OCI runtime and C library for
	I0731 10:53:33.953462   96871 command_runner.go:130] > # running containers
	I0731 10:53:33.953466   96871 command_runner.go:130] > #[crio.runtime.runtimes.crun]
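
	A hypothetical uncommented crun entry following the runtime-handler format above, assuming crun were installed at /usr/bin/crun:

		[crio.runtime.runtimes.crun]
		runtime_path = "/usr/bin/crun"
		runtime_type = "oci"
		runtime_root = "/run/crun"
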
	I0731 10:53:33.953473   96871 command_runner.go:130] > # Kata Containers is an OCI runtime, where containers are run inside lightweight
	I0731 10:53:33.953493   96871 command_runner.go:130] > # VMs. Kata provides additional isolation towards the host, minimizing the host attack
	I0731 10:53:33.953506   96871 command_runner.go:130] > # surface and mitigating the consequences of a container breakout.
	I0731 10:53:33.953518   96871 command_runner.go:130] > # Kata Containers with the default configured VMM
	I0731 10:53:33.953529   96871 command_runner.go:130] > #[crio.runtime.runtimes.kata-runtime]
	I0731 10:53:33.953539   96871 command_runner.go:130] > # Kata Containers with the QEMU VMM
	I0731 10:53:33.953548   96871 command_runner.go:130] > #[crio.runtime.runtimes.kata-qemu]
	I0731 10:53:33.953567   96871 command_runner.go:130] > # Kata Containers with the Firecracker VMM
	I0731 10:53:33.953578   96871 command_runner.go:130] > #[crio.runtime.runtimes.kata-fc]
	I0731 10:53:33.953594   96871 command_runner.go:130] > # The workloads table defines ways to customize containers with different resources
	I0731 10:53:33.953606   96871 command_runner.go:130] > # that work based on annotations, rather than the CRI.
	I0731 10:53:33.953619   96871 command_runner.go:130] > # Note, the behavior of this table is EXPERIMENTAL and may change at any time.
	I0731 10:53:33.953635   96871 command_runner.go:130] > # Each workload has a name, activation_annotation, annotation_prefix and set of resources it supports mutating.
	I0731 10:53:33.953648   96871 command_runner.go:130] > # The currently supported resources are "cpu" (to configure the cpu shares) and "cpuset" to configure the cpuset.
	I0731 10:53:33.953659   96871 command_runner.go:130] > # Each resource can have a default value specified, or be empty.
	I0731 10:53:33.953679   96871 command_runner.go:130] > # For a container to opt into this workload, the pod should be configured with the annotation $activation_annotation (key only, value is ignored).
	I0731 10:53:33.953696   96871 command_runner.go:130] > # To customize per-container, an annotation of the form $annotation_prefix.$resource/$ctrName = "value" can be specified
	I0731 10:53:33.953709   96871 command_runner.go:130] > # signifying for that resource type to override the default value.
	I0731 10:53:33.953725   96871 command_runner.go:130] > # If the annotation_prefix is not present, every container in the pod will be given the default values.
	I0731 10:53:33.953737   96871 command_runner.go:130] > # Example:
	I0731 10:53:33.953748   96871 command_runner.go:130] > # [crio.runtime.workloads.workload-type]
	I0731 10:53:33.953757   96871 command_runner.go:130] > # activation_annotation = "io.crio/workload"
	I0731 10:53:33.953765   96871 command_runner.go:130] > # annotation_prefix = "io.crio.workload-type"
	I0731 10:53:33.953776   96871 command_runner.go:130] > # [crio.runtime.workloads.workload-type.resources]
	I0731 10:53:33.953787   96871 command_runner.go:130] > # cpuset = "0-1"
	I0731 10:53:33.953794   96871 command_runner.go:130] > # cpushares = 0
	I0731 10:53:33.953803   96871 command_runner.go:130] > # Where:
	I0731 10:53:33.953814   96871 command_runner.go:130] > # The workload name is workload-type.
	I0731 10:53:33.953829   96871 command_runner.go:130] > # To opt in, the pod must have the "io.crio.workload" annotation (this is a precise string match).
	I0731 10:53:33.953841   96871 command_runner.go:130] > # This workload supports setting cpuset and cpu resources.
	I0731 10:53:33.953852   96871 command_runner.go:130] > # annotation_prefix is used to customize the different resources.
	I0731 10:53:33.953863   96871 command_runner.go:130] > # To configure the cpu shares a container gets in the example above, the pod would have to have the following annotation:
	I0731 10:53:33.953876   96871 command_runner.go:130] > # "io.crio.workload-type/$container_name = {"cpushares": "value"}"
	I0731 10:53:33.953885   96871 command_runner.go:130] > # 
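
	Putting the workload pieces together, a sketch of a complete entry with the value types as described above (cpuset is a CPU list string, cpushares a number); the workload name and values are illustrative:

		[crio.runtime.workloads.throttled]
		activation_annotation = "io.crio/workload"
		annotation_prefix = "io.crio.workload-type"
		[crio.runtime.workloads.throttled.resources]
		cpushares = 512
		cpuset = "0-1"
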
	I0731 10:53:33.953897   96871 command_runner.go:130] > # The crio.image table contains settings pertaining to the management of OCI images.
	I0731 10:53:33.953906   96871 command_runner.go:130] > #
	I0731 10:53:33.953921   96871 command_runner.go:130] > # CRI-O reads its configured registries defaults from the system wide
	I0731 10:53:33.953938   96871 command_runner.go:130] > # containers-registries.conf(5) located in /etc/containers/registries.conf. If
	I0731 10:53:33.953951   96871 command_runner.go:130] > # you want to modify just CRI-O, you can change the registries configuration in
	I0731 10:53:33.953960   96871 command_runner.go:130] > # this file. Otherwise, leave insecure_registries and registries commented out to
	I0731 10:53:33.953972   96871 command_runner.go:130] > # use the system's defaults from /etc/containers/registries.conf.
	I0731 10:53:33.953983   96871 command_runner.go:130] > [crio.image]
	I0731 10:53:33.953993   96871 command_runner.go:130] > # Default transport for pulling images from a remote container storage.
	I0731 10:53:33.954005   96871 command_runner.go:130] > # default_transport = "docker://"
	I0731 10:53:33.954018   96871 command_runner.go:130] > # The path to a file containing credentials necessary for pulling images from
	I0731 10:53:33.954032   96871 command_runner.go:130] > # secure registries. The file is similar to that of /var/lib/kubelet/config.json
	I0731 10:53:33.954042   96871 command_runner.go:130] > # global_auth_file = ""
	I0731 10:53:33.954052   96871 command_runner.go:130] > # The image used to instantiate infra containers.
	I0731 10:53:33.954060   96871 command_runner.go:130] > # This option supports live configuration reload.
	I0731 10:53:33.954071   96871 command_runner.go:130] > pause_image = "registry.k8s.io/pause:3.9"
	I0731 10:53:33.954086   96871 command_runner.go:130] > # The path to a file containing credentials specific for pulling the pause_image from
	I0731 10:53:33.954100   96871 command_runner.go:130] > # above. The file is similar to that of /var/lib/kubelet/config.json
	I0731 10:53:33.954111   96871 command_runner.go:130] > # This option supports live configuration reload.
	I0731 10:53:33.954121   96871 command_runner.go:130] > # pause_image_auth_file = ""
	I0731 10:53:33.954134   96871 command_runner.go:130] > # The command to run to have a container stay in the paused state.
	I0731 10:53:33.954149   96871 command_runner.go:130] > # When explicitly set to "", it will fall back to the entrypoint and command
	I0731 10:53:33.954160   96871 command_runner.go:130] > # specified in the pause image. When commented out, it will fall back to the
	I0731 10:53:33.954178   96871 command_runner.go:130] > # default: "/pause". This option supports live configuration reload.
	I0731 10:53:33.954204   96871 command_runner.go:130] > # pause_command = "/pause"
	I0731 10:53:33.954219   96871 command_runner.go:130] > # Path to the file which decides what sort of policy we use when deciding
	I0731 10:53:33.954233   96871 command_runner.go:130] > # whether or not to trust an image that we've pulled. It is not recommended that
	I0731 10:53:33.954247   96871 command_runner.go:130] > # this option be used, as the default behavior of using the system-wide default
	I0731 10:53:33.954260   96871 command_runner.go:130] > # policy (i.e., /etc/containers/policy.json) is most often preferred. Please
	I0731 10:53:33.954269   96871 command_runner.go:130] > # refer to containers-policy.json(5) for more details.
	I0731 10:53:33.954277   96871 command_runner.go:130] > # signature_policy = ""
	I0731 10:53:33.954297   96871 command_runner.go:130] > # List of registries to skip TLS verification for pulling images. Please
	I0731 10:53:33.954311   96871 command_runner.go:130] > # consider configuring the registries via /etc/containers/registries.conf before
	I0731 10:53:33.954321   96871 command_runner.go:130] > # changing them here.
	I0731 10:53:33.954332   96871 command_runner.go:130] > # insecure_registries = [
	I0731 10:53:33.954340   96871 command_runner.go:130] > # ]
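
	For example, to skip TLS verification for a single private registry (the address is hypothetical):

		[crio.image]
		insecure_registries = [
			"192.168.49.2:5000",
		]
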
	I0731 10:53:33.954354   96871 command_runner.go:130] > # Controls how image volumes are handled. The valid values are mkdir, bind and
	I0731 10:53:33.954366   96871 command_runner.go:130] > # ignore; the latter will ignore volumes entirely.
	I0731 10:53:33.954375   96871 command_runner.go:130] > # image_volumes = "mkdir"
	I0731 10:53:33.954393   96871 command_runner.go:130] > # Temporary directory to use for storing big files
	I0731 10:53:33.954404   96871 command_runner.go:130] > # big_files_temporary_dir = ""
	I0731 10:53:33.954419   96871 command_runner.go:130] > # The crio.network table contains settings pertaining to the management of
	I0731 10:53:33.954428   96871 command_runner.go:130] > # CNI plugins.
	I0731 10:53:33.954438   96871 command_runner.go:130] > [crio.network]
	I0731 10:53:33.954451   96871 command_runner.go:130] > # The default CNI network name to be selected. If not set or "", then
	I0731 10:53:33.954463   96871 command_runner.go:130] > # CRI-O will pick up the first one found in network_dir.
	I0731 10:53:33.954471   96871 command_runner.go:130] > # cni_default_network = ""
	I0731 10:53:33.954480   96871 command_runner.go:130] > # Path to the directory where CNI configuration files are located.
	I0731 10:53:33.954495   96871 command_runner.go:130] > # network_dir = "/etc/cni/net.d/"
	I0731 10:53:33.954509   96871 command_runner.go:130] > # Paths to directories where CNI plugin binaries are located.
	I0731 10:53:33.954519   96871 command_runner.go:130] > # plugin_dirs = [
	I0731 10:53:33.954529   96871 command_runner.go:130] > # 	"/opt/cni/bin/",
	I0731 10:53:33.954537   96871 command_runner.go:130] > # ]
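
	A sketch pinning CRI-O to a specific CNI network instead of the first one found in network_dir (the network name is illustrative; this run recommends kindnet):

		[crio.network]
		cni_default_network = "kindnet"
		network_dir = "/etc/cni/net.d/"
		plugin_dirs = [
			"/opt/cni/bin/",
		]
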
	I0731 10:53:33.954550   96871 command_runner.go:130] > # A necessary configuration for Prometheus-based metrics retrieval
	I0731 10:53:33.954559   96871 command_runner.go:130] > [crio.metrics]
	I0731 10:53:33.954570   96871 command_runner.go:130] > # Globally enable or disable metrics support.
	I0731 10:53:33.954578   96871 command_runner.go:130] > # enable_metrics = false
	I0731 10:53:33.954587   96871 command_runner.go:130] > # Specify enabled metrics collectors.
	I0731 10:53:33.954598   96871 command_runner.go:130] > # By default, all metrics are enabled.
	I0731 10:53:33.954611   96871 command_runner.go:130] > # It is possible to prefix the metrics with "container_runtime_" and "crio_".
	I0731 10:53:33.954625   96871 command_runner.go:130] > # For example, the metrics collector "operations" would be treated in the same
	I0731 10:53:33.954638   96871 command_runner.go:130] > # way as "crio_operations" and "container_runtime_crio_operations".
	I0731 10:53:33.954648   96871 command_runner.go:130] > # metrics_collectors = [
	I0731 10:53:33.954657   96871 command_runner.go:130] > # 	"operations",
	I0731 10:53:33.954669   96871 command_runner.go:130] > # 	"operations_latency_microseconds_total",
	I0731 10:53:33.954678   96871 command_runner.go:130] > # 	"operations_latency_microseconds",
	I0731 10:53:33.954685   96871 command_runner.go:130] > # 	"operations_errors",
	I0731 10:53:33.954692   96871 command_runner.go:130] > # 	"image_pulls_by_digest",
	I0731 10:53:33.954702   96871 command_runner.go:130] > # 	"image_pulls_by_name",
	I0731 10:53:33.954713   96871 command_runner.go:130] > # 	"image_pulls_by_name_skipped",
	I0731 10:53:33.954723   96871 command_runner.go:130] > # 	"image_pulls_failures",
	I0731 10:53:33.954734   96871 command_runner.go:130] > # 	"image_pulls_successes",
	I0731 10:53:33.954745   96871 command_runner.go:130] > # 	"image_pulls_layer_size",
	I0731 10:53:33.954754   96871 command_runner.go:130] > # 	"image_layer_reuse",
	I0731 10:53:33.954764   96871 command_runner.go:130] > # 	"containers_oom_total",
	I0731 10:53:33.954777   96871 command_runner.go:130] > # 	"containers_oom",
	I0731 10:53:33.954785   96871 command_runner.go:130] > # 	"processes_defunct",
	I0731 10:53:33.954791   96871 command_runner.go:130] > # 	"operations_total",
	I0731 10:53:33.954801   96871 command_runner.go:130] > # 	"operations_latency_seconds",
	I0731 10:53:33.954813   96871 command_runner.go:130] > # 	"operations_latency_seconds_total",
	I0731 10:53:33.954824   96871 command_runner.go:130] > # 	"operations_errors_total",
	I0731 10:53:33.954834   96871 command_runner.go:130] > # 	"image_pulls_bytes_total",
	I0731 10:53:33.954846   96871 command_runner.go:130] > # 	"image_pulls_skipped_bytes_total",
	I0731 10:53:33.954857   96871 command_runner.go:130] > # 	"image_pulls_failure_total",
	I0731 10:53:33.954867   96871 command_runner.go:130] > # 	"image_pulls_success_total",
	I0731 10:53:33.954877   96871 command_runner.go:130] > # 	"image_layer_reuse_total",
	I0731 10:53:33.954884   96871 command_runner.go:130] > # 	"containers_oom_count_total",
	I0731 10:53:33.954889   96871 command_runner.go:130] > # ]
	I0731 10:53:33.954901   96871 command_runner.go:130] > # The port on which the metrics server will listen.
	I0731 10:53:33.954911   96871 command_runner.go:130] > # metrics_port = 9090
	I0731 10:53:33.954920   96871 command_runner.go:130] > # Local socket path to bind the metrics server to
	I0731 10:53:33.954931   96871 command_runner.go:130] > # metrics_socket = ""
	I0731 10:53:33.954942   96871 command_runner.go:130] > # The certificate for the secure metrics server.
	I0731 10:53:33.954958   96871 command_runner.go:130] > # If the certificate is not available on disk, then CRI-O will generate a
	I0731 10:53:33.954972   96871 command_runner.go:130] > # self-signed one. CRI-O also watches for changes of this path and reloads the
	I0731 10:53:33.954981   96871 command_runner.go:130] > # certificate on any modification event.
	I0731 10:53:33.954989   96871 command_runner.go:130] > # metrics_cert = ""
	I0731 10:53:33.954998   96871 command_runner.go:130] > # The certificate key for the secure metrics server.
	I0731 10:53:33.955010   96871 command_runner.go:130] > # Behaves in the same way as the metrics_cert.
	I0731 10:53:33.955020   96871 command_runner.go:130] > # metrics_key = ""
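
	A sketch enabling the metrics endpoint with a reduced collector set, relying on the prefix equivalence noted above ("operations" matches "crio_operations"):

		[crio.metrics]
		enable_metrics = true
		metrics_collectors = [
			"operations",
			"image_pulls_failures",
		]
		metrics_port = 9090
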
	I0731 10:53:33.955034   96871 command_runner.go:130] > # A necessary configuration for OpenTelemetry trace data exporting
	I0731 10:53:33.955043   96871 command_runner.go:130] > [crio.tracing]
	I0731 10:53:33.955056   96871 command_runner.go:130] > # Globally enable or disable exporting OpenTelemetry traces.
	I0731 10:53:33.955066   96871 command_runner.go:130] > # enable_tracing = false
	I0731 10:53:33.955076   96871 command_runner.go:130] > # Address on which the gRPC trace collector listens.
	I0731 10:53:33.955083   96871 command_runner.go:130] > # tracing_endpoint = "0.0.0.0:4317"
	I0731 10:53:33.955092   96871 command_runner.go:130] > # Number of samples to collect per million spans.
	I0731 10:53:33.955103   96871 command_runner.go:130] > # tracing_sampling_rate_per_million = 0
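
	A sketch enabling tracing; the endpoint is the documented default, and the sampling rate (1000 per million, i.e. 0.1%) is illustrative:

		[crio.tracing]
		enable_tracing = true
		tracing_endpoint = "0.0.0.0:4317"
		tracing_sampling_rate_per_million = 1000
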
	I0731 10:53:33.955117   96871 command_runner.go:130] > # Necessary information pertaining to container and pod stats reporting.
	I0731 10:53:33.955127   96871 command_runner.go:130] > [crio.stats]
	I0731 10:53:33.955140   96871 command_runner.go:130] > # The number of seconds between collecting pod and container stats.
	I0731 10:53:33.955156   96871 command_runner.go:130] > # If set to 0, the stats are collected on-demand instead.
	I0731 10:53:33.955165   96871 command_runner.go:130] > # stats_collection_period = 0
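
	A sketch switching stats collection from on-demand to a fixed period (the 10-second value is illustrative):

		[crio.stats]
		stats_collection_period = 10
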
	I0731 10:53:33.955248   96871 cni.go:84] Creating CNI manager for ""
	I0731 10:53:33.955257   96871 cni.go:136] 2 nodes found, recommending kindnet
	I0731 10:53:33.955268   96871 kubeadm.go:87] Using pod CIDR: 10.244.0.0/16
	I0731 10:53:33.955299   96871 kubeadm.go:176] kubeadm options: {CertDir:/var/lib/minikube/certs ServiceCIDR:10.96.0.0/12 PodSubnet:10.244.0.0/16 AdvertiseAddress:192.168.58.3 APIServerPort:8443 KubernetesVersion:v1.27.3 EtcdDataDir:/var/lib/minikube/etcd EtcdExtraArgs:map[] ClusterName:multinode-776386 NodeName:multinode-776386-m02 DNSDomain:cluster.local CRISocket:/var/run/crio/crio.sock ImageRepository: ComponentOptions:[{Component:apiServer ExtraArgs:map[enable-admission-plugins:NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota] Pairs:map[certSANs:["127.0.0.1", "localhost", "192.168.58.2"]]} {Component:controllerManager ExtraArgs:map[allocate-node-cidrs:true leader-elect:false] Pairs:map[]} {Component:scheduler ExtraArgs:map[leader-elect:false] Pairs:map[]}] FeatureArgs:map[] NodeIP:192.168.58.3 CgroupDriver:cgroupfs ClientCAFile:/var/lib/minikube/certs/ca.crt StaticPodPath:/etc/kubernetes/manifests ControlPlaneAddress:control-plane.minikube.internal KubeProxyOptions:map[] ResolvConfSearchRegression:false KubeletConfigOpts:map[hairpinMode:hairpin-veth runtimeRequestTimeout:15m] PrependCriSocketUnix:true}
	I0731 10:53:33.955446   96871 kubeadm.go:181] kubeadm config:
	apiVersion: kubeadm.k8s.io/v1beta3
	kind: InitConfiguration
	localAPIEndpoint:
	  advertiseAddress: 192.168.58.3
	  bindPort: 8443
	bootstrapTokens:
	  - groups:
	      - system:bootstrappers:kubeadm:default-node-token
	    ttl: 24h0m0s
	    usages:
	      - signing
	      - authentication
	nodeRegistration:
	  criSocket: unix:///var/run/crio/crio.sock
	  name: "multinode-776386-m02"
	  kubeletExtraArgs:
	    node-ip: 192.168.58.3
	  taints: []
	---
	apiVersion: kubeadm.k8s.io/v1beta3
	kind: ClusterConfiguration
	apiServer:
	  certSANs: ["127.0.0.1", "localhost", "192.168.58.2"]
	  extraArgs:
	    enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
	controllerManager:
	  extraArgs:
	    allocate-node-cidrs: "true"
	    leader-elect: "false"
	scheduler:
	  extraArgs:
	    leader-elect: "false"
	certificatesDir: /var/lib/minikube/certs
	clusterName: mk
	controlPlaneEndpoint: control-plane.minikube.internal:8443
	etcd:
	  local:
	    dataDir: /var/lib/minikube/etcd
	    extraArgs:
	      proxy-refresh-interval: "70000"
	kubernetesVersion: v1.27.3
	networking:
	  dnsDomain: cluster.local
	  podSubnet: "10.244.0.0/16"
	  serviceSubnet: 10.96.0.0/12
	---
	apiVersion: kubelet.config.k8s.io/v1beta1
	kind: KubeletConfiguration
	authentication:
	  x509:
	    clientCAFile: /var/lib/minikube/certs/ca.crt
	cgroupDriver: cgroupfs
	hairpinMode: hairpin-veth
	runtimeRequestTimeout: 15m
	clusterDomain: "cluster.local"
	# disable disk resource management by default
	imageGCHighThresholdPercent: 100
	evictionHard:
	  nodefs.available: "0%"
	  nodefs.inodesFree: "0%"
	  imagefs.available: "0%"
	failSwapOn: false
	staticPodPath: /etc/kubernetes/manifests
	---
	apiVersion: kubeproxy.config.k8s.io/v1alpha1
	kind: KubeProxyConfiguration
	clusterCIDR: "10.244.0.0/16"
	metricsBindAddress: 0.0.0.0:10249
	conntrack:
	  maxPerCore: 0
	# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_established"
	  tcpEstablishedTimeout: 0s
	# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_close"
	  tcpCloseWaitTimeout: 0s
	
	I0731 10:53:33.955509   96871 kubeadm.go:976] kubelet [Unit]
	Wants=crio.service
	
	[Service]
	ExecStart=
	ExecStart=/var/lib/minikube/binaries/v1.27.3/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --cgroups-per-qos=false --config=/var/lib/kubelet/config.yaml --container-runtime-endpoint=unix:///var/run/crio/crio.sock --enforce-node-allocatable= --hostname-override=multinode-776386-m02 --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.58.3
	
	[Install]
	 config:
	{KubernetesVersion:v1.27.3 ClusterName:multinode-776386 Namespace:default APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:crio CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI: NodeIP: NodePort:8443 NodeName:}
	I0731 10:53:33.955564   96871 ssh_runner.go:195] Run: sudo ls /var/lib/minikube/binaries/v1.27.3
	I0731 10:53:33.962629   96871 command_runner.go:130] > kubeadm
	I0731 10:53:33.962648   96871 command_runner.go:130] > kubectl
	I0731 10:53:33.962655   96871 command_runner.go:130] > kubelet
	I0731 10:53:33.963220   96871 binaries.go:44] Found k8s binaries, skipping transfer
	I0731 10:53:33.963274   96871 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/kubelet.service.d /lib/systemd/system
	I0731 10:53:33.970446   96871 ssh_runner.go:362] scp memory --> /etc/systemd/system/kubelet.service.d/10-kubeadm.conf (430 bytes)
	I0731 10:53:33.985304   96871 ssh_runner.go:362] scp memory --> /lib/systemd/system/kubelet.service (352 bytes)
	I0731 10:53:34.000267   96871 ssh_runner.go:195] Run: grep 192.168.58.2	control-plane.minikube.internal$ /etc/hosts
	I0731 10:53:34.003140   96871 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\tcontrol-plane.minikube.internal$' "/etc/hosts"; echo "192.168.58.2	control-plane.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
	I0731 10:53:34.012084   96871 host.go:66] Checking if "multinode-776386" exists ...
	I0731 10:53:34.012324   96871 config.go:182] Loaded profile config "multinode-776386": Driver=docker, ContainerRuntime=crio, KubernetesVersion=v1.27.3
	I0731 10:53:34.012310   96871 start.go:301] JoinCluster: &{Name:multinode-776386 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase:v0.0.40@sha256:8cadf23777709e43eca447c47a45f5a4635615129267ce025193040ec92a1631 Memory:2200 CPUs:2 DiskSize:20000 VMDriver: Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:0 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.27.3 ClusterName:multinode-776386 Namespace:default APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:crio CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI: NodeIP: NodePort:8443 NodeName:} Nodes:[{Name: IP:192.168.58.2 Port:8443 KubernetesVersion:v1.27.3 ContainerRuntime:crio ControlPlane:true Worker:true} {Name:m02 IP:192.168.58.3 Port:0 KubernetesVersion:v1.27.3 ContainerRuntime:crio ControlPlane:false Worker:true}] Addons:map[default-storageclass:true storage-provisioner:true] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:true ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0}
	I0731 10:53:34.012409   96871 ssh_runner.go:195] Run: /bin/bash -c "sudo env PATH="/var/lib/minikube/binaries/v1.27.3:$PATH" kubeadm token create --print-join-command --ttl=0"
	I0731 10:53:34.012454   96871 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" multinode-776386
	I0731 10:53:34.028389   96871 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32847 SSHKeyPath:/home/jenkins/minikube-integration/16969-5799/.minikube/machines/multinode-776386/id_rsa Username:docker}
	I0731 10:53:34.168739   96871 command_runner.go:130] > kubeadm join control-plane.minikube.internal:8443 --token lggndr.ngtsrdbrh3l968xu --discovery-token-ca-cert-hash sha256:332f1e2c8f6e50afb8fc2995698452a3be7de85c383b512a61c05acf2d3047a4 
	I0731 10:53:34.172364   96871 start.go:322] trying to join worker node "m02" to cluster: &{Name:m02 IP:192.168.58.3 Port:0 KubernetesVersion:v1.27.3 ContainerRuntime:crio ControlPlane:false Worker:true}
	I0731 10:53:34.172416   96871 ssh_runner.go:195] Run: /bin/bash -c "sudo env PATH="/var/lib/minikube/binaries/v1.27.3:$PATH" kubeadm join control-plane.minikube.internal:8443 --token lggndr.ngtsrdbrh3l968xu --discovery-token-ca-cert-hash sha256:332f1e2c8f6e50afb8fc2995698452a3be7de85c383b512a61c05acf2d3047a4 --ignore-preflight-errors=all --cri-socket /var/run/crio/crio.sock --node-name=multinode-776386-m02"
	I0731 10:53:34.205961   96871 command_runner.go:130] ! W0731 10:53:34.205496    1112 initconfiguration.go:120] Usage of CRI endpoints without URL scheme is deprecated and can cause kubelet errors in the future. Automatically prepending scheme "unix" to the "criSocket" with value "/var/run/crio/crio.sock". Please update your configuration!
	I0731 10:53:34.232690   96871 command_runner.go:130] ! 	[WARNING SystemVerification]: failed to parse kernel config: unable to load kernel module: "configs", output: "modprobe: FATAL: Module configs not found in directory /lib/modules/5.15.0-1038-gcp\n", err: exit status 1
	I0731 10:53:34.294871   96871 command_runner.go:130] ! 	[WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'
	I0731 10:53:36.416479   96871 command_runner.go:130] > [preflight] Running pre-flight checks
	I0731 10:53:36.416520   96871 command_runner.go:130] > [preflight] The system verification failed. Printing the output from the verification:
	I0731 10:53:36.416533   96871 command_runner.go:130] > KERNEL_VERSION: 5.15.0-1038-gcp
	I0731 10:53:36.416541   96871 command_runner.go:130] > OS: Linux
	I0731 10:53:36.416550   96871 command_runner.go:130] > CGROUPS_CPU: enabled
	I0731 10:53:36.416565   96871 command_runner.go:130] > CGROUPS_CPUACCT: enabled
	I0731 10:53:36.416575   96871 command_runner.go:130] > CGROUPS_CPUSET: enabled
	I0731 10:53:36.416581   96871 command_runner.go:130] > CGROUPS_DEVICES: enabled
	I0731 10:53:36.416586   96871 command_runner.go:130] > CGROUPS_FREEZER: enabled
	I0731 10:53:36.416594   96871 command_runner.go:130] > CGROUPS_MEMORY: enabled
	I0731 10:53:36.416600   96871 command_runner.go:130] > CGROUPS_PIDS: enabled
	I0731 10:53:36.416607   96871 command_runner.go:130] > CGROUPS_HUGETLB: enabled
	I0731 10:53:36.416612   96871 command_runner.go:130] > CGROUPS_BLKIO: enabled
	I0731 10:53:36.416620   96871 command_runner.go:130] > [preflight] Reading configuration from the cluster...
	I0731 10:53:36.416627   96871 command_runner.go:130] > [preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -o yaml'
	I0731 10:53:36.416636   96871 command_runner.go:130] > [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
	I0731 10:53:36.416645   96871 command_runner.go:130] > [kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
	I0731 10:53:36.416653   96871 command_runner.go:130] > [kubelet-start] Starting the kubelet
	I0731 10:53:36.416663   96871 command_runner.go:130] > [kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...
	I0731 10:53:36.416670   96871 command_runner.go:130] > This node has joined the cluster:
	I0731 10:53:36.416676   96871 command_runner.go:130] > * Certificate signing request was sent to apiserver and a response was received.
	I0731 10:53:36.416684   96871 command_runner.go:130] > * The Kubelet was informed of the new secure connection details.
	I0731 10:53:36.416692   96871 command_runner.go:130] > Run 'kubectl get nodes' on the control-plane to see this node join the cluster.
	I0731 10:53:36.416717   96871 ssh_runner.go:235] Completed: /bin/bash -c "sudo env PATH="/var/lib/minikube/binaries/v1.27.3:$PATH" kubeadm join control-plane.minikube.internal:8443 --token lggndr.ngtsrdbrh3l968xu --discovery-token-ca-cert-hash sha256:332f1e2c8f6e50afb8fc2995698452a3be7de85c383b512a61c05acf2d3047a4 --ignore-preflight-errors=all --cri-socket /var/run/crio/crio.sock --node-name=multinode-776386-m02": (2.244284111s)
	I0731 10:53:36.416738   96871 ssh_runner.go:195] Run: /bin/bash -c "sudo systemctl daemon-reload && sudo systemctl enable kubelet && sudo systemctl start kubelet"
	I0731 10:53:36.567026   96871 command_runner.go:130] ! Created symlink /etc/systemd/system/multi-user.target.wants/kubelet.service → /lib/systemd/system/kubelet.service.
	I0731 10:53:36.567063   96871 start.go:303] JoinCluster complete in 2.554753294s
	I0731 10:53:36.567075   96871 cni.go:84] Creating CNI manager for ""
	I0731 10:53:36.567082   96871 cni.go:136] 2 nodes found, recommending kindnet
	I0731 10:53:36.567133   96871 ssh_runner.go:195] Run: stat /opt/cni/bin/portmap
	I0731 10:53:36.570602   96871 command_runner.go:130] >   File: /opt/cni/bin/portmap
	I0731 10:53:36.570626   96871 command_runner.go:130] >   Size: 3955775   	Blocks: 7728       IO Block: 4096   regular file
	I0731 10:53:36.570635   96871 command_runner.go:130] > Device: 37h/55d	Inode: 805394      Links: 1
	I0731 10:53:36.570645   96871 command_runner.go:130] > Access: (0755/-rwxr-xr-x)  Uid: (    0/    root)   Gid: (    0/    root)
	I0731 10:53:36.570656   96871 command_runner.go:130] > Access: 2023-05-09 19:53:47.000000000 +0000
	I0731 10:53:36.570667   96871 command_runner.go:130] > Modify: 2023-05-09 19:53:47.000000000 +0000
	I0731 10:53:36.570679   96871 command_runner.go:130] > Change: 2023-07-31 10:33:56.255880962 +0000
	I0731 10:53:36.570692   96871 command_runner.go:130] >  Birth: 2023-07-31 10:33:56.227878277 +0000
	I0731 10:53:36.570741   96871 cni.go:182] applying CNI manifest using /var/lib/minikube/binaries/v1.27.3/kubectl ...
	I0731 10:53:36.570754   96871 ssh_runner.go:362] scp memory --> /var/tmp/minikube/cni.yaml (2438 bytes)
	I0731 10:53:36.586432   96871 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.27.3/kubectl apply --kubeconfig=/var/lib/minikube/kubeconfig -f /var/tmp/minikube/cni.yaml
	I0731 10:53:36.821421   96871 command_runner.go:130] > clusterrole.rbac.authorization.k8s.io/kindnet unchanged
	I0731 10:53:36.824880   96871 command_runner.go:130] > clusterrolebinding.rbac.authorization.k8s.io/kindnet unchanged
	I0731 10:53:36.827286   96871 command_runner.go:130] > serviceaccount/kindnet unchanged
	I0731 10:53:36.837141   96871 command_runner.go:130] > daemonset.apps/kindnet configured
	I0731 10:53:36.841247   96871 loader.go:373] Config loaded from file:  /home/jenkins/minikube-integration/16969-5799/kubeconfig
	I0731 10:53:36.841489   96871 kapi.go:59] client config for multinode-776386: &rest.Config{Host:"https://192.168.58.2:8443", APIPath:"", ContentConfig:rest.ContentConfig{AcceptContentTypes:"", ContentType:"", GroupVersion:(*schema.GroupVersion)(nil), NegotiatedSerializer:runtime.NegotiatedSerializer(nil)}, Username:"", Password:"", BearerToken:"", BearerTokenFile:"", Impersonate:rest.ImpersonationConfig{UserName:"", UID:"", Groups:[]string(nil), Extra:map[string][]string(nil)}, AuthProvider:<nil>, AuthConfigPersister:rest.AuthProviderConfigPersister(nil), ExecProvider:<nil>, TLSClientConfig:rest.sanitizedTLSClientConfig{Insecure:false, ServerName:"", CertFile:"/home/jenkins/minikube-integration/16969-5799/.minikube/profiles/multinode-776386/client.crt", KeyFile:"/home/jenkins/minikube-integration/16969-5799/.minikube/profiles/multinode-776386/client.key", CAFile:"/home/jenkins/minikube-integration/16969-5799/.minikube/ca.crt", CertData:[]uint8(nil), KeyData:[]uint8(nil), CAData:[]uint8(nil), NextProtos:[]string(nil)}, UserAgent:"", DisableCompression:false, Transport:http.RoundTripper(nil), WrapTransport:(transport.WrapperFunc)(0x19c2840), QPS:0, Burst:0, RateLimiter:flowcontrol.RateLimiter(nil), WarningHandler:rest.WarningHandler(nil), Timeout:0, Dial:(func(context.Context, string, string) (net.Conn, error))(nil), Proxy:(func(*http.Request) (*url.URL, error))(nil)}
	I0731 10:53:36.841850   96871 round_trippers.go:463] GET https://192.168.58.2:8443/apis/apps/v1/namespaces/kube-system/deployments/coredns/scale
	I0731 10:53:36.841864   96871 round_trippers.go:469] Request Headers:
	I0731 10:53:36.841872   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:53:36.841883   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:53:36.843752   96871 round_trippers.go:574] Response Status: 200 OK in 1 milliseconds
	I0731 10:53:36.843769   96871 round_trippers.go:577] Response Headers:
	I0731 10:53:36.843778   96871 round_trippers.go:580]     Audit-Id: f94941ae-1e58-47bf-a5c7-ac1f16a05cdc
	I0731 10:53:36.843787   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:53:36.843796   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:53:36.843809   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:53:36.843820   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:53:36.843833   96871 round_trippers.go:580]     Content-Length: 291
	I0731 10:53:36.843843   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:53:36 GMT
	I0731 10:53:36.843870   96871 request.go:1188] Response Body: {"kind":"Scale","apiVersion":"autoscaling/v1","metadata":{"name":"coredns","namespace":"kube-system","uid":"fbc8330f-80e7-4b50-9ba4-87f8ccbfc79a","resourceVersion":"448","creationTimestamp":"2023-07-31T10:52:34Z"},"spec":{"replicas":1},"status":{"replicas":1,"selector":"k8s-app=kube-dns"}}
	I0731 10:53:36.843960   96871 kapi.go:248] "coredns" deployment in "kube-system" namespace and "multinode-776386" context rescaled to 1 replicas
	I0731 10:53:36.843992   96871 start.go:223] Will wait 6m0s for node &{Name:m02 IP:192.168.58.3 Port:0 KubernetesVersion:v1.27.3 ContainerRuntime:crio ControlPlane:false Worker:true}
	I0731 10:53:36.846456   96871 out.go:177] * Verifying Kubernetes components...
	I0731 10:53:36.847990   96871 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet
	I0731 10:53:36.858690   96871 loader.go:373] Config loaded from file:  /home/jenkins/minikube-integration/16969-5799/kubeconfig
	I0731 10:53:36.858941   96871 kapi.go:59] client config for multinode-776386: &rest.Config{Host:"https://192.168.58.2:8443", APIPath:"", ContentConfig:rest.ContentConfig{AcceptContentTypes:"", ContentType:"", GroupVersion:(*schema.GroupVersion)(nil), NegotiatedSerializer:runtime.NegotiatedSerializer(nil)}, Username:"", Password:"", BearerToken:"", BearerTokenFile:"", Impersonate:rest.ImpersonationConfig{UserName:"", UID:"", Groups:[]string(nil), Extra:map[string][]string(nil)}, AuthProvider:<nil>, AuthConfigPersister:rest.AuthProviderConfigPersister(nil), ExecProvider:<nil>, TLSClientConfig:rest.sanitizedTLSClientConfig{Insecure:false, ServerName:"", CertFile:"/home/jenkins/minikube-integration/16969-5799/.minikube/profiles/multinode-776386/client.crt", KeyFile:"/home/jenkins/minikube-integration/16969-5799/.minikube/profiles/multinode-776386/client.key", CAFile:"/home/jenkins/minikube-integration/16969-5799/.minikube/ca.crt", CertData:[]uint8(nil), KeyData:[]uint8(nil), CAData:[]uint8(nil), NextProtos:[]string(nil)}, UserAgent:"", DisableCompression:false, Transport:http.RoundTripper(nil), WrapTransport:(transport.WrapperFunc)(0x19c2840), QPS:0, Burst:0, RateLimiter:flowcontrol.RateLimiter(nil), WarningHandler:rest.WarningHandler(nil), Timeout:0, Dial:(func(context.Context, string, string) (net.Conn, error))(nil), Proxy:(func(*http.Request) (*url.URL, error))(nil)}
	I0731 10:53:36.859210   96871 node_ready.go:35] waiting up to 6m0s for node "multinode-776386-m02" to be "Ready" ...
	I0731 10:53:36.859282   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-776386-m02
	I0731 10:53:36.859296   96871 round_trippers.go:469] Request Headers:
	I0731 10:53:36.859308   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:53:36.859319   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:53:36.861620   96871 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
	I0731 10:53:36.861640   96871 round_trippers.go:577] Response Headers:
	I0731 10:53:36.861650   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:53:36.861660   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:53:36.861670   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:53:36.861683   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:53:36.861696   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:53:36 GMT
	I0731 10:53:36.861708   96871 round_trippers.go:580]     Audit-Id: 6483f42a-5c18-425a-ba2d-a7603a8c8b86
	I0731 10:53:36.861882   96871 request.go:1188] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-776386-m02","uid":"2870ca66-a8b5-4dc8-97a6-393811c44eb6","resourceVersion":"483","creationTimestamp":"2023-07-31T10:53:36Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-776386-m02","kubernetes.io/os":"linux"},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"/var/run/crio/crio.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kube-controller-manager","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:53:36Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{"f:node.alpha.kubernetes.io/ttl":{}}},"f:spec":{"f:podCIDR":{},"f:podCIDRs":{".":{},"v:\"10.244.1.0/24\"":{}}}}},{"manager":"kubeadm","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:53:36Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{"f:kubeadm.alp [truncated 5101 chars]
	I0731 10:53:36.862208   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-776386-m02
	I0731 10:53:36.862221   96871 round_trippers.go:469] Request Headers:
	I0731 10:53:36.862229   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:53:36.862235   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:53:36.863916   96871 round_trippers.go:574] Response Status: 200 OK in 1 milliseconds
	I0731 10:53:36.863937   96871 round_trippers.go:577] Response Headers:
	I0731 10:53:36.863946   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:53:36.863954   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:53:36.863962   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:53:36.863972   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:53:36.863985   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:53:36 GMT
	I0731 10:53:36.864001   96871 round_trippers.go:580]     Audit-Id: 270d31cf-20db-4bfc-a710-763c35810eee
	I0731 10:53:36.864093   96871 request.go:1188] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-776386-m02","uid":"2870ca66-a8b5-4dc8-97a6-393811c44eb6","resourceVersion":"483","creationTimestamp":"2023-07-31T10:53:36Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-776386-m02","kubernetes.io/os":"linux"},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"/var/run/crio/crio.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kube-controller-manager","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:53:36Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{"f:node.alpha.kubernetes.io/ttl":{}}},"f:spec":{"f:podCIDR":{},"f:podCIDRs":{".":{},"v:\"10.244.1.0/24\"":{}}}}},{"manager":"kubeadm","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:53:36Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{"f:kubeadm.alp [truncated 5101 chars]
	I0731 10:53:37.365099   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-776386-m02
	I0731 10:53:37.365139   96871 round_trippers.go:469] Request Headers:
	I0731 10:53:37.365148   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:53:37.365154   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:53:37.367453   96871 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
	I0731 10:53:37.367475   96871 round_trippers.go:577] Response Headers:
	I0731 10:53:37.367486   96871 round_trippers.go:580]     Audit-Id: d16aa2be-70c6-41e1-8ca0-444f0b3d0f38
	I0731 10:53:37.367495   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:53:37.367504   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:53:37.367516   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:53:37.367528   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:53:37.367540   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:53:37 GMT
	I0731 10:53:37.367669   96871 request.go:1188] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-776386-m02","uid":"2870ca66-a8b5-4dc8-97a6-393811c44eb6","resourceVersion":"483","creationTimestamp":"2023-07-31T10:53:36Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-776386-m02","kubernetes.io/os":"linux"},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"/var/run/crio/crio.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kube-controller-manager","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:53:36Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{"f:node.alpha.kubernetes.io/ttl":{}}},"f:spec":{"f:podCIDR":{},"f:podCIDRs":{".":{},"v:\"10.244.1.0/24\"":{}}}}},{"manager":"kubeadm","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:53:36Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{"f:kubeadm.alp [truncated 5101 chars]
	I0731 10:53:37.865221   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-776386-m02
	I0731 10:53:37.865239   96871 round_trippers.go:469] Request Headers:
	I0731 10:53:37.865248   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:53:37.865254   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:53:37.867134   96871 round_trippers.go:574] Response Status: 200 OK in 1 milliseconds
	I0731 10:53:37.867161   96871 round_trippers.go:577] Response Headers:
	I0731 10:53:37.867171   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:53:37.867179   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:53:37.867187   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:53:37.867198   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:53:37 GMT
	I0731 10:53:37.867206   96871 round_trippers.go:580]     Audit-Id: 482a2f43-618e-42bb-abdf-f025ed5779c1
	I0731 10:53:37.867219   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:53:37.867327   96871 request.go:1188] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-776386-m02","uid":"2870ca66-a8b5-4dc8-97a6-393811c44eb6","resourceVersion":"488","creationTimestamp":"2023-07-31T10:53:36Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-776386-m02","kubernetes.io/os":"linux"},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"/var/run/crio/crio.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kubeadm","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:53:36Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{"f:kubeadm.alpha.kubernetes.io/cri-socket":{}}}}},{"manager":"kubelet","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:53:36Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:b [truncated 5210 chars]
	I0731 10:53:38.364904   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-776386-m02
	I0731 10:53:38.364928   96871 round_trippers.go:469] Request Headers:
	I0731 10:53:38.364941   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:53:38.364950   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:53:38.367117   96871 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
	I0731 10:53:38.367141   96871 round_trippers.go:577] Response Headers:
	I0731 10:53:38.367152   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:53:38.367161   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:53:38.367170   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:53:38.367183   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:53:38 GMT
	I0731 10:53:38.367199   96871 round_trippers.go:580]     Audit-Id: b7e32aad-f2cf-45a1-aaf5-363016202ed7
	I0731 10:53:38.367208   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:53:38.367294   96871 request.go:1188] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-776386-m02","uid":"2870ca66-a8b5-4dc8-97a6-393811c44eb6","resourceVersion":"488","creationTimestamp":"2023-07-31T10:53:36Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-776386-m02","kubernetes.io/os":"linux"},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"/var/run/crio/crio.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kubeadm","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:53:36Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{"f:kubeadm.alpha.kubernetes.io/cri-socket":{}}}}},{"manager":"kubelet","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:53:36Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:b [truncated 5210 chars]
	I0731 10:53:38.865066   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-776386-m02
	I0731 10:53:38.865087   96871 round_trippers.go:469] Request Headers:
	I0731 10:53:38.865101   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:53:38.865111   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:53:38.867201   96871 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
	I0731 10:53:38.867221   96871 round_trippers.go:577] Response Headers:
	I0731 10:53:38.867228   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:53:38.867236   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:53:38.867245   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:53:38.867254   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:53:38.867264   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:53:38 GMT
	I0731 10:53:38.867278   96871 round_trippers.go:580]     Audit-Id: f4391960-1bb8-4556-8295-2ec96155b89d
	I0731 10:53:38.867455   96871 request.go:1188] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-776386-m02","uid":"2870ca66-a8b5-4dc8-97a6-393811c44eb6","resourceVersion":"488","creationTimestamp":"2023-07-31T10:53:36Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-776386-m02","kubernetes.io/os":"linux"},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"/var/run/crio/crio.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kubeadm","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:53:36Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{"f:kubeadm.alpha.kubernetes.io/cri-socket":{}}}}},{"manager":"kubelet","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:53:36Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:b [truncated 5210 chars]
	I0731 10:53:38.867822   96871 node_ready.go:58] node "multinode-776386-m02" has status "Ready":"False"
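	The repeated GET requests above are a node-readiness wait loop: the client fetches the Node object roughly every 500 ms and logs a node_ready.go:58 entry for each cycle until the Ready condition turns True or the wait times out. A minimal sketch of this polling pattern with client-go follows; the function names, the 6-minute timeout, and the kubeconfig handling are illustrative assumptions, not minikube's actual implementation.

	package main

	import (
		"context"
		"fmt"
		"time"

		corev1 "k8s.io/api/core/v1"
		metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
		"k8s.io/client-go/kubernetes"
		"k8s.io/client-go/tools/clientcmd"
	)

	// nodeReady reports whether the NodeReady condition on n is True.
	func nodeReady(n *corev1.Node) bool {
		for _, c := range n.Status.Conditions {
			if c.Type == corev1.NodeReady {
				return c.Status == corev1.ConditionTrue
			}
		}
		return false
	}

	// waitNodeReady polls the named node every 500 ms until it is Ready,
	// the timeout elapses, or a request fails.
	func waitNodeReady(ctx context.Context, cs kubernetes.Interface, name string, timeout time.Duration) error {
		deadline := time.Now().Add(timeout)
		for time.Now().Before(deadline) {
			n, err := cs.CoreV1().Nodes().Get(ctx, name, metav1.GetOptions{})
			if err != nil {
				return err
			}
			if nodeReady(n) {
				return nil
			}
			// Mirrors the cadence and message seen in the log above.
			fmt.Printf("node %q has status \"Ready\":\"False\"\n", name)
			time.Sleep(500 * time.Millisecond)
		}
		return fmt.Errorf("node %q not Ready after %s", name, timeout)
	}

	func main() {
		// Assumes a reachable cluster via the default kubeconfig (~/.kube/config).
		cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
		if err != nil {
			panic(err)
		}
		cs, err := kubernetes.NewForConfig(cfg)
		if err != nil {
			panic(err)
		}
		if err := waitNodeReady(context.Background(), cs, "multinode-776386-m02", 6*time.Minute); err != nil {
			panic(err)
		}
	}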
	I0731 10:53:39.364892   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-776386-m02
	I0731 10:53:39.364913   96871 round_trippers.go:469] Request Headers:
	I0731 10:53:39.364922   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:53:39.364928   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:53:39.367137   96871 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
	I0731 10:53:39.367158   96871 round_trippers.go:577] Response Headers:
	I0731 10:53:39.367168   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:53:39.367177   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:53:39.367191   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:53:39.367200   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:53:39.367210   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:53:39 GMT
	I0731 10:53:39.367224   96871 round_trippers.go:580]     Audit-Id: 84052236-95c8-4fa8-93e7-74603efefb39
	I0731 10:53:39.367330   96871 request.go:1188] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-776386-m02","uid":"2870ca66-a8b5-4dc8-97a6-393811c44eb6","resourceVersion":"488","creationTimestamp":"2023-07-31T10:53:36Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-776386-m02","kubernetes.io/os":"linux"},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"/var/run/crio/crio.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kubeadm","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:53:36Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{"f:kubeadm.alpha.kubernetes.io/cri-socket":{}}}}},{"manager":"kubelet","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:53:36Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:b [truncated 5210 chars]
	I0731 10:53:39.864858   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-776386-m02
	I0731 10:53:39.864884   96871 round_trippers.go:469] Request Headers:
	I0731 10:53:39.864894   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:53:39.864905   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:53:39.867148   96871 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
	I0731 10:53:39.867169   96871 round_trippers.go:577] Response Headers:
	I0731 10:53:39.867179   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:53:39.867186   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:53:39.867193   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:53:39 GMT
	I0731 10:53:39.867201   96871 round_trippers.go:580]     Audit-Id: 75ddfab7-a038-48e4-8f57-0fc7622252af
	I0731 10:53:39.867209   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:53:39.867218   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:53:39.867328   96871 request.go:1188] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-776386-m02","uid":"2870ca66-a8b5-4dc8-97a6-393811c44eb6","resourceVersion":"488","creationTimestamp":"2023-07-31T10:53:36Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-776386-m02","kubernetes.io/os":"linux"},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"/var/run/crio/crio.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kubeadm","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:53:36Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{"f:kubeadm.alpha.kubernetes.io/cri-socket":{}}}}},{"manager":"kubelet","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:53:36Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:b [truncated 5210 chars]
	I0731 10:53:40.364859   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-776386-m02
	I0731 10:53:40.364884   96871 round_trippers.go:469] Request Headers:
	I0731 10:53:40.364897   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:53:40.364908   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:53:40.367006   96871 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
	I0731 10:53:40.367028   96871 round_trippers.go:577] Response Headers:
	I0731 10:53:40.367037   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:53:40.367044   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:53:40.367052   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:53:40.367060   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:53:40 GMT
	I0731 10:53:40.367068   96871 round_trippers.go:580]     Audit-Id: 2b583a33-6ae0-4128-a2cc-4a4326caa0cb
	I0731 10:53:40.367081   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:53:40.367201   96871 request.go:1188] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-776386-m02","uid":"2870ca66-a8b5-4dc8-97a6-393811c44eb6","resourceVersion":"488","creationTimestamp":"2023-07-31T10:53:36Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-776386-m02","kubernetes.io/os":"linux"},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"/var/run/crio/crio.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kubeadm","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:53:36Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{"f:kubeadm.alpha.kubernetes.io/cri-socket":{}}}}},{"manager":"kubelet","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:53:36Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:b [truncated 5210 chars]
	I0731 10:53:40.864779   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-776386-m02
	I0731 10:53:40.864801   96871 round_trippers.go:469] Request Headers:
	I0731 10:53:40.864811   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:53:40.864823   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:53:40.867141   96871 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
	I0731 10:53:40.867164   96871 round_trippers.go:577] Response Headers:
	I0731 10:53:40.867172   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:53:40 GMT
	I0731 10:53:40.867177   96871 round_trippers.go:580]     Audit-Id: 1afe4c51-dc62-47bc-a182-00bd9b33171d
	I0731 10:53:40.867183   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:53:40.867188   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:53:40.867195   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:53:40.867206   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:53:40.867342   96871 request.go:1188] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-776386-m02","uid":"2870ca66-a8b5-4dc8-97a6-393811c44eb6","resourceVersion":"488","creationTimestamp":"2023-07-31T10:53:36Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-776386-m02","kubernetes.io/os":"linux"},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"/var/run/crio/crio.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kubeadm","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:53:36Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{"f:kubeadm.alpha.kubernetes.io/cri-socket":{}}}}},{"manager":"kubelet","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:53:36Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:b [truncated 5210 chars]
	I0731 10:53:41.365053   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-776386-m02
	I0731 10:53:41.365077   96871 round_trippers.go:469] Request Headers:
	I0731 10:53:41.365089   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:53:41.365100   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:53:41.367311   96871 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
	I0731 10:53:41.367333   96871 round_trippers.go:577] Response Headers:
	I0731 10:53:41.367343   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:53:41.367351   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:53:41.367357   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:53:41.367363   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:53:41.367369   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:53:41 GMT
	I0731 10:53:41.367378   96871 round_trippers.go:580]     Audit-Id: e61d24a6-112a-441c-986b-1f829e073ae6
	I0731 10:53:41.367453   96871 request.go:1188] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-776386-m02","uid":"2870ca66-a8b5-4dc8-97a6-393811c44eb6","resourceVersion":"488","creationTimestamp":"2023-07-31T10:53:36Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-776386-m02","kubernetes.io/os":"linux"},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"/var/run/crio/crio.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kubeadm","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:53:36Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{"f:kubeadm.alpha.kubernetes.io/cri-socket":{}}}}},{"manager":"kubelet","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:53:36Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:b [truncated 5210 chars]
	I0731 10:53:41.367727   96871 node_ready.go:58] node "multinode-776386-m02" has status "Ready":"False"
	I0731 10:53:41.864997   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-776386-m02
	I0731 10:53:41.865016   96871 round_trippers.go:469] Request Headers:
	I0731 10:53:41.865024   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:53:41.865030   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:53:41.867289   96871 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
	I0731 10:53:41.867310   96871 round_trippers.go:577] Response Headers:
	I0731 10:53:41.867320   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:53:41.867331   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:53:41.867340   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:53:41.867348   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:53:41 GMT
	I0731 10:53:41.867357   96871 round_trippers.go:580]     Audit-Id: 75cee089-6b9c-4995-9fe7-a08f4ee1057f
	I0731 10:53:41.867371   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:53:41.867510   96871 request.go:1188] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-776386-m02","uid":"2870ca66-a8b5-4dc8-97a6-393811c44eb6","resourceVersion":"488","creationTimestamp":"2023-07-31T10:53:36Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-776386-m02","kubernetes.io/os":"linux"},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"/var/run/crio/crio.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kubeadm","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:53:36Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{"f:kubeadm.alpha.kubernetes.io/cri-socket":{}}}}},{"manager":"kubelet","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:53:36Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:b [truncated 5210 chars]
	I0731 10:53:42.365072   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-776386-m02
	I0731 10:53:42.365092   96871 round_trippers.go:469] Request Headers:
	I0731 10:53:42.365099   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:53:42.365105   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:53:42.367208   96871 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
	I0731 10:53:42.367227   96871 round_trippers.go:577] Response Headers:
	I0731 10:53:42.367237   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:53:42.367243   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:53:42.367251   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:53:42 GMT
	I0731 10:53:42.367263   96871 round_trippers.go:580]     Audit-Id: 1887c249-7dcc-43d1-a09e-97548dd6f541
	I0731 10:53:42.367272   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:53:42.367280   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:53:42.367433   96871 request.go:1188] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-776386-m02","uid":"2870ca66-a8b5-4dc8-97a6-393811c44eb6","resourceVersion":"488","creationTimestamp":"2023-07-31T10:53:36Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-776386-m02","kubernetes.io/os":"linux"},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"/var/run/crio/crio.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kubeadm","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:53:36Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{"f:kubeadm.alpha.kubernetes.io/cri-socket":{}}}}},{"manager":"kubelet","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:53:36Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:b [truncated 5210 chars]
	I0731 10:53:42.864974   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-776386-m02
	I0731 10:53:42.864994   96871 round_trippers.go:469] Request Headers:
	I0731 10:53:42.865002   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:53:42.865008   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:53:42.867092   96871 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
	I0731 10:53:42.867118   96871 round_trippers.go:577] Response Headers:
	I0731 10:53:42.867130   96871 round_trippers.go:580]     Audit-Id: 743bb916-e346-462f-a23d-36cc0750cc3e
	I0731 10:53:42.867139   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:53:42.867146   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:53:42.867152   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:53:42.867165   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:53:42.867178   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:53:42 GMT
	I0731 10:53:42.867314   96871 request.go:1188] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-776386-m02","uid":"2870ca66-a8b5-4dc8-97a6-393811c44eb6","resourceVersion":"488","creationTimestamp":"2023-07-31T10:53:36Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-776386-m02","kubernetes.io/os":"linux"},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"/var/run/crio/crio.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kubeadm","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:53:36Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{"f:kubeadm.alpha.kubernetes.io/cri-socket":{}}}}},{"manager":"kubelet","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:53:36Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:b [truncated 5210 chars]
	I0731 10:53:43.364761   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-776386-m02
	I0731 10:53:43.364780   96871 round_trippers.go:469] Request Headers:
	I0731 10:53:43.364788   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:53:43.364795   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:53:43.367014   96871 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
	I0731 10:53:43.367032   96871 round_trippers.go:577] Response Headers:
	I0731 10:53:43.367039   96871 round_trippers.go:580]     Audit-Id: c294b6e0-8c8f-464c-87df-989db900f892
	I0731 10:53:43.367045   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:53:43.367050   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:53:43.367055   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:53:43.367061   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:53:43.367066   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:53:43 GMT
	I0731 10:53:43.367200   96871 request.go:1188] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-776386-m02","uid":"2870ca66-a8b5-4dc8-97a6-393811c44eb6","resourceVersion":"488","creationTimestamp":"2023-07-31T10:53:36Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-776386-m02","kubernetes.io/os":"linux"},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"/var/run/crio/crio.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kubeadm","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:53:36Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{"f:kubeadm.alpha.kubernetes.io/cri-socket":{}}}}},{"manager":"kubelet","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:53:36Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:b [truncated 5210 chars]
	I0731 10:53:43.864763   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-776386-m02
	I0731 10:53:43.864783   96871 round_trippers.go:469] Request Headers:
	I0731 10:53:43.864791   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:53:43.864797   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:53:43.867065   96871 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
	I0731 10:53:43.867088   96871 round_trippers.go:577] Response Headers:
	I0731 10:53:43.867099   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:53:43.867108   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:53:43.867116   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:53:43.867129   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:53:43 GMT
	I0731 10:53:43.867141   96871 round_trippers.go:580]     Audit-Id: 997e8e6d-b29f-49c9-ad63-6b8eecc1d1e5
	I0731 10:53:43.867149   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:53:43.867259   96871 request.go:1188] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-776386-m02","uid":"2870ca66-a8b5-4dc8-97a6-393811c44eb6","resourceVersion":"488","creationTimestamp":"2023-07-31T10:53:36Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-776386-m02","kubernetes.io/os":"linux"},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"/var/run/crio/crio.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kubeadm","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:53:36Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{"f:kubeadm.alpha.kubernetes.io/cri-socket":{}}}}},{"manager":"kubelet","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:53:36Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:b [truncated 5210 chars]
	I0731 10:53:43.867586   96871 node_ready.go:58] node "multinode-776386-m02" has status "Ready":"False"
	I0731 10:53:44.364829   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-776386-m02
	I0731 10:53:44.364853   96871 round_trippers.go:469] Request Headers:
	I0731 10:53:44.364863   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:53:44.364870   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:53:44.367037   96871 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
	I0731 10:53:44.367060   96871 round_trippers.go:577] Response Headers:
	I0731 10:53:44.367071   96871 round_trippers.go:580]     Audit-Id: 720fadff-a740-4c48-bace-daff146f12b0
	I0731 10:53:44.367079   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:53:44.367087   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:53:44.367096   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:53:44.367106   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:53:44.367114   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:53:44 GMT
	I0731 10:53:44.367239   96871 request.go:1188] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-776386-m02","uid":"2870ca66-a8b5-4dc8-97a6-393811c44eb6","resourceVersion":"488","creationTimestamp":"2023-07-31T10:53:36Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-776386-m02","kubernetes.io/os":"linux"},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"/var/run/crio/crio.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kubeadm","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:53:36Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{"f:kubeadm.alpha.kubernetes.io/cri-socket":{}}}}},{"manager":"kubelet","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:53:36Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:b [truncated 5210 chars]
	I0731 10:53:44.864779   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-776386-m02
	I0731 10:53:44.864798   96871 round_trippers.go:469] Request Headers:
	I0731 10:53:44.864806   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:53:44.864812   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:53:44.867034   96871 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
	I0731 10:53:44.867058   96871 round_trippers.go:577] Response Headers:
	I0731 10:53:44.867068   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:53:44.867077   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:53:44.867086   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:53:44.867095   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:53:44.867104   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:53:44 GMT
	I0731 10:53:44.867117   96871 round_trippers.go:580]     Audit-Id: 698361ba-f5b9-4f4c-a1a6-e124f0b21a7c
	I0731 10:53:44.867232   96871 request.go:1188] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-776386-m02","uid":"2870ca66-a8b5-4dc8-97a6-393811c44eb6","resourceVersion":"488","creationTimestamp":"2023-07-31T10:53:36Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-776386-m02","kubernetes.io/os":"linux"},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"/var/run/crio/crio.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kubeadm","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:53:36Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{"f:kubeadm.alpha.kubernetes.io/cri-socket":{}}}}},{"manager":"kubelet","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:53:36Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:b [truncated 5210 chars]
	I0731 10:53:45.364765   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-776386-m02
	I0731 10:53:45.364784   96871 round_trippers.go:469] Request Headers:
	I0731 10:53:45.364792   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:53:45.364798   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:53:45.366975   96871 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
	I0731 10:53:45.366997   96871 round_trippers.go:577] Response Headers:
	I0731 10:53:45.367006   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:53:45.367015   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:53:45.367023   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:53:45.367031   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:53:45 GMT
	I0731 10:53:45.367041   96871 round_trippers.go:580]     Audit-Id: 5306b553-cd4d-447a-9d2c-3216337ecc41
	I0731 10:53:45.367053   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:53:45.367139   96871 request.go:1188] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-776386-m02","uid":"2870ca66-a8b5-4dc8-97a6-393811c44eb6","resourceVersion":"488","creationTimestamp":"2023-07-31T10:53:36Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-776386-m02","kubernetes.io/os":"linux"},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"/var/run/crio/crio.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kubeadm","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:53:36Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{"f:kubeadm.alpha.kubernetes.io/cri-socket":{}}}}},{"manager":"kubelet","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:53:36Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:b [truncated 5210 chars]
	I0731 10:53:45.864699   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-776386-m02
	I0731 10:53:45.864719   96871 round_trippers.go:469] Request Headers:
	I0731 10:53:45.864727   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:53:45.864733   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:53:45.867160   96871 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
	I0731 10:53:45.867177   96871 round_trippers.go:577] Response Headers:
	I0731 10:53:45.867184   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:53:45.867190   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:53:45.867196   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:53:45 GMT
	I0731 10:53:45.867201   96871 round_trippers.go:580]     Audit-Id: 74319e43-038e-4173-a10b-d2848fbf6f32
	I0731 10:53:45.867210   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:53:45.867217   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:53:45.867330   96871 request.go:1188] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-776386-m02","uid":"2870ca66-a8b5-4dc8-97a6-393811c44eb6","resourceVersion":"488","creationTimestamp":"2023-07-31T10:53:36Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-776386-m02","kubernetes.io/os":"linux"},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"/var/run/crio/crio.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kubeadm","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:53:36Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{"f:kubeadm.alpha.kubernetes.io/cri-socket":{}}}}},{"manager":"kubelet","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:53:36Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:b [truncated 5210 chars]
	I0731 10:53:45.867643   96871 node_ready.go:58] node "multinode-776386-m02" has status "Ready":"False"
	I0731 10:53:46.365306   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-776386-m02
	I0731 10:53:46.365331   96871 round_trippers.go:469] Request Headers:
	I0731 10:53:46.365344   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:53:46.365354   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:53:46.367352   96871 round_trippers.go:574] Response Status: 200 OK in 1 milliseconds
	I0731 10:53:46.367372   96871 round_trippers.go:577] Response Headers:
	I0731 10:53:46.367380   96871 round_trippers.go:580]     Audit-Id: 64fd0ec9-2f3f-4256-bbf4-fef92bd61a69
	I0731 10:53:46.367390   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:53:46.367405   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:53:46.367419   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:53:46.367434   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:53:46.367446   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:53:46 GMT
	I0731 10:53:46.367582   96871 request.go:1188] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-776386-m02","uid":"2870ca66-a8b5-4dc8-97a6-393811c44eb6","resourceVersion":"488","creationTimestamp":"2023-07-31T10:53:36Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-776386-m02","kubernetes.io/os":"linux"},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"/var/run/crio/crio.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kubeadm","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:53:36Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{"f:kubeadm.alpha.kubernetes.io/cri-socket":{}}}}},{"manager":"kubelet","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:53:36Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:b [truncated 5210 chars]
	I0731 10:53:46.864649   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-776386-m02
	I0731 10:53:46.864669   96871 round_trippers.go:469] Request Headers:
	I0731 10:53:46.864677   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:53:46.864684   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:53:46.866949   96871 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
	I0731 10:53:46.866974   96871 round_trippers.go:577] Response Headers:
	I0731 10:53:46.866985   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:53:46.866994   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:53:46.867004   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:53:46 GMT
	I0731 10:53:46.867013   96871 round_trippers.go:580]     Audit-Id: bd0c4418-4917-4e24-81e9-b2f92bcdd38c
	I0731 10:53:46.867029   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:53:46.867041   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:53:46.867172   96871 request.go:1188] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-776386-m02","uid":"2870ca66-a8b5-4dc8-97a6-393811c44eb6","resourceVersion":"509","creationTimestamp":"2023-07-31T10:53:36Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-776386-m02","kubernetes.io/os":"linux"},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"/var/run/crio/crio.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kubeadm","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:53:36Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{"f:kubeadm.alpha.kubernetes.io/cri-socket":{}}}}},{"manager":"kubelet","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:53:36Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:b [truncated 5479 chars]
	I0731 10:53:47.364689   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-776386-m02
	I0731 10:53:47.364709   96871 round_trippers.go:469] Request Headers:
	I0731 10:53:47.364722   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:53:47.364729   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:53:47.366894   96871 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
	I0731 10:53:47.366916   96871 round_trippers.go:577] Response Headers:
	I0731 10:53:47.366927   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:53:47.366936   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:53:47.366943   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:53:47.366953   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:53:47 GMT
	I0731 10:53:47.366965   96871 round_trippers.go:580]     Audit-Id: b5e0f833-f1f1-4617-9f0a-743dae33d7fd
	I0731 10:53:47.366979   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:53:47.367086   96871 request.go:1188] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-776386-m02","uid":"2870ca66-a8b5-4dc8-97a6-393811c44eb6","resourceVersion":"509","creationTimestamp":"2023-07-31T10:53:36Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-776386-m02","kubernetes.io/os":"linux"},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"/var/run/crio/crio.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kubeadm","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:53:36Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{"f:kubeadm.alpha.kubernetes.io/cri-socket":{}}}}},{"manager":"kubelet","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:53:36Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:b [truncated 5479 chars]
	I0731 10:53:47.864697   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-776386-m02
	I0731 10:53:47.864736   96871 round_trippers.go:469] Request Headers:
	I0731 10:53:47.864757   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:53:47.864774   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:53:47.866986   96871 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
	I0731 10:53:47.867009   96871 round_trippers.go:577] Response Headers:
	I0731 10:53:47.867020   96871 round_trippers.go:580]     Audit-Id: 0786e913-5535-440d-8f68-305c68d4cb69
	I0731 10:53:47.867030   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:53:47.867039   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:53:47.867049   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:53:47.867061   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:53:47.867071   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:53:47 GMT
	I0731 10:53:47.867169   96871 request.go:1188] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-776386-m02","uid":"2870ca66-a8b5-4dc8-97a6-393811c44eb6","resourceVersion":"509","creationTimestamp":"2023-07-31T10:53:36Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-776386-m02","kubernetes.io/os":"linux"},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"/var/run/crio/crio.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kubeadm","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:53:36Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{"f:kubeadm.alpha.kubernetes.io/cri-socket":{}}}}},{"manager":"kubelet","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:53:36Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:b [truncated 5479 chars]
	I0731 10:53:48.364676   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-776386-m02
	I0731 10:53:48.364698   96871 round_trippers.go:469] Request Headers:
	I0731 10:53:48.364705   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:53:48.364715   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:53:48.366854   96871 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
	I0731 10:53:48.366879   96871 round_trippers.go:577] Response Headers:
	I0731 10:53:48.366889   96871 round_trippers.go:580]     Audit-Id: bdaa95a8-067e-4756-8bef-1034fa05f79a
	I0731 10:53:48.366898   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:53:48.366907   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:53:48.366916   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:53:48.366925   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:53:48.366935   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:53:48 GMT
	I0731 10:53:48.367030   96871 request.go:1188] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-776386-m02","uid":"2870ca66-a8b5-4dc8-97a6-393811c44eb6","resourceVersion":"509","creationTimestamp":"2023-07-31T10:53:36Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-776386-m02","kubernetes.io/os":"linux"},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"/var/run/crio/crio.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kubeadm","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:53:36Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{"f:kubeadm.alpha.kubernetes.io/cri-socket":{}}}}},{"manager":"kubelet","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:53:36Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:b [truncated 5479 chars]
	I0731 10:53:48.367388   96871 node_ready.go:58] node "multinode-776386-m02" has status "Ready":"False"
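	Polling like this is simple and robust, but the same Ready transition can also be observed event-driven with a watch, avoiding the fixed 500 ms cadence. A sketch of that alternative with client-go is below; it is illustrative only (this is not how minikube's wait code is written), and a production version would re-list and re-watch when the channel closes instead of returning an error.

	package nodewait

	import (
		"context"
		"fmt"

		corev1 "k8s.io/api/core/v1"
		metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
		"k8s.io/client-go/kubernetes"
	)

	// isReady reports whether the NodeReady condition on n is True.
	func isReady(n *corev1.Node) bool {
		for _, c := range n.Status.Conditions {
			if c.Type == corev1.NodeReady {
				return c.Status == corev1.ConditionTrue
			}
		}
		return false
	}

	// WatchNodeReady blocks until the named node reports Ready, the watch
	// channel closes, or ctx is cancelled.
	func WatchNodeReady(ctx context.Context, cs kubernetes.Interface, name string) error {
		w, err := cs.CoreV1().Nodes().Watch(ctx, metav1.ListOptions{
			// Restrict the watch to the single node we care about.
			FieldSelector: "metadata.name=" + name,
		})
		if err != nil {
			return err
		}
		defer w.Stop()
		for {
			select {
			case <-ctx.Done():
				return ctx.Err()
			case ev, ok := <-w.ResultChan():
				if !ok {
					return fmt.Errorf("watch on node %q closed", name)
				}
				if n, isNode := ev.Object.(*corev1.Node); isNode && isReady(n) {
					return nil
				}
			}
		}
	}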
	I0731 10:53:48.864794   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-776386-m02
	I0731 10:53:48.864822   96871 round_trippers.go:469] Request Headers:
	I0731 10:53:48.864831   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:53:48.864837   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:53:48.867235   96871 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
	I0731 10:53:48.867262   96871 round_trippers.go:577] Response Headers:
	I0731 10:53:48.867275   96871 round_trippers.go:580]     Audit-Id: d879cc17-d48a-469e-a02d-94d1600bab95
	I0731 10:53:48.867283   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:53:48.867289   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:53:48.867295   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:53:48.867301   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:53:48.867308   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:53:48 GMT
	I0731 10:53:48.867431   96871 request.go:1188] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-776386-m02","uid":"2870ca66-a8b5-4dc8-97a6-393811c44eb6","resourceVersion":"509","creationTimestamp":"2023-07-31T10:53:36Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-776386-m02","kubernetes.io/os":"linux"},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"/var/run/crio/crio.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kubeadm","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:53:36Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{"f:kubeadm.alpha.kubernetes.io/cri-socket":{}}}}},{"manager":"kubelet","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:53:36Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:b [truncated 5479 chars]
	I0731 10:53:49.364976   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-776386-m02
	I0731 10:53:49.364999   96871 round_trippers.go:469] Request Headers:
	I0731 10:53:49.365007   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:53:49.365013   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:53:49.367388   96871 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
	I0731 10:53:49.367425   96871 round_trippers.go:577] Response Headers:
	I0731 10:53:49.367436   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:53:49.367446   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:53:49.367456   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:53:49 GMT
	I0731 10:53:49.367470   96871 round_trippers.go:580]     Audit-Id: 14437264-7c5d-4c24-8f97-300dadacb603
	I0731 10:53:49.367484   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:53:49.367493   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:53:49.367595   96871 request.go:1188] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-776386-m02","uid":"2870ca66-a8b5-4dc8-97a6-393811c44eb6","resourceVersion":"509","creationTimestamp":"2023-07-31T10:53:36Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-776386-m02","kubernetes.io/os":"linux"},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"/var/run/crio/crio.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kubeadm","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:53:36Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{"f:kubeadm.alpha.kubernetes.io/cri-socket":{}}}}},{"manager":"kubelet","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:53:36Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:b [truncated 5479 chars]
	I0731 10:53:49.865175   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-776386-m02
	I0731 10:53:49.865196   96871 round_trippers.go:469] Request Headers:
	I0731 10:53:49.865204   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:53:49.865210   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:53:49.867580   96871 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
	I0731 10:53:49.867606   96871 round_trippers.go:577] Response Headers:
	I0731 10:53:49.867617   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:53:49.867625   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:53:49.867633   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:53:49.867642   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:53:49 GMT
	I0731 10:53:49.867654   96871 round_trippers.go:580]     Audit-Id: bfe4a3c4-1d1c-4b16-a065-06d9ff1a0f68
	I0731 10:53:49.867665   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:53:49.867773   96871 request.go:1188] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-776386-m02","uid":"2870ca66-a8b5-4dc8-97a6-393811c44eb6","resourceVersion":"509","creationTimestamp":"2023-07-31T10:53:36Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-776386-m02","kubernetes.io/os":"linux"},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"/var/run/crio/crio.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kubeadm","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:53:36Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{"f:kubeadm.alpha.kubernetes.io/cri-socket":{}}}}},{"manager":"kubelet","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:53:36Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:b [truncated 5479 chars]
	I0731 10:53:50.365382   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-776386-m02
	I0731 10:53:50.365399   96871 round_trippers.go:469] Request Headers:
	I0731 10:53:50.365408   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:53:50.365415   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:53:50.367799   96871 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
	I0731 10:53:50.367823   96871 round_trippers.go:577] Response Headers:
	I0731 10:53:50.367834   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:53:50.367843   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:53:50.367852   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:53:50.367862   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:53:50 GMT
	I0731 10:53:50.367872   96871 round_trippers.go:580]     Audit-Id: 7d915052-bb30-4ebd-9165-bd71aad3622f
	I0731 10:53:50.367887   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:53:50.367991   96871 request.go:1188] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-776386-m02","uid":"2870ca66-a8b5-4dc8-97a6-393811c44eb6","resourceVersion":"509","creationTimestamp":"2023-07-31T10:53:36Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-776386-m02","kubernetes.io/os":"linux"},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"/var/run/crio/crio.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kubeadm","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:53:36Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{"f:kubeadm.alpha.kubernetes.io/cri-socket":{}}}}},{"manager":"kubelet","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:53:36Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:b [truncated 5479 chars]
	I0731 10:53:50.368343   96871 node_ready.go:58] node "multinode-776386-m02" has status "Ready":"False"
	I0731 10:53:50.864481   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-776386-m02
	I0731 10:53:50.864507   96871 round_trippers.go:469] Request Headers:
	I0731 10:53:50.864518   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:53:50.864527   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:53:50.866817   96871 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
	I0731 10:53:50.866838   96871 round_trippers.go:577] Response Headers:
	I0731 10:53:50.866847   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:53:50 GMT
	I0731 10:53:50.866855   96871 round_trippers.go:580]     Audit-Id: 5318acfb-08a9-4dbc-9914-34778a6949fa
	I0731 10:53:50.866863   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:53:50.866872   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:53:50.866881   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:53:50.866894   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:53:50.866998   96871 request.go:1188] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-776386-m02","uid":"2870ca66-a8b5-4dc8-97a6-393811c44eb6","resourceVersion":"509","creationTimestamp":"2023-07-31T10:53:36Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-776386-m02","kubernetes.io/os":"linux"},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"/var/run/crio/crio.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kubeadm","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:53:36Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{"f:kubeadm.alpha.kubernetes.io/cri-socket":{}}}}},{"manager":"kubelet","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:53:36Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:b [truncated 5479 chars]
	I0731 10:53:51.364755   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-776386-m02
	I0731 10:53:51.364775   96871 round_trippers.go:469] Request Headers:
	I0731 10:53:51.364787   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:53:51.364795   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:53:51.366923   96871 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
	I0731 10:53:51.366948   96871 round_trippers.go:577] Response Headers:
	I0731 10:53:51.366962   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:53:51.366970   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:53:51.366975   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:53:51.366981   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:53:51 GMT
	I0731 10:53:51.366986   96871 round_trippers.go:580]     Audit-Id: 637ece30-d05e-41a9-90be-84ff102b1d5d
	I0731 10:53:51.366994   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:53:51.367074   96871 request.go:1188] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-776386-m02","uid":"2870ca66-a8b5-4dc8-97a6-393811c44eb6","resourceVersion":"509","creationTimestamp":"2023-07-31T10:53:36Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-776386-m02","kubernetes.io/os":"linux"},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"/var/run/crio/crio.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kubeadm","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:53:36Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{"f:kubeadm.alpha.kubernetes.io/cri-socket":{}}}}},{"manager":"kubelet","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:53:36Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:b [truncated 5479 chars]
	I0731 10:53:51.864656   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-776386-m02
	I0731 10:53:51.864674   96871 round_trippers.go:469] Request Headers:
	I0731 10:53:51.864683   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:53:51.864692   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:53:51.866820   96871 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
	I0731 10:53:51.866844   96871 round_trippers.go:577] Response Headers:
	I0731 10:53:51.866854   96871 round_trippers.go:580]     Audit-Id: 864a2fa2-9a35-4cdc-a904-20f97d2aae97
	I0731 10:53:51.866862   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:53:51.866869   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:53:51.866878   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:53:51.866890   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:53:51.866904   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:53:51 GMT
	I0731 10:53:51.867010   96871 request.go:1188] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-776386-m02","uid":"2870ca66-a8b5-4dc8-97a6-393811c44eb6","resourceVersion":"509","creationTimestamp":"2023-07-31T10:53:36Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-776386-m02","kubernetes.io/os":"linux"},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"/var/run/crio/crio.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kubeadm","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:53:36Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{"f:kubeadm.alpha.kubernetes.io/cri-socket":{}}}}},{"manager":"kubelet","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:53:36Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:b [truncated 5479 chars]
	I0731 10:53:52.364477   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-776386-m02
	I0731 10:53:52.364501   96871 round_trippers.go:469] Request Headers:
	I0731 10:53:52.364509   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:53:52.364515   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:53:52.366898   96871 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
	I0731 10:53:52.366922   96871 round_trippers.go:577] Response Headers:
	I0731 10:53:52.366929   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:53:52.366935   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:53:52.366940   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:53:52.366946   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:53:52.366954   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:53:52 GMT
	I0731 10:53:52.366963   96871 round_trippers.go:580]     Audit-Id: e8e5d701-bb10-4eac-b878-a22d8f145ac9
	I0731 10:53:52.367069   96871 request.go:1188] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-776386-m02","uid":"2870ca66-a8b5-4dc8-97a6-393811c44eb6","resourceVersion":"509","creationTimestamp":"2023-07-31T10:53:36Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-776386-m02","kubernetes.io/os":"linux"},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"/var/run/crio/crio.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kubeadm","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:53:36Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{"f:kubeadm.alpha.kubernetes.io/cri-socket":{}}}}},{"manager":"kubelet","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:53:36Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:b [truncated 5479 chars]
	I0731 10:53:52.864653   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-776386-m02
	I0731 10:53:52.864673   96871 round_trippers.go:469] Request Headers:
	I0731 10:53:52.864682   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:53:52.864688   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:53:52.866750   96871 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
	I0731 10:53:52.866776   96871 round_trippers.go:577] Response Headers:
	I0731 10:53:52.866786   96871 round_trippers.go:580]     Audit-Id: 2df90706-1c61-4447-bd18-41fdf14eab65
	I0731 10:53:52.866794   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:53:52.866802   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:53:52.866811   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:53:52.866830   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:53:52.866839   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:53:52 GMT
	I0731 10:53:52.866966   96871 request.go:1188] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-776386-m02","uid":"2870ca66-a8b5-4dc8-97a6-393811c44eb6","resourceVersion":"509","creationTimestamp":"2023-07-31T10:53:36Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-776386-m02","kubernetes.io/os":"linux"},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"/var/run/crio/crio.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kubeadm","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:53:36Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{"f:kubeadm.alpha.kubernetes.io/cri-socket":{}}}}},{"manager":"kubelet","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:53:36Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:b [truncated 5479 chars]
	I0731 10:53:52.867271   96871 node_ready.go:58] node "multinode-776386-m02" has status "Ready":"False"
	I0731 10:53:53.364475   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-776386-m02
	I0731 10:53:53.364495   96871 round_trippers.go:469] Request Headers:
	I0731 10:53:53.364503   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:53:53.364509   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:53:53.366796   96871 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
	I0731 10:53:53.366817   96871 round_trippers.go:577] Response Headers:
	I0731 10:53:53.366823   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:53:53 GMT
	I0731 10:53:53.366829   96871 round_trippers.go:580]     Audit-Id: 204cee30-c681-460a-8bc0-4740e530f9f3
	I0731 10:53:53.366835   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:53:53.366842   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:53:53.366848   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:53:53.366856   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:53:53.366982   96871 request.go:1188] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-776386-m02","uid":"2870ca66-a8b5-4dc8-97a6-393811c44eb6","resourceVersion":"509","creationTimestamp":"2023-07-31T10:53:36Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-776386-m02","kubernetes.io/os":"linux"},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"/var/run/crio/crio.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kubeadm","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:53:36Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{"f:kubeadm.alpha.kubernetes.io/cri-socket":{}}}}},{"manager":"kubelet","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:53:36Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:b [truncated 5479 chars]
	I0731 10:53:53.864537   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-776386-m02
	I0731 10:53:53.864558   96871 round_trippers.go:469] Request Headers:
	I0731 10:53:53.864566   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:53:53.864572   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:53:53.866778   96871 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
	I0731 10:53:53.866802   96871 round_trippers.go:577] Response Headers:
	I0731 10:53:53.866813   96871 round_trippers.go:580]     Audit-Id: 3c4b1475-fb6e-4082-9a66-a185c9a457a0
	I0731 10:53:53.866822   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:53:53.866830   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:53:53.866835   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:53:53.866844   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:53:53.866850   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:53:53 GMT
	I0731 10:53:53.866961   96871 request.go:1188] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-776386-m02","uid":"2870ca66-a8b5-4dc8-97a6-393811c44eb6","resourceVersion":"509","creationTimestamp":"2023-07-31T10:53:36Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-776386-m02","kubernetes.io/os":"linux"},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"/var/run/crio/crio.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kubeadm","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:53:36Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{"f:kubeadm.alpha.kubernetes.io/cri-socket":{}}}}},{"manager":"kubelet","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:53:36Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:b [truncated 5479 chars]
	I0731 10:53:54.365534   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-776386-m02
	I0731 10:53:54.365557   96871 round_trippers.go:469] Request Headers:
	I0731 10:53:54.365570   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:53:54.365581   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:53:54.367851   96871 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
	I0731 10:53:54.367871   96871 round_trippers.go:577] Response Headers:
	I0731 10:53:54.367887   96871 round_trippers.go:580]     Audit-Id: 85a1559b-1bb5-4e96-a2ab-d900f8176b7b
	I0731 10:53:54.367897   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:53:54.367910   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:53:54.367919   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:53:54.367931   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:53:54.367941   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:53:54 GMT
	I0731 10:53:54.368020   96871 request.go:1188] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-776386-m02","uid":"2870ca66-a8b5-4dc8-97a6-393811c44eb6","resourceVersion":"509","creationTimestamp":"2023-07-31T10:53:36Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-776386-m02","kubernetes.io/os":"linux"},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"/var/run/crio/crio.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kubeadm","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:53:36Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{"f:kubeadm.alpha.kubernetes.io/cri-socket":{}}}}},{"manager":"kubelet","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:53:36Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:b [truncated 5479 chars]
	I0731 10:53:54.864528   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-776386-m02
	I0731 10:53:54.864568   96871 round_trippers.go:469] Request Headers:
	I0731 10:53:54.864579   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:53:54.864586   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:53:54.866818   96871 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
	I0731 10:53:54.866839   96871 round_trippers.go:577] Response Headers:
	I0731 10:53:54.866849   96871 round_trippers.go:580]     Audit-Id: 2fd99690-c594-41b4-b940-0dc086e94879
	I0731 10:53:54.866859   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:53:54.866867   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:53:54.866877   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:53:54.866891   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:53:54.866904   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:53:54 GMT
	I0731 10:53:54.867040   96871 request.go:1188] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-776386-m02","uid":"2870ca66-a8b5-4dc8-97a6-393811c44eb6","resourceVersion":"509","creationTimestamp":"2023-07-31T10:53:36Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-776386-m02","kubernetes.io/os":"linux"},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"/var/run/crio/crio.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kubeadm","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:53:36Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{"f:kubeadm.alpha.kubernetes.io/cri-socket":{}}}}},{"manager":"kubelet","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:53:36Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:b [truncated 5479 chars]
	I0731 10:53:54.867350   96871 node_ready.go:58] node "multinode-776386-m02" has status "Ready":"False"
	I0731 10:53:55.364538   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-776386-m02
	I0731 10:53:55.364557   96871 round_trippers.go:469] Request Headers:
	I0731 10:53:55.364565   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:53:55.364571   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:53:55.366764   96871 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
	I0731 10:53:55.366784   96871 round_trippers.go:577] Response Headers:
	I0731 10:53:55.366793   96871 round_trippers.go:580]     Audit-Id: a920d66e-af34-40f5-8491-3e84ee53f519
	I0731 10:53:55.366801   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:53:55.366809   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:53:55.366817   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:53:55.366825   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:53:55.366839   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:53:55 GMT
	I0731 10:53:55.366947   96871 request.go:1188] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-776386-m02","uid":"2870ca66-a8b5-4dc8-97a6-393811c44eb6","resourceVersion":"509","creationTimestamp":"2023-07-31T10:53:36Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-776386-m02","kubernetes.io/os":"linux"},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"/var/run/crio/crio.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kubeadm","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:53:36Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{"f:kubeadm.alpha.kubernetes.io/cri-socket":{}}}}},{"manager":"kubelet","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:53:36Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:b [truncated 5479 chars]
	I0731 10:53:55.864536   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-776386-m02
	I0731 10:53:55.864553   96871 round_trippers.go:469] Request Headers:
	I0731 10:53:55.864561   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:53:55.864567   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:53:55.866801   96871 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
	I0731 10:53:55.866832   96871 round_trippers.go:577] Response Headers:
	I0731 10:53:55.866842   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:53:55.866851   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:53:55 GMT
	I0731 10:53:55.866859   96871 round_trippers.go:580]     Audit-Id: ecdbb5b5-e808-4d84-9a73-ddd17ea502b0
	I0731 10:53:55.866868   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:53:55.866878   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:53:55.866895   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:53:55.867012   96871 request.go:1188] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-776386-m02","uid":"2870ca66-a8b5-4dc8-97a6-393811c44eb6","resourceVersion":"509","creationTimestamp":"2023-07-31T10:53:36Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-776386-m02","kubernetes.io/os":"linux"},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"/var/run/crio/crio.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kubeadm","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:53:36Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{"f:kubeadm.alpha.kubernetes.io/cri-socket":{}}}}},{"manager":"kubelet","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:53:36Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:b [truncated 5479 chars]
	I0731 10:53:56.364507   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-776386-m02
	I0731 10:53:56.364525   96871 round_trippers.go:469] Request Headers:
	I0731 10:53:56.364533   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:53:56.364539   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:53:56.366709   96871 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
	I0731 10:53:56.366735   96871 round_trippers.go:577] Response Headers:
	I0731 10:53:56.366746   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:53:56.366755   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:53:56.366764   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:53:56 GMT
	I0731 10:53:56.366773   96871 round_trippers.go:580]     Audit-Id: 3c60e23d-aff2-4369-8c58-b9567cc508a0
	I0731 10:53:56.366785   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:53:56.366794   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:53:56.366888   96871 request.go:1188] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-776386-m02","uid":"2870ca66-a8b5-4dc8-97a6-393811c44eb6","resourceVersion":"509","creationTimestamp":"2023-07-31T10:53:36Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-776386-m02","kubernetes.io/os":"linux"},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"/var/run/crio/crio.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kubeadm","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:53:36Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{"f:kubeadm.alpha.kubernetes.io/cri-socket":{}}}}},{"manager":"kubelet","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:53:36Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:b [truncated 5479 chars]
	I0731 10:53:56.865540   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-776386-m02
	I0731 10:53:56.865561   96871 round_trippers.go:469] Request Headers:
	I0731 10:53:56.865573   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:53:56.865584   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:53:56.867723   96871 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
	I0731 10:53:56.867742   96871 round_trippers.go:577] Response Headers:
	I0731 10:53:56.867752   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:53:56.867762   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:53:56.867775   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:53:56 GMT
	I0731 10:53:56.867785   96871 round_trippers.go:580]     Audit-Id: 38a63a56-3df9-4485-b57f-7b97d795ac6d
	I0731 10:53:56.867791   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:53:56.867797   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:53:56.867899   96871 request.go:1188] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-776386-m02","uid":"2870ca66-a8b5-4dc8-97a6-393811c44eb6","resourceVersion":"509","creationTimestamp":"2023-07-31T10:53:36Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-776386-m02","kubernetes.io/os":"linux"},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"/var/run/crio/crio.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kubeadm","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:53:36Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{"f:kubeadm.alpha.kubernetes.io/cri-socket":{}}}}},{"manager":"kubelet","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:53:36Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:b [truncated 5479 chars]
	I0731 10:53:56.868188   96871 node_ready.go:58] node "multinode-776386-m02" has status "Ready":"False"
	I0731 10:53:57.365529   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-776386-m02
	I0731 10:53:57.365551   96871 round_trippers.go:469] Request Headers:
	I0731 10:53:57.365559   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:53:57.365570   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:53:57.367737   96871 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
	I0731 10:53:57.367755   96871 round_trippers.go:577] Response Headers:
	I0731 10:53:57.367762   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:53:57.367769   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:53:57.367777   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:53:57.367788   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:53:57.367801   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:53:57 GMT
	I0731 10:53:57.367813   96871 round_trippers.go:580]     Audit-Id: bf6528be-d575-44b7-984c-71a918c40411
	I0731 10:53:57.367909   96871 request.go:1188] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-776386-m02","uid":"2870ca66-a8b5-4dc8-97a6-393811c44eb6","resourceVersion":"509","creationTimestamp":"2023-07-31T10:53:36Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-776386-m02","kubernetes.io/os":"linux"},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"/var/run/crio/crio.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kubeadm","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:53:36Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{"f:kubeadm.alpha.kubernetes.io/cri-socket":{}}}}},{"manager":"kubelet","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:53:36Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:b [truncated 5479 chars]
	I0731 10:53:57.865478   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-776386-m02
	I0731 10:53:57.865496   96871 round_trippers.go:469] Request Headers:
	I0731 10:53:57.865505   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:53:57.865511   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:53:57.867778   96871 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
	I0731 10:53:57.867799   96871 round_trippers.go:577] Response Headers:
	I0731 10:53:57.867808   96871 round_trippers.go:580]     Audit-Id: 24f61c38-5c03-4fb3-b780-e63dde1e48c3
	I0731 10:53:57.867817   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:53:57.867825   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:53:57.867834   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:53:57.867842   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:53:57.867853   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:53:57 GMT
	I0731 10:53:57.867953   96871 request.go:1188] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-776386-m02","uid":"2870ca66-a8b5-4dc8-97a6-393811c44eb6","resourceVersion":"509","creationTimestamp":"2023-07-31T10:53:36Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-776386-m02","kubernetes.io/os":"linux"},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"/var/run/crio/crio.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kubeadm","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:53:36Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{"f:kubeadm.alpha.kubernetes.io/cri-socket":{}}}}},{"manager":"kubelet","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:53:36Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:b [truncated 5479 chars]
	I0731 10:53:58.364504   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-776386-m02
	I0731 10:53:58.364522   96871 round_trippers.go:469] Request Headers:
	I0731 10:53:58.364530   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:53:58.364538   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:53:58.366755   96871 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
	I0731 10:53:58.366776   96871 round_trippers.go:577] Response Headers:
	I0731 10:53:58.366784   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:53:58 GMT
	I0731 10:53:58.366795   96871 round_trippers.go:580]     Audit-Id: 6fa9b956-9c34-47a7-bffd-32a93b2306d7
	I0731 10:53:58.366804   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:53:58.366812   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:53:58.366828   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:53:58.366834   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:53:58.366933   96871 request.go:1188] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-776386-m02","uid":"2870ca66-a8b5-4dc8-97a6-393811c44eb6","resourceVersion":"509","creationTimestamp":"2023-07-31T10:53:36Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-776386-m02","kubernetes.io/os":"linux"},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"/var/run/crio/crio.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kubeadm","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:53:36Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{"f:kubeadm.alpha.kubernetes.io/cri-socket":{}}}}},{"manager":"kubelet","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:53:36Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:b [truncated 5479 chars]
	I0731 10:53:58.864601   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-776386-m02
	I0731 10:53:58.864621   96871 round_trippers.go:469] Request Headers:
	I0731 10:53:58.864629   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:53:58.864636   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:53:58.866887   96871 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
	I0731 10:53:58.866913   96871 round_trippers.go:577] Response Headers:
	I0731 10:53:58.866925   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:53:58.866932   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:53:58.866942   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:53:58.866955   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:53:58 GMT
	I0731 10:53:58.866966   96871 round_trippers.go:580]     Audit-Id: ee5c3379-b662-4816-b0bd-32ea389bd018
	I0731 10:53:58.866976   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:53:58.867103   96871 request.go:1188] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-776386-m02","uid":"2870ca66-a8b5-4dc8-97a6-393811c44eb6","resourceVersion":"509","creationTimestamp":"2023-07-31T10:53:36Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-776386-m02","kubernetes.io/os":"linux"},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"/var/run/crio/crio.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kubeadm","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:53:36Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{"f:kubeadm.alpha.kubernetes.io/cri-socket":{}}}}},{"manager":"kubelet","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:53:36Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:b [truncated 5479 chars]
	I0731 10:53:59.364644   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-776386-m02
	I0731 10:53:59.364663   96871 round_trippers.go:469] Request Headers:
	I0731 10:53:59.364672   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:53:59.364678   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:53:59.367015   96871 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
	I0731 10:53:59.367034   96871 round_trippers.go:577] Response Headers:
	I0731 10:53:59.367043   96871 round_trippers.go:580]     Audit-Id: 78c47a5a-4fdf-44b7-85f6-b109f7ce13c4
	I0731 10:53:59.367050   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:53:59.367057   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:53:59.367065   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:53:59.367074   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:53:59.367087   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:53:59 GMT
	I0731 10:53:59.367180   96871 request.go:1188] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-776386-m02","uid":"2870ca66-a8b5-4dc8-97a6-393811c44eb6","resourceVersion":"509","creationTimestamp":"2023-07-31T10:53:36Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-776386-m02","kubernetes.io/os":"linux"},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"/var/run/crio/crio.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kubeadm","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:53:36Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{"f:kubeadm.alpha.kubernetes.io/cri-socket":{}}}}},{"manager":"kubelet","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:53:36Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:b [truncated 5479 chars]
	I0731 10:53:59.367471   96871 node_ready.go:58] node "multinode-776386-m02" has status "Ready":"False"
	I0731 10:53:59.864735   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-776386-m02
	I0731 10:53:59.864754   96871 round_trippers.go:469] Request Headers:
	I0731 10:53:59.864762   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:53:59.864768   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:53:59.867167   96871 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
	I0731 10:53:59.867187   96871 round_trippers.go:577] Response Headers:
	I0731 10:53:59.867194   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:53:59 GMT
	I0731 10:53:59.867199   96871 round_trippers.go:580]     Audit-Id: 3215d3b8-4e8e-4d38-9262-477fbbf4fcb1
	I0731 10:53:59.867205   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:53:59.867210   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:53:59.867216   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:53:59.867224   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:53:59.867326   96871 request.go:1188] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-776386-m02","uid":"2870ca66-a8b5-4dc8-97a6-393811c44eb6","resourceVersion":"509","creationTimestamp":"2023-07-31T10:53:36Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-776386-m02","kubernetes.io/os":"linux"},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"/var/run/crio/crio.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kubeadm","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:53:36Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{"f:kubeadm.alpha.kubernetes.io/cri-socket":{}}}}},{"manager":"kubelet","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:53:36Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:b [truncated 5479 chars]
	I0731 10:54:00.364805   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-776386-m02
	I0731 10:54:00.364825   96871 round_trippers.go:469] Request Headers:
	I0731 10:54:00.364833   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:54:00.364840   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:54:00.367062   96871 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
	I0731 10:54:00.367081   96871 round_trippers.go:577] Response Headers:
	I0731 10:54:00.367091   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:54:00.367100   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:54:00.367107   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:54:00 GMT
	I0731 10:54:00.367115   96871 round_trippers.go:580]     Audit-Id: adcf3f22-4ee8-4375-ae60-b74ea93ff78f
	I0731 10:54:00.367124   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:54:00.367134   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:54:00.367231   96871 request.go:1188] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-776386-m02","uid":"2870ca66-a8b5-4dc8-97a6-393811c44eb6","resourceVersion":"509","creationTimestamp":"2023-07-31T10:53:36Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-776386-m02","kubernetes.io/os":"linux"},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"/var/run/crio/crio.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kubeadm","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:53:36Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{"f:kubeadm.alpha.kubernetes.io/cri-socket":{}}}}},{"manager":"kubelet","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:53:36Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:b [truncated 5479 chars]
	I0731 10:54:00.864790   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-776386-m02
	I0731 10:54:00.864809   96871 round_trippers.go:469] Request Headers:
	I0731 10:54:00.864817   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:54:00.864824   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:54:00.867043   96871 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
	I0731 10:54:00.867067   96871 round_trippers.go:577] Response Headers:
	I0731 10:54:00.867075   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:54:00.867081   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:54:00.867086   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:54:00.867092   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:54:00.867097   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:54:00 GMT
	I0731 10:54:00.867103   96871 round_trippers.go:580]     Audit-Id: 44ff03a4-6cfd-4ae1-82bc-8ffe17355d96
	I0731 10:54:00.867197   96871 request.go:1188] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-776386-m02","uid":"2870ca66-a8b5-4dc8-97a6-393811c44eb6","resourceVersion":"509","creationTimestamp":"2023-07-31T10:53:36Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-776386-m02","kubernetes.io/os":"linux"},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"/var/run/crio/crio.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kubeadm","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:53:36Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{"f:kubeadm.alpha.kubernetes.io/cri-socket":{}}}}},{"manager":"kubelet","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:53:36Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:b [truncated 5479 chars]
	I0731 10:54:01.364916   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-776386-m02
	I0731 10:54:01.364934   96871 round_trippers.go:469] Request Headers:
	I0731 10:54:01.364943   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:54:01.364952   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:54:01.367222   96871 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
	I0731 10:54:01.367252   96871 round_trippers.go:577] Response Headers:
	I0731 10:54:01.367259   96871 round_trippers.go:580]     Audit-Id: 950ab7a0-e958-4331-b066-f4844bd6156a
	I0731 10:54:01.367265   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:54:01.367271   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:54:01.367276   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:54:01.367283   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:54:01.367289   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:54:01 GMT
	I0731 10:54:01.367398   96871 request.go:1188] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-776386-m02","uid":"2870ca66-a8b5-4dc8-97a6-393811c44eb6","resourceVersion":"509","creationTimestamp":"2023-07-31T10:53:36Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-776386-m02","kubernetes.io/os":"linux"},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"/var/run/crio/crio.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kubeadm","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:53:36Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{"f:kubeadm.alpha.kubernetes.io/cri-socket":{}}}}},{"manager":"kubelet","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:53:36Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{".":{},"f:volumes.kube
rnetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:b [truncated 5479 chars]
	I0731 10:54:01.367697   96871 node_ready.go:58] node "multinode-776386-m02" has status "Ready":"False"
	I0731 10:54:01.864947   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-776386-m02
	I0731 10:54:01.864965   96871 round_trippers.go:469] Request Headers:
	I0731 10:54:01.864974   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:54:01.864980   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:54:01.867134   96871 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
	I0731 10:54:01.867156   96871 round_trippers.go:577] Response Headers:
	I0731 10:54:01.867164   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:54:01.867170   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:54:01.867175   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:54:01.867181   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:54:01.867187   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:54:01 GMT
	I0731 10:54:01.867195   96871 round_trippers.go:580]     Audit-Id: f8506b77-3a2d-4840-bfc9-00f5f09b0b97
	I0731 10:54:01.867306   96871 request.go:1188] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-776386-m02","uid":"2870ca66-a8b5-4dc8-97a6-393811c44eb6","resourceVersion":"509","creationTimestamp":"2023-07-31T10:53:36Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-776386-m02","kubernetes.io/os":"linux"},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"/var/run/crio/crio.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kubeadm","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:53:36Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{"f:kubeadm.alpha.kubernetes.io/cri-socket":{}}}}},{"manager":"kubelet","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:53:36Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{".":{},"f:volumes.kube
rnetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:b [truncated 5479 chars]
	I0731 10:54:02.364779   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-776386-m02
	I0731 10:54:02.364799   96871 round_trippers.go:469] Request Headers:
	I0731 10:54:02.364809   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:54:02.364817   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:54:02.367005   96871 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
	I0731 10:54:02.367031   96871 round_trippers.go:577] Response Headers:
	I0731 10:54:02.367041   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:54:02.367051   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:54:02.367061   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:54:02.367072   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:54:02 GMT
	I0731 10:54:02.367079   96871 round_trippers.go:580]     Audit-Id: 52c7a12a-b35f-496f-8470-7721fcf5fb6a
	I0731 10:54:02.367085   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:54:02.367178   96871 request.go:1188] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-776386-m02","uid":"2870ca66-a8b5-4dc8-97a6-393811c44eb6","resourceVersion":"509","creationTimestamp":"2023-07-31T10:53:36Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-776386-m02","kubernetes.io/os":"linux"},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"/var/run/crio/crio.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kubeadm","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:53:36Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{"f:kubeadm.alpha.kubernetes.io/cri-socket":{}}}}},{"manager":"kubelet","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:53:36Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{".":{},"f:volumes.kube
rnetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:b [truncated 5479 chars]
	I0731 10:54:02.864666   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-776386-m02
	I0731 10:54:02.864688   96871 round_trippers.go:469] Request Headers:
	I0731 10:54:02.864696   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:54:02.864702   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:54:02.866711   96871 round_trippers.go:574] Response Status: 200 OK in 1 milliseconds
	I0731 10:54:02.866738   96871 round_trippers.go:577] Response Headers:
	I0731 10:54:02.866750   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:54:02.866760   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:54:02.866774   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:54:02.866788   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:54:02 GMT
	I0731 10:54:02.866802   96871 round_trippers.go:580]     Audit-Id: 4f867835-64a9-4b0e-8ca0-eaff0eac0b00
	I0731 10:54:02.866816   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:54:02.866951   96871 request.go:1188] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-776386-m02","uid":"2870ca66-a8b5-4dc8-97a6-393811c44eb6","resourceVersion":"509","creationTimestamp":"2023-07-31T10:53:36Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-776386-m02","kubernetes.io/os":"linux"},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"/var/run/crio/crio.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kubeadm","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:53:36Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{"f:kubeadm.alpha.kubernetes.io/cri-socket":{}}}}},{"manager":"kubelet","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:53:36Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{".":{},"f:volumes.kube
rnetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:b [truncated 5479 chars]
	I0731 10:54:03.365469   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-776386-m02
	I0731 10:54:03.365494   96871 round_trippers.go:469] Request Headers:
	I0731 10:54:03.365502   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:54:03.365508   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:54:03.367657   96871 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
	I0731 10:54:03.367686   96871 round_trippers.go:577] Response Headers:
	I0731 10:54:03.367697   96871 round_trippers.go:580]     Audit-Id: 1f5ce95b-80a0-4c12-97dc-3c7709b50eca
	I0731 10:54:03.367703   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:54:03.367709   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:54:03.367715   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:54:03.367721   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:54:03.367726   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:54:03 GMT
	I0731 10:54:03.367809   96871 request.go:1188] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-776386-m02","uid":"2870ca66-a8b5-4dc8-97a6-393811c44eb6","resourceVersion":"509","creationTimestamp":"2023-07-31T10:53:36Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-776386-m02","kubernetes.io/os":"linux"},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"/var/run/crio/crio.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kubeadm","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:53:36Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{"f:kubeadm.alpha.kubernetes.io/cri-socket":{}}}}},{"manager":"kubelet","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:53:36Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{".":{},"f:volumes.kube
rnetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:b [truncated 5479 chars]
	I0731 10:54:03.368083   96871 node_ready.go:58] node "multinode-776386-m02" has status "Ready":"False"
	I0731 10:54:03.865441   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-776386-m02
	I0731 10:54:03.865459   96871 round_trippers.go:469] Request Headers:
	I0731 10:54:03.865467   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:54:03.865473   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:54:03.867566   96871 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
	I0731 10:54:03.867583   96871 round_trippers.go:577] Response Headers:
	I0731 10:54:03.867591   96871 round_trippers.go:580]     Audit-Id: e89bd8b7-c26b-445a-8042-6852a94e5e4a
	I0731 10:54:03.867597   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:54:03.867606   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:54:03.867614   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:54:03.867626   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:54:03.867634   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:54:03 GMT
	I0731 10:54:03.867765   96871 request.go:1188] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-776386-m02","uid":"2870ca66-a8b5-4dc8-97a6-393811c44eb6","resourceVersion":"509","creationTimestamp":"2023-07-31T10:53:36Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-776386-m02","kubernetes.io/os":"linux"},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"/var/run/crio/crio.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kubeadm","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:53:36Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{"f:kubeadm.alpha.kubernetes.io/cri-socket":{}}}}},{"manager":"kubelet","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:53:36Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{".":{},"f:volumes.kube
rnetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:b [truncated 5479 chars]
	I0731 10:54:04.365389   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-776386-m02
	I0731 10:54:04.365408   96871 round_trippers.go:469] Request Headers:
	I0731 10:54:04.365416   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:54:04.365423   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:54:04.367698   96871 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
	I0731 10:54:04.367720   96871 round_trippers.go:577] Response Headers:
	I0731 10:54:04.367728   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:54:04.367736   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:54:04 GMT
	I0731 10:54:04.367745   96871 round_trippers.go:580]     Audit-Id: 6db5493e-1fcf-4d42-9fd6-c39de03a7f2d
	I0731 10:54:04.367755   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:54:04.367764   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:54:04.367774   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:54:04.367872   96871 request.go:1188] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-776386-m02","uid":"2870ca66-a8b5-4dc8-97a6-393811c44eb6","resourceVersion":"509","creationTimestamp":"2023-07-31T10:53:36Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-776386-m02","kubernetes.io/os":"linux"},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"/var/run/crio/crio.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kubeadm","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:53:36Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{"f:kubeadm.alpha.kubernetes.io/cri-socket":{}}}}},{"manager":"kubelet","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:53:36Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{".":{},"f:volumes.kube
rnetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:b [truncated 5479 chars]
	I0731 10:54:04.865492   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-776386-m02
	I0731 10:54:04.865512   96871 round_trippers.go:469] Request Headers:
	I0731 10:54:04.865524   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:54:04.865533   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:54:04.867823   96871 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
	I0731 10:54:04.867841   96871 round_trippers.go:577] Response Headers:
	I0731 10:54:04.867850   96871 round_trippers.go:580]     Audit-Id: 8d7802a6-c04d-4a6b-bc1d-4d35a6e76929
	I0731 10:54:04.867856   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:54:04.867861   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:54:04.867868   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:54:04.867877   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:54:04.867886   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:54:04 GMT
	I0731 10:54:04.867991   96871 request.go:1188] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-776386-m02","uid":"2870ca66-a8b5-4dc8-97a6-393811c44eb6","resourceVersion":"509","creationTimestamp":"2023-07-31T10:53:36Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-776386-m02","kubernetes.io/os":"linux"},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"/var/run/crio/crio.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kubeadm","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:53:36Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{"f:kubeadm.alpha.kubernetes.io/cri-socket":{}}}}},{"manager":"kubelet","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:53:36Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{".":{},"f:volumes.kube
rnetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:b [truncated 5479 chars]
	I0731 10:54:05.364512   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-776386-m02
	I0731 10:54:05.364534   96871 round_trippers.go:469] Request Headers:
	I0731 10:54:05.364548   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:54:05.364556   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:54:05.366778   96871 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
	I0731 10:54:05.366804   96871 round_trippers.go:577] Response Headers:
	I0731 10:54:05.366815   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:54:05.366824   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:54:05.366833   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:54:05 GMT
	I0731 10:54:05.366844   96871 round_trippers.go:580]     Audit-Id: 14185299-0483-4d07-9a42-1ffd41a2b41a
	I0731 10:54:05.366856   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:54:05.366865   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:54:05.366973   96871 request.go:1188] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-776386-m02","uid":"2870ca66-a8b5-4dc8-97a6-393811c44eb6","resourceVersion":"509","creationTimestamp":"2023-07-31T10:53:36Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-776386-m02","kubernetes.io/os":"linux"},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"/var/run/crio/crio.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kubeadm","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:53:36Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{"f:kubeadm.alpha.kubernetes.io/cri-socket":{}}}}},{"manager":"kubelet","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:53:36Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{".":{},"f:volumes.kube
rnetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:b [truncated 5479 chars]
	I0731 10:54:05.864541   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-776386-m02
	I0731 10:54:05.864562   96871 round_trippers.go:469] Request Headers:
	I0731 10:54:05.864570   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:54:05.864576   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:54:05.866813   96871 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
	I0731 10:54:05.866833   96871 round_trippers.go:577] Response Headers:
	I0731 10:54:05.866841   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:54:05 GMT
	I0731 10:54:05.866847   96871 round_trippers.go:580]     Audit-Id: 17a959e5-700a-462b-bb06-882ea60189bb
	I0731 10:54:05.866853   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:54:05.866864   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:54:05.866873   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:54:05.866878   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:54:05.866998   96871 request.go:1188] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-776386-m02","uid":"2870ca66-a8b5-4dc8-97a6-393811c44eb6","resourceVersion":"509","creationTimestamp":"2023-07-31T10:53:36Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-776386-m02","kubernetes.io/os":"linux"},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"/var/run/crio/crio.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kubeadm","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:53:36Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{"f:kubeadm.alpha.kubernetes.io/cri-socket":{}}}}},{"manager":"kubelet","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:53:36Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{".":{},"f:volumes.kube
rnetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:b [truncated 5479 chars]
	I0731 10:54:05.867384   96871 node_ready.go:58] node "multinode-776386-m02" has status "Ready":"False"
	I0731 10:54:06.364829   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-776386-m02
	I0731 10:54:06.364849   96871 round_trippers.go:469] Request Headers:
	I0731 10:54:06.364862   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:54:06.364869   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:54:06.366980   96871 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
	I0731 10:54:06.366999   96871 round_trippers.go:577] Response Headers:
	I0731 10:54:06.367009   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:54:06 GMT
	I0731 10:54:06.367016   96871 round_trippers.go:580]     Audit-Id: 186bc3aa-25d1-4a94-a112-30787525d5d2
	I0731 10:54:06.367023   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:54:06.367031   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:54:06.367040   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:54:06.367052   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:54:06.367138   96871 request.go:1188] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-776386-m02","uid":"2870ca66-a8b5-4dc8-97a6-393811c44eb6","resourceVersion":"509","creationTimestamp":"2023-07-31T10:53:36Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-776386-m02","kubernetes.io/os":"linux"},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"/var/run/crio/crio.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kubeadm","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:53:36Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{"f:kubeadm.alpha.kubernetes.io/cri-socket":{}}}}},{"manager":"kubelet","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:53:36Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{".":{},"f:volumes.kube
rnetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:b [truncated 5479 chars]
	I0731 10:54:06.864567   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-776386-m02
	I0731 10:54:06.864589   96871 round_trippers.go:469] Request Headers:
	I0731 10:54:06.864600   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:54:06.864608   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:54:06.866858   96871 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
	I0731 10:54:06.866877   96871 round_trippers.go:577] Response Headers:
	I0731 10:54:06.866888   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:54:06 GMT
	I0731 10:54:06.866895   96871 round_trippers.go:580]     Audit-Id: f50317d3-80e0-4057-b8d7-8fcb53d835e5
	I0731 10:54:06.866904   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:54:06.866912   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:54:06.866923   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:54:06.866930   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:54:06.867017   96871 request.go:1188] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-776386-m02","uid":"2870ca66-a8b5-4dc8-97a6-393811c44eb6","resourceVersion":"509","creationTimestamp":"2023-07-31T10:53:36Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-776386-m02","kubernetes.io/os":"linux"},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"/var/run/crio/crio.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kubeadm","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:53:36Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{"f:kubeadm.alpha.kubernetes.io/cri-socket":{}}}}},{"manager":"kubelet","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:53:36Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{".":{},"f:volumes.kube
rnetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:b [truncated 5479 chars]
	I0731 10:54:07.364535   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-776386-m02
	I0731 10:54:07.364555   96871 round_trippers.go:469] Request Headers:
	I0731 10:54:07.364567   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:54:07.364576   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:54:07.366809   96871 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
	I0731 10:54:07.366828   96871 round_trippers.go:577] Response Headers:
	I0731 10:54:07.366835   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:54:07.366841   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:54:07.366846   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:54:07.366852   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:54:07.366857   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:54:07 GMT
	I0731 10:54:07.366862   96871 round_trippers.go:580]     Audit-Id: 11d327bb-b084-40e0-8ac4-bfc31040e6c4
	I0731 10:54:07.366955   96871 request.go:1188] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-776386-m02","uid":"2870ca66-a8b5-4dc8-97a6-393811c44eb6","resourceVersion":"509","creationTimestamp":"2023-07-31T10:53:36Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-776386-m02","kubernetes.io/os":"linux"},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"/var/run/crio/crio.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kubeadm","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:53:36Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{"f:kubeadm.alpha.kubernetes.io/cri-socket":{}}}}},{"manager":"kubelet","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:53:36Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{".":{},"f:volumes.kube
rnetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:b [truncated 5479 chars]
	I0731 10:54:07.864516   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-776386-m02
	I0731 10:54:07.864545   96871 round_trippers.go:469] Request Headers:
	I0731 10:54:07.864553   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:54:07.864559   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:54:07.866785   96871 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
	I0731 10:54:07.866807   96871 round_trippers.go:577] Response Headers:
	I0731 10:54:07.866817   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:54:07.866825   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:54:07 GMT
	I0731 10:54:07.866833   96871 round_trippers.go:580]     Audit-Id: a4afaf26-cea2-4df2-9807-a0dfda77973e
	I0731 10:54:07.866842   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:54:07.866856   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:54:07.866869   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:54:07.866996   96871 request.go:1188] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-776386-m02","uid":"2870ca66-a8b5-4dc8-97a6-393811c44eb6","resourceVersion":"509","creationTimestamp":"2023-07-31T10:53:36Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-776386-m02","kubernetes.io/os":"linux"},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"/var/run/crio/crio.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kubeadm","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:53:36Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{"f:kubeadm.alpha.kubernetes.io/cri-socket":{}}}}},{"manager":"kubelet","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:53:36Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{".":{},"f:volumes.kube
rnetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:b [truncated 5479 chars]
	I0731 10:54:08.364498   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-776386-m02
	I0731 10:54:08.364516   96871 round_trippers.go:469] Request Headers:
	I0731 10:54:08.364525   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:54:08.364530   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:54:08.366817   96871 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
	I0731 10:54:08.366841   96871 round_trippers.go:577] Response Headers:
	I0731 10:54:08.366850   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:54:08.366859   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:54:08.366868   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:54:08.366881   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:54:08.366890   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:54:08 GMT
	I0731 10:54:08.366896   96871 round_trippers.go:580]     Audit-Id: 5d214650-bd6d-4028-a945-f84aeb4ca440
	I0731 10:54:08.366997   96871 request.go:1188] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-776386-m02","uid":"2870ca66-a8b5-4dc8-97a6-393811c44eb6","resourceVersion":"509","creationTimestamp":"2023-07-31T10:53:36Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-776386-m02","kubernetes.io/os":"linux"},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"/var/run/crio/crio.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kubeadm","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:53:36Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{"f:kubeadm.alpha.kubernetes.io/cri-socket":{}}}}},{"manager":"kubelet","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:53:36Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{".":{},"f:volumes.kube
rnetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:b [truncated 5479 chars]
	I0731 10:54:08.367288   96871 node_ready.go:58] node "multinode-776386-m02" has status "Ready":"False"
	I0731 10:54:08.864651   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-776386-m02
	I0731 10:54:08.864671   96871 round_trippers.go:469] Request Headers:
	I0731 10:54:08.864682   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:54:08.864690   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:54:08.867103   96871 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
	I0731 10:54:08.867121   96871 round_trippers.go:577] Response Headers:
	I0731 10:54:08.867128   96871 round_trippers.go:580]     Audit-Id: 99ad6963-f237-4f4b-9b46-1d007628e408
	I0731 10:54:08.867133   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:54:08.867139   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:54:08.867144   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:54:08.867153   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:54:08.867161   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:54:08 GMT
	I0731 10:54:08.867257   96871 request.go:1188] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-776386-m02","uid":"2870ca66-a8b5-4dc8-97a6-393811c44eb6","resourceVersion":"533","creationTimestamp":"2023-07-31T10:53:36Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-776386-m02","kubernetes.io/os":"linux"},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"/var/run/crio/crio.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kubeadm","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:53:36Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{"f:kubeadm.alpha.kubernetes.io/cri-socket":{}}}}},{"manager":"kubelet","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:53:36Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{".":{},"f:volumes.kube
rnetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:b [truncated 5296 chars]
	I0731 10:54:08.867583   96871 node_ready.go:49] node "multinode-776386-m02" has status "Ready":"True"
	I0731 10:54:08.867599   96871 node_ready.go:38] duration metric: took 32.008373615s waiting for node "multinode-776386-m02" to be "Ready" ...
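The 32-second wait that just completed is a client-side readiness poll: roughly every 500ms (visible in the .364/.864 timestamps above) the client GETs the node object and inspects its Ready condition until it reports True. Below is a minimal sketch of that pattern using client-go; it is not minikube's actual node_ready.go, and the helper name pollNodeReady, the interval, and the timeout are illustrative assumptions.

	package main

	import (
		"context"
		"fmt"
		"time"

		corev1 "k8s.io/api/core/v1"
		metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
		"k8s.io/client-go/kubernetes"
		"k8s.io/client-go/tools/clientcmd"
	)

	// pollNodeReady (hypothetical helper) GETs the node on a fixed interval and
	// returns once its NodeReady condition is ConditionTrue, mirroring the
	// repeated GET /api/v1/nodes/<name> requests in the log above.
	func pollNodeReady(ctx context.Context, cs *kubernetes.Clientset, name string, interval, timeout time.Duration) error {
		deadline := time.Now().Add(timeout)
		for time.Now().Before(deadline) {
			node, err := cs.CoreV1().Nodes().Get(ctx, name, metav1.GetOptions{})
			if err == nil {
				for _, c := range node.Status.Conditions {
					if c.Type == corev1.NodeReady && c.Status == corev1.ConditionTrue {
						return nil // node reported "Ready":"True"
					}
				}
			}
			time.Sleep(interval) // ~500ms cadence, matching the log
		}
		return fmt.Errorf("node %q never became Ready within %v", name, timeout)
	}

	func main() {
		// Assumes a standard kubeconfig at the default location.
		config, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
		if err != nil {
			panic(err)
		}
		cs := kubernetes.NewForConfigOrDie(config)
		if err := pollNodeReady(context.Background(), cs, "multinode-776386-m02", 500*time.Millisecond, 6*time.Minute); err != nil {
			panic(err)
		}
		fmt.Println("node Ready")
	}

A watch on the node object would avoid the repeated GETs, but a plain poll like this is simpler and tolerates API-server restarts, which is plausibly why a poll-style wait shows up in the log.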
	I0731 10:54:08.867607   96871 pod_ready.go:35] extra waiting up to 6m0s for all system-critical pods including labels [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] to be "Ready" ...
	I0731 10:54:08.867663   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/namespaces/kube-system/pods
	I0731 10:54:08.867674   96871 round_trippers.go:469] Request Headers:
	I0731 10:54:08.867681   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:54:08.867689   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:54:08.872577   96871 round_trippers.go:574] Response Status: 200 OK in 4 milliseconds
	I0731 10:54:08.872600   96871 round_trippers.go:577] Response Headers:
	I0731 10:54:08.872608   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:54:08.872614   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:54:08.872620   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:54:08.872629   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:54:08.872639   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:54:08 GMT
	I0731 10:54:08.872650   96871 round_trippers.go:580]     Audit-Id: bdf6781e-96c3-419d-a21c-00e41ea385c0
	I0731 10:54:08.873148   96871 request.go:1188] Response Body: {"kind":"PodList","apiVersion":"v1","metadata":{"resourceVersion":"533"},"items":[{"metadata":{"name":"coredns-5d78c9869d-w86c5","generateName":"coredns-5d78c9869d-","namespace":"kube-system","uid":"fcb57c8f-9276-4e70-a275-2865ac997394","resourceVersion":"444","creationTimestamp":"2023-07-31T10:52:48Z","labels":{"k8s-app":"kube-dns","pod-template-hash":"5d78c9869d"},"ownerReferences":[{"apiVersion":"apps/v1","kind":"ReplicaSet","name":"coredns-5d78c9869d","uid":"78328e85-a38b-4605-8363-2be69c87f749","controller":true,"blockOwnerDeletion":true}],"managedFields":[{"manager":"kube-controller-manager","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:52:48Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:k8s-app":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"78328e85-a38b-4605-8363-2be69c87f749\"}":{}}},"f:spec":{"f:affinity":{".":{},"f:podAntiAffinity":{".":{},"f
:preferredDuringSchedulingIgnoredDuringExecution":{}}},"f:containers":{ [truncated 68974 chars]
	I0731 10:54:08.875216   96871 pod_ready.go:78] waiting up to 6m0s for pod "coredns-5d78c9869d-w86c5" in "kube-system" namespace to be "Ready" ...
	I0731 10:54:08.875275   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/namespaces/kube-system/pods/coredns-5d78c9869d-w86c5
	I0731 10:54:08.875283   96871 round_trippers.go:469] Request Headers:
	I0731 10:54:08.875291   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:54:08.875299   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:54:08.877124   96871 round_trippers.go:574] Response Status: 200 OK in 1 milliseconds
	I0731 10:54:08.877138   96871 round_trippers.go:577] Response Headers:
	I0731 10:54:08.877144   96871 round_trippers.go:580]     Audit-Id: 45c46714-1b4e-408e-bbd0-9529db120ef9
	I0731 10:54:08.877150   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:54:08.877156   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:54:08.877161   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:54:08.877167   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:54:08.877172   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:54:08 GMT
	I0731 10:54:08.877285   96871 request.go:1188] Response Body: {"kind":"Pod","apiVersion":"v1","metadata":{"name":"coredns-5d78c9869d-w86c5","generateName":"coredns-5d78c9869d-","namespace":"kube-system","uid":"fcb57c8f-9276-4e70-a275-2865ac997394","resourceVersion":"444","creationTimestamp":"2023-07-31T10:52:48Z","labels":{"k8s-app":"kube-dns","pod-template-hash":"5d78c9869d"},"ownerReferences":[{"apiVersion":"apps/v1","kind":"ReplicaSet","name":"coredns-5d78c9869d","uid":"78328e85-a38b-4605-8363-2be69c87f749","controller":true,"blockOwnerDeletion":true}],"managedFields":[{"manager":"kube-controller-manager","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:52:48Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:k8s-app":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"78328e85-a38b-4605-8363-2be69c87f749\"}":{}}},"f:spec":{"f:affinity":{".":{},"f:podAntiAffinity":{".":{},"f:preferredDuringSchedulingIgnoredDuringExecution":{
}}},"f:containers":{"k:{\"name\":\"coredns\"}":{".":{},"f:args":{},"f:i [truncated 6263 chars]
	I0731 10:54:08.877717   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-776386
	I0731 10:54:08.877731   96871 round_trippers.go:469] Request Headers:
	I0731 10:54:08.877738   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:54:08.877745   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:54:08.879526   96871 round_trippers.go:574] Response Status: 200 OK in 1 milliseconds
	I0731 10:54:08.879540   96871 round_trippers.go:577] Response Headers:
	I0731 10:54:08.879546   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:54:08.879552   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:54:08 GMT
	I0731 10:54:08.879557   96871 round_trippers.go:580]     Audit-Id: 1084b42b-b2f5-4bef-ad18-e39fc61a43e3
	I0731 10:54:08.879562   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:54:08.879567   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:54:08.879574   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:54:08.879722   96871 request.go:1188] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-776386","uid":"4de4b338-d1a0-4f0a-acb8-6058d42c7868","resourceVersion":"428","creationTimestamp":"2023-07-31T10:52:31Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-776386","kubernetes.io/os":"linux","minikube.k8s.io/commit":"a7848ba25aaaad8ebb50e721c0d343e471188fc7","minikube.k8s.io/name":"multinode-776386","minikube.k8s.io/primary":"true","minikube.k8s.io/updated_at":"2023_07_31T10_52_35_0700","minikube.k8s.io/version":"v1.31.1","node-role.kubernetes.io/control-plane":"","node.kubernetes.io/exclude-from-external-load-balancers":""},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"unix:///var/run/crio/crio.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kubelet","operation":"Update","apiVe
rsion":"v1","time":"2023-07-31T10:52:31Z","fieldsType":"FieldsV1","fiel [truncated 5947 chars]
	I0731 10:54:08.880106   96871 pod_ready.go:92] pod "coredns-5d78c9869d-w86c5" in "kube-system" namespace has status "Ready":"True"
	I0731 10:54:08.880123   96871 pod_ready.go:81] duration metric: took 4.887888ms waiting for pod "coredns-5d78c9869d-w86c5" in "kube-system" namespace to be "Ready" ...
	I0731 10:54:08.880139   96871 pod_ready.go:78] waiting up to 6m0s for pod "etcd-multinode-776386" in "kube-system" namespace to be "Ready" ...
	I0731 10:54:08.880196   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/namespaces/kube-system/pods/etcd-multinode-776386
	I0731 10:54:08.880207   96871 round_trippers.go:469] Request Headers:
	I0731 10:54:08.880218   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:54:08.880231   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:54:08.881900   96871 round_trippers.go:574] Response Status: 200 OK in 1 milliseconds
	I0731 10:54:08.881919   96871 round_trippers.go:577] Response Headers:
	I0731 10:54:08.881930   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:54:08.881949   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:54:08.881964   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:54:08.881977   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:54:08 GMT
	I0731 10:54:08.881989   96871 round_trippers.go:580]     Audit-Id: 95cd2e5c-125b-4bf1-80dd-54bab5d1dcb5
	I0731 10:54:08.882001   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:54:08.882091   96871 request.go:1188] Response Body: {"kind":"Pod","apiVersion":"v1","metadata":{"name":"etcd-multinode-776386","namespace":"kube-system","uid":"09d3804e-7c35-4295-af6c-b2f481c4903d","resourceVersion":"324","creationTimestamp":"2023-07-31T10:52:34Z","labels":{"component":"etcd","tier":"control-plane"},"annotations":{"kubeadm.kubernetes.io/etcd.advertise-client-urls":"https://192.168.58.2:2379","kubernetes.io/config.hash":"8337fb7d52d96869ff08a0ef3d4aa6f6","kubernetes.io/config.mirror":"8337fb7d52d96869ff08a0ef3d4aa6f6","kubernetes.io/config.seen":"2023-07-31T10:52:34.447469250Z","kubernetes.io/config.source":"file"},"ownerReferences":[{"apiVersion":"v1","kind":"Node","name":"multinode-776386","uid":"4de4b338-d1a0-4f0a-acb8-6058d42c7868","controller":true}],"managedFields":[{"manager":"kubelet","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:52:34Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{".":{},"f:kubeadm.kubernetes.io/etcd.advertise-cl
ient-urls":{},"f:kubernetes.io/config.hash":{},"f:kubernetes.io/config. [truncated 5833 chars]
	I0731 10:54:08.882458   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-776386
	I0731 10:54:08.882470   96871 round_trippers.go:469] Request Headers:
	I0731 10:54:08.882477   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:54:08.882484   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:54:08.884086   96871 round_trippers.go:574] Response Status: 200 OK in 1 milliseconds
	I0731 10:54:08.884106   96871 round_trippers.go:577] Response Headers:
	I0731 10:54:08.884117   96871 round_trippers.go:580]     Audit-Id: 67729973-13c3-4df7-9990-51f800a1acd7
	I0731 10:54:08.884126   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:54:08.884138   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:54:08.884148   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:54:08.884161   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:54:08.884173   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:54:08 GMT
	I0731 10:54:08.884290   96871 request.go:1188] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-776386","uid":"4de4b338-d1a0-4f0a-acb8-6058d42c7868","resourceVersion":"428","creationTimestamp":"2023-07-31T10:52:31Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-776386","kubernetes.io/os":"linux","minikube.k8s.io/commit":"a7848ba25aaaad8ebb50e721c0d343e471188fc7","minikube.k8s.io/name":"multinode-776386","minikube.k8s.io/primary":"true","minikube.k8s.io/updated_at":"2023_07_31T10_52_35_0700","minikube.k8s.io/version":"v1.31.1","node-role.kubernetes.io/control-plane":"","node.kubernetes.io/exclude-from-external-load-balancers":""},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"unix:///var/run/crio/crio.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kubelet","operation":"Update","apiVe
rsion":"v1","time":"2023-07-31T10:52:31Z","fieldsType":"FieldsV1","fiel [truncated 5947 chars]
	I0731 10:54:08.884647   96871 pod_ready.go:92] pod "etcd-multinode-776386" in "kube-system" namespace has status "Ready":"True"
	I0731 10:54:08.884663   96871 pod_ready.go:81] duration metric: took 4.513343ms waiting for pod "etcd-multinode-776386" in "kube-system" namespace to be "Ready" ...
	I0731 10:54:08.884679   96871 pod_ready.go:78] waiting up to 6m0s for pod "kube-apiserver-multinode-776386" in "kube-system" namespace to be "Ready" ...
	I0731 10:54:08.884732   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/namespaces/kube-system/pods/kube-apiserver-multinode-776386
	I0731 10:54:08.884744   96871 round_trippers.go:469] Request Headers:
	I0731 10:54:08.884755   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:54:08.884766   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:54:08.886382   96871 round_trippers.go:574] Response Status: 200 OK in 1 milliseconds
	I0731 10:54:08.886402   96871 round_trippers.go:577] Response Headers:
	I0731 10:54:08.886412   96871 round_trippers.go:580]     Audit-Id: 7600f899-28a3-4963-8044-5e39aee8762e
	I0731 10:54:08.886419   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:54:08.886424   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:54:08.886430   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:54:08.886436   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:54:08.886442   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:54:08 GMT
	I0731 10:54:08.886578   96871 request.go:1188] Response Body: {"kind":"Pod","apiVersion":"v1","metadata":{"name":"kube-apiserver-multinode-776386","namespace":"kube-system","uid":"cd8aaec2-83b4-4346-9ad9-9167afe8b68f","resourceVersion":"328","creationTimestamp":"2023-07-31T10:52:34Z","labels":{"component":"kube-apiserver","tier":"control-plane"},"annotations":{"kubeadm.kubernetes.io/kube-apiserver.advertise-address.endpoint":"192.168.58.2:8443","kubernetes.io/config.hash":"680a3d2f61a1a9343330816821eb0e8f","kubernetes.io/config.mirror":"680a3d2f61a1a9343330816821eb0e8f","kubernetes.io/config.seen":"2023-07-31T10:52:34.447473226Z","kubernetes.io/config.source":"file"},"ownerReferences":[{"apiVersion":"v1","kind":"Node","name":"multinode-776386","uid":"4de4b338-d1a0-4f0a-acb8-6058d42c7868","controller":true}],"managedFields":[{"manager":"kubelet","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:52:34Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{".":{},"f:kubeadm.kube
rnetes.io/kube-apiserver.advertise-address.endpoint":{},"f:kubernetes.i [truncated 8219 chars]
	I0731 10:54:08.886974   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-776386
	I0731 10:54:08.886986   96871 round_trippers.go:469] Request Headers:
	I0731 10:54:08.887000   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:54:08.887006   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:54:08.888558   96871 round_trippers.go:574] Response Status: 200 OK in 1 milliseconds
	I0731 10:54:08.888579   96871 round_trippers.go:577] Response Headers:
	I0731 10:54:08.888601   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:54:08.888613   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:54:08 GMT
	I0731 10:54:08.888623   96871 round_trippers.go:580]     Audit-Id: f8496a49-a1f8-4464-85ab-742c45284103
	I0731 10:54:08.888639   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:54:08.888646   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:54:08.888654   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:54:08.888753   96871 request.go:1188] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-776386","uid":"4de4b338-d1a0-4f0a-acb8-6058d42c7868","resourceVersion":"428","creationTimestamp":"2023-07-31T10:52:31Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-776386","kubernetes.io/os":"linux","minikube.k8s.io/commit":"a7848ba25aaaad8ebb50e721c0d343e471188fc7","minikube.k8s.io/name":"multinode-776386","minikube.k8s.io/primary":"true","minikube.k8s.io/updated_at":"2023_07_31T10_52_35_0700","minikube.k8s.io/version":"v1.31.1","node-role.kubernetes.io/control-plane":"","node.kubernetes.io/exclude-from-external-load-balancers":""},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"unix:///var/run/crio/crio.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kubelet","operation":"Update","apiVe
rsion":"v1","time":"2023-07-31T10:52:31Z","fieldsType":"FieldsV1","fiel [truncated 5947 chars]
	I0731 10:54:08.889037   96871 pod_ready.go:92] pod "kube-apiserver-multinode-776386" in "kube-system" namespace has status "Ready":"True"
	I0731 10:54:08.889050   96871 pod_ready.go:81] duration metric: took 4.360618ms waiting for pod "kube-apiserver-multinode-776386" in "kube-system" namespace to be "Ready" ...
	I0731 10:54:08.889057   96871 pod_ready.go:78] waiting up to 6m0s for pod "kube-controller-manager-multinode-776386" in "kube-system" namespace to be "Ready" ...
	I0731 10:54:08.889094   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/namespaces/kube-system/pods/kube-controller-manager-multinode-776386
	I0731 10:54:08.889102   96871 round_trippers.go:469] Request Headers:
	I0731 10:54:08.889109   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:54:08.889116   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:54:08.890613   96871 round_trippers.go:574] Response Status: 200 OK in 1 milliseconds
	I0731 10:54:08.890627   96871 round_trippers.go:577] Response Headers:
	I0731 10:54:08.890637   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:54:08 GMT
	I0731 10:54:08.890646   96871 round_trippers.go:580]     Audit-Id: e6553c03-5ca4-478b-b309-3802fe8b8767
	I0731 10:54:08.890655   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:54:08.890663   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:54:08.890675   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:54:08.890685   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:54:08.890820   96871 request.go:1188] Response Body: {"kind":"Pod","apiVersion":"v1","metadata":{"name":"kube-controller-manager-multinode-776386","namespace":"kube-system","uid":"297aa029-28d8-4509-bda1-56f44d45e10e","resourceVersion":"293","creationTimestamp":"2023-07-31T10:52:32Z","labels":{"component":"kube-controller-manager","tier":"control-plane"},"annotations":{"kubernetes.io/config.hash":"585dd60c757682ef83c5c82367a18a2f","kubernetes.io/config.mirror":"585dd60c757682ef83c5c82367a18a2f","kubernetes.io/config.seen":"2023-07-31T10:52:28.461388115Z","kubernetes.io/config.source":"file"},"ownerReferences":[{"apiVersion":"v1","kind":"Node","name":"multinode-776386","uid":"4de4b338-d1a0-4f0a-acb8-6058d42c7868","controller":true}],"managedFields":[{"manager":"kubelet","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:52:32Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{".":{},"f:kubernetes.io/config.hash":{},"f:kubernetes.io/config.mirror":{},"f:kubernetes.i
o/config.seen":{},"f:kubernetes.io/config.source":{}},"f:labels":{".":{ [truncated 7794 chars]
	I0731 10:54:08.891162   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-776386
	I0731 10:54:08.891173   96871 round_trippers.go:469] Request Headers:
	I0731 10:54:08.891180   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:54:08.891186   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:54:08.892684   96871 round_trippers.go:574] Response Status: 200 OK in 1 milliseconds
	I0731 10:54:08.892702   96871 round_trippers.go:577] Response Headers:
	I0731 10:54:08.892712   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:54:08.892721   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:54:08.892729   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:54:08.892739   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:54:08.892749   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:54:08 GMT
	I0731 10:54:08.892761   96871 round_trippers.go:580]     Audit-Id: 411b4003-35d8-42d3-8f6b-8a8f297e3952
	I0731 10:54:08.892854   96871 request.go:1188] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-776386","uid":"4de4b338-d1a0-4f0a-acb8-6058d42c7868","resourceVersion":"428","creationTimestamp":"2023-07-31T10:52:31Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-776386","kubernetes.io/os":"linux","minikube.k8s.io/commit":"a7848ba25aaaad8ebb50e721c0d343e471188fc7","minikube.k8s.io/name":"multinode-776386","minikube.k8s.io/primary":"true","minikube.k8s.io/updated_at":"2023_07_31T10_52_35_0700","minikube.k8s.io/version":"v1.31.1","node-role.kubernetes.io/control-plane":"","node.kubernetes.io/exclude-from-external-load-balancers":""},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"unix:///var/run/crio/crio.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kubelet","operation":"Update","apiVe
rsion":"v1","time":"2023-07-31T10:52:31Z","fieldsType":"FieldsV1","fiel [truncated 5947 chars]
	I0731 10:54:08.893273   96871 pod_ready.go:92] pod "kube-controller-manager-multinode-776386" in "kube-system" namespace has status "Ready":"True"
	I0731 10:54:08.893289   96871 pod_ready.go:81] duration metric: took 4.225061ms waiting for pod "kube-controller-manager-multinode-776386" in "kube-system" namespace to be "Ready" ...
	I0731 10:54:08.893300   96871 pod_ready.go:78] waiting up to 6m0s for pod "kube-proxy-59xqp" in "kube-system" namespace to be "Ready" ...
	I0731 10:54:09.065685   96871 request.go:628] Waited for 172.331789ms due to client-side throttling, not priority and fairness, request: GET:https://192.168.58.2:8443/api/v1/namespaces/kube-system/pods/kube-proxy-59xqp
	I0731 10:54:09.065734   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/namespaces/kube-system/pods/kube-proxy-59xqp
	I0731 10:54:09.065751   96871 round_trippers.go:469] Request Headers:
	I0731 10:54:09.065760   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:54:09.065766   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:54:09.068015   96871 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
	I0731 10:54:09.068039   96871 round_trippers.go:577] Response Headers:
	I0731 10:54:09.068047   96871 round_trippers.go:580]     Audit-Id: 357250ab-9308-4aff-88c1-b19e2005aad5
	I0731 10:54:09.068053   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:54:09.068059   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:54:09.068066   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:54:09.068075   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:54:09.068083   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:54:09 GMT
	I0731 10:54:09.068218   96871 request.go:1188] Response Body: {"kind":"Pod","apiVersion":"v1","metadata":{"name":"kube-proxy-59xqp","generateName":"kube-proxy-","namespace":"kube-system","uid":"e086520b-af9b-4c2e-adc1-cecdf0026890","resourceVersion":"407","creationTimestamp":"2023-07-31T10:52:47Z","labels":{"controller-revision-hash":"56999f657b","k8s-app":"kube-proxy","pod-template-generation":"1"},"ownerReferences":[{"apiVersion":"apps/v1","kind":"DaemonSet","name":"kube-proxy","uid":"8c4cc344-625b-46a6-ad67-8a006a415327","controller":true,"blockOwnerDeletion":true}],"managedFields":[{"manager":"kube-controller-manager","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:52:47Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:controller-revision-hash":{},"f:k8s-app":{},"f:pod-template-generation":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"8c4cc344-625b-46a6-ad67-8a006a415327\"}":{}}},"f:spec":{"f:affinity":{".":{},"f:nodeAffinity":{".":{},"f:r
equiredDuringSchedulingIgnoredDuringExecution":{}}},"f:containers":{"k: [truncated 5510 chars]
	I0731 10:54:09.265067   96871 request.go:628] Waited for 196.351716ms due to client-side throttling, not priority and fairness, request: GET:https://192.168.58.2:8443/api/v1/nodes/multinode-776386
	I0731 10:54:09.265127   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-776386
	I0731 10:54:09.265132   96871 round_trippers.go:469] Request Headers:
	I0731 10:54:09.265139   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:54:09.265145   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:54:09.267359   96871 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
	I0731 10:54:09.267390   96871 round_trippers.go:577] Response Headers:
	I0731 10:54:09.267399   96871 round_trippers.go:580]     Audit-Id: 5d459894-0895-4d28-9e0c-5e5eed127b97
	I0731 10:54:09.267408   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:54:09.267417   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:54:09.267426   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:54:09.267437   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:54:09.267450   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:54:09 GMT
	I0731 10:54:09.267614   96871 request.go:1188] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-776386","uid":"4de4b338-d1a0-4f0a-acb8-6058d42c7868","resourceVersion":"428","creationTimestamp":"2023-07-31T10:52:31Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-776386","kubernetes.io/os":"linux","minikube.k8s.io/commit":"a7848ba25aaaad8ebb50e721c0d343e471188fc7","minikube.k8s.io/name":"multinode-776386","minikube.k8s.io/primary":"true","minikube.k8s.io/updated_at":"2023_07_31T10_52_35_0700","minikube.k8s.io/version":"v1.31.1","node-role.kubernetes.io/control-plane":"","node.kubernetes.io/exclude-from-external-load-balancers":""},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"unix:///var/run/crio/crio.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kubelet","operation":"Update","apiVe
rsion":"v1","time":"2023-07-31T10:52:31Z","fieldsType":"FieldsV1","fiel [truncated 5947 chars]
	I0731 10:54:09.267911   96871 pod_ready.go:92] pod "kube-proxy-59xqp" in "kube-system" namespace has status "Ready":"True"
	I0731 10:54:09.267921   96871 pod_ready.go:81] duration metric: took 374.615943ms waiting for pod "kube-proxy-59xqp" in "kube-system" namespace to be "Ready" ...
	I0731 10:54:09.267939   96871 pod_ready.go:78] waiting up to 6m0s for pod "kube-proxy-95tkz" in "kube-system" namespace to be "Ready" ...
	I0731 10:54:09.465355   96871 request.go:628] Waited for 197.343103ms due to client-side throttling, not priority and fairness, request: GET:https://192.168.58.2:8443/api/v1/namespaces/kube-system/pods/kube-proxy-95tkz
	I0731 10:54:09.465407   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/namespaces/kube-system/pods/kube-proxy-95tkz
	I0731 10:54:09.465411   96871 round_trippers.go:469] Request Headers:
	I0731 10:54:09.465420   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:54:09.465426   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:54:09.467643   96871 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
	I0731 10:54:09.467666   96871 round_trippers.go:577] Response Headers:
	I0731 10:54:09.467676   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:54:09.467686   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:54:09 GMT
	I0731 10:54:09.467695   96871 round_trippers.go:580]     Audit-Id: 856e484b-fe8d-4a54-8986-7e22edf7f139
	I0731 10:54:09.467708   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:54:09.467717   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:54:09.467725   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:54:09.467830   96871 request.go:1188] Response Body: {"kind":"Pod","apiVersion":"v1","metadata":{"name":"kube-proxy-95tkz","generateName":"kube-proxy-","namespace":"kube-system","uid":"b897a36c-a5da-4e26-82d0-95a2bd342cf3","resourceVersion":"499","creationTimestamp":"2023-07-31T10:53:36Z","labels":{"controller-revision-hash":"56999f657b","k8s-app":"kube-proxy","pod-template-generation":"1"},"ownerReferences":[{"apiVersion":"apps/v1","kind":"DaemonSet","name":"kube-proxy","uid":"8c4cc344-625b-46a6-ad67-8a006a415327","controller":true,"blockOwnerDeletion":true}],"managedFields":[{"manager":"kube-controller-manager","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:53:36Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:controller-revision-hash":{},"f:k8s-app":{},"f:pod-template-generation":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"8c4cc344-625b-46a6-ad67-8a006a415327\"}":{}}},"f:spec":{"f:affinity":{".":{},"f:nodeAffinity":{".":{},"f:r
equiredDuringSchedulingIgnoredDuringExecution":{}}},"f:containers":{"k: [truncated 5518 chars]
	I0731 10:54:09.665614   96871 request.go:628] Waited for 197.34734ms due to client-side throttling, not priority and fairness, request: GET:https://192.168.58.2:8443/api/v1/nodes/multinode-776386-m02
	I0731 10:54:09.665666   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-776386-m02
	I0731 10:54:09.665673   96871 round_trippers.go:469] Request Headers:
	I0731 10:54:09.665684   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:54:09.665694   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:54:09.667961   96871 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
	I0731 10:54:09.667983   96871 round_trippers.go:577] Response Headers:
	I0731 10:54:09.667991   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:54:09.667996   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:54:09.668002   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:54:09.668008   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:54:09 GMT
	I0731 10:54:09.668017   96871 round_trippers.go:580]     Audit-Id: d59d6b10-1c57-4cd4-9638-8f9d83668fb5
	I0731 10:54:09.668025   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:54:09.668150   96871 request.go:1188] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-776386-m02","uid":"2870ca66-a8b5-4dc8-97a6-393811c44eb6","resourceVersion":"533","creationTimestamp":"2023-07-31T10:53:36Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-776386-m02","kubernetes.io/os":"linux"},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"/var/run/crio/crio.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kubeadm","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:53:36Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{"f:kubeadm.alpha.kubernetes.io/cri-socket":{}}}}},{"manager":"kubelet","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:53:36Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{".":{},"f:volumes.kube
rnetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:b [truncated 5296 chars]
	I0731 10:54:09.668484   96871 pod_ready.go:92] pod "kube-proxy-95tkz" in "kube-system" namespace has status "Ready":"True"
	I0731 10:54:09.668499   96871 pod_ready.go:81] duration metric: took 400.549866ms waiting for pod "kube-proxy-95tkz" in "kube-system" namespace to be "Ready" ...
	I0731 10:54:09.668510   96871 pod_ready.go:78] waiting up to 6m0s for pod "kube-scheduler-multinode-776386" in "kube-system" namespace to be "Ready" ...
	I0731 10:54:09.864857   96871 request.go:628] Waited for 196.281038ms due to client-side throttling, not priority and fairness, request: GET:https://192.168.58.2:8443/api/v1/namespaces/kube-system/pods/kube-scheduler-multinode-776386
	I0731 10:54:09.864930   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/namespaces/kube-system/pods/kube-scheduler-multinode-776386
	I0731 10:54:09.864937   96871 round_trippers.go:469] Request Headers:
	I0731 10:54:09.864944   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:54:09.864951   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:54:09.867231   96871 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
	I0731 10:54:09.867249   96871 round_trippers.go:577] Response Headers:
	I0731 10:54:09.867256   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:54:09.867263   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:54:09 GMT
	I0731 10:54:09.867269   96871 round_trippers.go:580]     Audit-Id: 266855a2-b407-4882-ad45-ca06eafdc173
	I0731 10:54:09.867274   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:54:09.867282   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:54:09.867290   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:54:09.867419   96871 request.go:1188] Response Body: {"kind":"Pod","apiVersion":"v1","metadata":{"name":"kube-scheduler-multinode-776386","namespace":"kube-system","uid":"0feb8916-44df-4b76-88e8-2856a50f34b7","resourceVersion":"291","creationTimestamp":"2023-07-31T10:52:34Z","labels":{"component":"kube-scheduler","tier":"control-plane"},"annotations":{"kubernetes.io/config.hash":"828045a76f6580abdd29c2ef20a02983","kubernetes.io/config.mirror":"828045a76f6580abdd29c2ef20a02983","kubernetes.io/config.seen":"2023-07-31T10:52:28.461389129Z","kubernetes.io/config.source":"file"},"ownerReferences":[{"apiVersion":"v1","kind":"Node","name":"multinode-776386","uid":"4de4b338-d1a0-4f0a-acb8-6058d42c7868","controller":true}],"managedFields":[{"manager":"kubelet","operation":"Update","apiVersion":"v1","time":"2023-07-31T10:52:34Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{".":{},"f:kubernetes.io/config.hash":{},"f:kubernetes.io/config.mirror":{},"f:kubernetes.io/config.seen":{},
"f:kubernetes.io/config.source":{}},"f:labels":{".":{},"f:component":{} [truncated 4676 chars]
	I0731 10:54:10.065150   96871 request.go:628] Waited for 197.353787ms due to client-side throttling, not priority and fairness, request: GET:https://192.168.58.2:8443/api/v1/nodes/multinode-776386
	I0731 10:54:10.065223   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-776386
	I0731 10:54:10.065229   96871 round_trippers.go:469] Request Headers:
	I0731 10:54:10.065237   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:54:10.065246   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:54:10.067591   96871 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
	I0731 10:54:10.067614   96871 round_trippers.go:577] Response Headers:
	I0731 10:54:10.067623   96871 round_trippers.go:580]     Audit-Id: 4a67bf8b-d775-4102-a3c6-a12d17a1ac62
	I0731 10:54:10.067632   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:54:10.067641   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:54:10.067654   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:54:10.067666   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:54:10.067678   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:54:10 GMT
	I0731 10:54:10.067780   96871 request.go:1188] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-776386","uid":"4de4b338-d1a0-4f0a-acb8-6058d42c7868","resourceVersion":"428","creationTimestamp":"2023-07-31T10:52:31Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-776386","kubernetes.io/os":"linux","minikube.k8s.io/commit":"a7848ba25aaaad8ebb50e721c0d343e471188fc7","minikube.k8s.io/name":"multinode-776386","minikube.k8s.io/primary":"true","minikube.k8s.io/updated_at":"2023_07_31T10_52_35_0700","minikube.k8s.io/version":"v1.31.1","node-role.kubernetes.io/control-plane":"","node.kubernetes.io/exclude-from-external-load-balancers":""},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"unix:///var/run/crio/crio.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kubelet","operation":"Update","apiVe
rsion":"v1","time":"2023-07-31T10:52:31Z","fieldsType":"FieldsV1","fiel [truncated 5947 chars]
	I0731 10:54:10.068125   96871 pod_ready.go:92] pod "kube-scheduler-multinode-776386" in "kube-system" namespace has status "Ready":"True"
	I0731 10:54:10.068142   96871 pod_ready.go:81] duration metric: took 399.623346ms waiting for pod "kube-scheduler-multinode-776386" in "kube-system" namespace to be "Ready" ...
	I0731 10:54:10.068153   96871 pod_ready.go:38] duration metric: took 1.200530358s for extra waiting for all system-critical and pods with labels [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] to be "Ready" ...
	I0731 10:54:10.068174   96871 system_svc.go:44] waiting for kubelet service to be running ....
	I0731 10:54:10.068215   96871 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet
	I0731 10:54:10.078635   96871 system_svc.go:56] duration metric: took 10.456026ms WaitForService to wait for kubelet.
	I0731 10:54:10.078656   96871 kubeadm.go:581] duration metric: took 33.234632881s to wait for : map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] ...
	I0731 10:54:10.078678   96871 node_conditions.go:102] verifying NodePressure condition ...
	I0731 10:54:10.265113   96871 request.go:628] Waited for 186.344915ms due to client-side throttling, not priority and fairness, request: GET:https://192.168.58.2:8443/api/v1/nodes
	I0731 10:54:10.265229   96871 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes
	I0731 10:54:10.265242   96871 round_trippers.go:469] Request Headers:
	I0731 10:54:10.265254   96871 round_trippers.go:473]     Accept: application/json, */*
	I0731 10:54:10.265266   96871 round_trippers.go:473]     User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
	I0731 10:54:10.267537   96871 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
	I0731 10:54:10.267561   96871 round_trippers.go:577] Response Headers:
	I0731 10:54:10.267570   96871 round_trippers.go:580]     Date: Mon, 31 Jul 2023 10:54:10 GMT
	I0731 10:54:10.267579   96871 round_trippers.go:580]     Audit-Id: 230fe13b-3e60-4088-a4f3-9cd5b9043e46
	I0731 10:54:10.267587   96871 round_trippers.go:580]     Cache-Control: no-cache, private
	I0731 10:54:10.267596   96871 round_trippers.go:580]     Content-Type: application/json
	I0731 10:54:10.267606   96871 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: e9db14d6-3fd3-41dc-92db-fb87cf6fc8e4
	I0731 10:54:10.267615   96871 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: e1a839a7-e5b0-431a-b1ae-2620521cbf5a
	I0731 10:54:10.267784   96871 request.go:1188] Response Body: {"kind":"NodeList","apiVersion":"v1","metadata":{"resourceVersion":"533"},"items":[{"metadata":{"name":"multinode-776386","uid":"4de4b338-d1a0-4f0a-acb8-6058d42c7868","resourceVersion":"428","creationTimestamp":"2023-07-31T10:52:31Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-776386","kubernetes.io/os":"linux","minikube.k8s.io/commit":"a7848ba25aaaad8ebb50e721c0d343e471188fc7","minikube.k8s.io/name":"multinode-776386","minikube.k8s.io/primary":"true","minikube.k8s.io/updated_at":"2023_07_31T10_52_35_0700","minikube.k8s.io/version":"v1.31.1","node-role.kubernetes.io/control-plane":"","node.kubernetes.io/exclude-from-external-load-balancers":""},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"unix:///var/run/crio/crio.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields
":[{"manager":"kubelet","operation":"Update","apiVersion":"v1","time":" [truncated 12288 chars]
	I0731 10:54:10.268442   96871 node_conditions.go:122] node storage ephemeral capacity is 304681132Ki
	I0731 10:54:10.268460   96871 node_conditions.go:123] node cpu capacity is 8
	I0731 10:54:10.268472   96871 node_conditions.go:122] node storage ephemeral capacity is 304681132Ki
	I0731 10:54:10.268477   96871 node_conditions.go:123] node cpu capacity is 8
	I0731 10:54:10.268484   96871 node_conditions.go:105] duration metric: took 189.800579ms to run NodePressure ...
	I0731 10:54:10.268502   96871 start.go:228] waiting for startup goroutines ...
	I0731 10:54:10.268537   96871 start.go:242] writing updated cluster config ...
	I0731 10:54:10.268813   96871 ssh_runner.go:195] Run: rm -f paused
	I0731 10:54:10.312368   96871 start.go:596] kubectl: 1.27.4, cluster: 1.27.3 (minor skew: 0)
	I0731 10:54:10.315085   96871 out.go:177] * Done! kubectl is now configured to use "multinode-776386" cluster and "default" namespace by default
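
The block above is minikube's readiness gate in action: for each system pod it GETs the pod, checks the Ready condition, GETs the owning node, and lets request.go throttle client-side when the calls outpace the rate limiter. The sketch below reproduces that polling pattern with client-go; it is illustrative only (not minikube's pod_ready.go), and the kubeconfig path, namespace, and pod name are assumptions taken from this run.

package main

import (
	"context"
	"fmt"
	"time"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

// waitPodReady polls until the named pod reports Ready=True, mirroring the
// pod_ready.go wait loop traced above (GET pod, inspect conditions, repeat).
func waitPodReady(cs kubernetes.Interface, ns, name string, timeout time.Duration) error {
	return wait.PollImmediate(500*time.Millisecond, timeout, func() (bool, error) {
		pod, err := cs.CoreV1().Pods(ns).Get(context.TODO(), name, metav1.GetOptions{})
		if err != nil {
			return false, nil // treat errors as transient and keep polling until timeout
		}
		for _, c := range pod.Status.Conditions {
			if c.Type == corev1.PodReady {
				return c.Status == corev1.ConditionTrue, nil
			}
		}
		return false, nil
	})
}

func main() {
	// Assumed kubeconfig location; minikube writes the context it just configured there.
	config, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	cs, err := kubernetes.NewForConfig(config)
	if err != nil {
		panic(err)
	}
	if err := waitPodReady(cs, "kube-system", "kube-scheduler-multinode-776386", 6*time.Minute); err != nil {
		panic(err)
	}
	fmt.Println("pod is Ready")
}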
	
	* 
	* ==> CRI-O <==
	* Jul 31 10:53:20 multinode-776386 crio[959]: time="2023-07-31 10:53:20.151002583Z" level=info msg="Starting container: 37f1df67678e61006447be66cb65a86a9639b063880db7cce74828f784d88654" id=c34c0acc-302f-44c2-bf8b-50f1e47c8f6e name=/runtime.v1.RuntimeService/StartContainer
	Jul 31 10:53:20 multinode-776386 crio[959]: time="2023-07-31 10:53:20.158820005Z" level=info msg="Created container 7b3ec5d6bfb40147c31deeca1b16255e2571d8d60d41e392a6413a9de8091532: kube-system/coredns-5d78c9869d-w86c5/coredns" id=7e00d134-6943-4f4c-bef6-3ab1d7c9bcef name=/runtime.v1.RuntimeService/CreateContainer
	Jul 31 10:53:20 multinode-776386 crio[959]: time="2023-07-31 10:53:20.159326378Z" level=info msg="Starting container: 7b3ec5d6bfb40147c31deeca1b16255e2571d8d60d41e392a6413a9de8091532" id=501acce2-1afa-4500-b678-ed12c63e0b70 name=/runtime.v1.RuntimeService/StartContainer
	Jul 31 10:53:20 multinode-776386 crio[959]: time="2023-07-31 10:53:20.159990260Z" level=info msg="Started container" PID=2377 containerID=37f1df67678e61006447be66cb65a86a9639b063880db7cce74828f784d88654 description=kube-system/storage-provisioner/storage-provisioner id=c34c0acc-302f-44c2-bf8b-50f1e47c8f6e name=/runtime.v1.RuntimeService/StartContainer sandboxID=33ddbf16b269627870fbb99833c45f963de5f5249a5f8366a472552c88692233
	Jul 31 10:53:20 multinode-776386 crio[959]: time="2023-07-31 10:53:20.170854086Z" level=info msg="Started container" PID=2385 containerID=7b3ec5d6bfb40147c31deeca1b16255e2571d8d60d41e392a6413a9de8091532 description=kube-system/coredns-5d78c9869d-w86c5/coredns id=501acce2-1afa-4500-b678-ed12c63e0b70 name=/runtime.v1.RuntimeService/StartContainer sandboxID=02bbc97b3a660fcb5dd79720e67830dd226dd403cb7ac6ddafc7a98bf49d09f5
	Jul 31 10:54:11 multinode-776386 crio[959]: time="2023-07-31 10:54:11.364155552Z" level=info msg="Running pod sandbox: default/busybox-67b7f59bb-trlh5/POD" id=184e4cca-283f-4cbd-91ab-897cd69b4c4a name=/runtime.v1.RuntimeService/RunPodSandbox
	Jul 31 10:54:11 multinode-776386 crio[959]: time="2023-07-31 10:54:11.364223866Z" level=warning msg="Allowed annotations are specified for workload []"
	Jul 31 10:54:11 multinode-776386 crio[959]: time="2023-07-31 10:54:11.377164231Z" level=info msg="Got pod network &{Name:busybox-67b7f59bb-trlh5 Namespace:default ID:ec3edc56c8746a9e0cb7957dee198b30690ae358d25ff0ab80b41cc23ffbc033 UID:125622f8-e899-4143-bb27-f36bbd3892ba NetNS:/var/run/netns/f8a7f2cc-c67a-4cac-90f3-b222462f0ce4 Networks:[] RuntimeConfig:map[kindnet:{IP: MAC: PortMappings:[] Bandwidth:<nil> IpRanges:[]}] Aliases:map[]}"
	Jul 31 10:54:11 multinode-776386 crio[959]: time="2023-07-31 10:54:11.377198763Z" level=info msg="Adding pod default_busybox-67b7f59bb-trlh5 to CNI network \"kindnet\" (type=ptp)"
	Jul 31 10:54:11 multinode-776386 crio[959]: time="2023-07-31 10:54:11.386088660Z" level=info msg="Got pod network &{Name:busybox-67b7f59bb-trlh5 Namespace:default ID:ec3edc56c8746a9e0cb7957dee198b30690ae358d25ff0ab80b41cc23ffbc033 UID:125622f8-e899-4143-bb27-f36bbd3892ba NetNS:/var/run/netns/f8a7f2cc-c67a-4cac-90f3-b222462f0ce4 Networks:[] RuntimeConfig:map[kindnet:{IP: MAC: PortMappings:[] Bandwidth:<nil> IpRanges:[]}] Aliases:map[]}"
	Jul 31 10:54:11 multinode-776386 crio[959]: time="2023-07-31 10:54:11.386254102Z" level=info msg="Checking pod default_busybox-67b7f59bb-trlh5 for CNI network kindnet (type=ptp)"
	Jul 31 10:54:11 multinode-776386 crio[959]: time="2023-07-31 10:54:11.409469761Z" level=info msg="Ran pod sandbox ec3edc56c8746a9e0cb7957dee198b30690ae358d25ff0ab80b41cc23ffbc033 with infra container: default/busybox-67b7f59bb-trlh5/POD" id=184e4cca-283f-4cbd-91ab-897cd69b4c4a name=/runtime.v1.RuntimeService/RunPodSandbox
	Jul 31 10:54:11 multinode-776386 crio[959]: time="2023-07-31 10:54:11.410754943Z" level=info msg="Checking image status: gcr.io/k8s-minikube/busybox:1.28" id=f2824a03-23ea-472f-a785-25433fa6df09 name=/runtime.v1.ImageService/ImageStatus
	Jul 31 10:54:11 multinode-776386 crio[959]: time="2023-07-31 10:54:11.410964606Z" level=info msg="Image gcr.io/k8s-minikube/busybox:1.28 not found" id=f2824a03-23ea-472f-a785-25433fa6df09 name=/runtime.v1.ImageService/ImageStatus
	Jul 31 10:54:11 multinode-776386 crio[959]: time="2023-07-31 10:54:11.411730110Z" level=info msg="Pulling image: gcr.io/k8s-minikube/busybox:1.28" id=177193cb-fbf1-450c-aea6-6ee63a4b59ef name=/runtime.v1.ImageService/PullImage
	Jul 31 10:54:11 multinode-776386 crio[959]: time="2023-07-31 10:54:11.415066227Z" level=info msg="Trying to access \"gcr.io/k8s-minikube/busybox:1.28\""
	Jul 31 10:54:11 multinode-776386 crio[959]: time="2023-07-31 10:54:11.574530949Z" level=info msg="Trying to access \"gcr.io/k8s-minikube/busybox:1.28\""
	Jul 31 10:54:11 multinode-776386 crio[959]: time="2023-07-31 10:54:11.965729093Z" level=info msg="Pulled image: gcr.io/k8s-minikube/busybox@sha256:74f634b1bc1bd74535d5209589734efbd44a25f4e2dc96d78784576a3eb5b335" id=177193cb-fbf1-450c-aea6-6ee63a4b59ef name=/runtime.v1.ImageService/PullImage
	Jul 31 10:54:11 multinode-776386 crio[959]: time="2023-07-31 10:54:11.966715865Z" level=info msg="Checking image status: gcr.io/k8s-minikube/busybox:1.28" id=c353dfc1-8968-4b7f-b2c3-01af9e6e4534 name=/runtime.v1.ImageService/ImageStatus
	Jul 31 10:54:11 multinode-776386 crio[959]: time="2023-07-31 10:54:11.967389377Z" level=info msg="Image status: &ImageStatusResponse{Image:&Image{Id:8c811b4aec35f259572d0f79207bc0678df4c736eeec50bc9fec37ed936a472a,RepoTags:[gcr.io/k8s-minikube/busybox:1.28],RepoDigests:[gcr.io/k8s-minikube/busybox@sha256:74f634b1bc1bd74535d5209589734efbd44a25f4e2dc96d78784576a3eb5b335 gcr.io/k8s-minikube/busybox@sha256:9afb80db71730dbb303fe00765cbf34bddbdc6b66e49897fc2e1861967584b12],Size_:1363676,Uid:nil,Username:,Spec:nil,},Info:map[string]string{},}" id=c353dfc1-8968-4b7f-b2c3-01af9e6e4534 name=/runtime.v1.ImageService/ImageStatus
	Jul 31 10:54:11 multinode-776386 crio[959]: time="2023-07-31 10:54:11.968164641Z" level=info msg="Creating container: default/busybox-67b7f59bb-trlh5/busybox" id=05b61b71-d7ba-4777-989a-e9dd02f7c331 name=/runtime.v1.RuntimeService/CreateContainer
	Jul 31 10:54:11 multinode-776386 crio[959]: time="2023-07-31 10:54:11.968275462Z" level=warning msg="Allowed annotations are specified for workload []"
	Jul 31 10:54:12 multinode-776386 crio[959]: time="2023-07-31 10:54:12.018545655Z" level=info msg="Created container e8b891e7b0f5ca86f988cb05ea451b80ca5c5d576d00ce05fad7e19fc7c87062: default/busybox-67b7f59bb-trlh5/busybox" id=05b61b71-d7ba-4777-989a-e9dd02f7c331 name=/runtime.v1.RuntimeService/CreateContainer
	Jul 31 10:54:12 multinode-776386 crio[959]: time="2023-07-31 10:54:12.019229703Z" level=info msg="Starting container: e8b891e7b0f5ca86f988cb05ea451b80ca5c5d576d00ce05fad7e19fc7c87062" id=4b41f5f2-5391-4bd1-873a-e2c1bc1bf3a4 name=/runtime.v1.RuntimeService/StartContainer
	Jul 31 10:54:12 multinode-776386 crio[959]: time="2023-07-31 10:54:12.027136786Z" level=info msg="Started container" PID=2558 containerID=e8b891e7b0f5ca86f988cb05ea451b80ca5c5d576d00ce05fad7e19fc7c87062 description=default/busybox-67b7f59bb-trlh5/busybox id=4b41f5f2-5391-4bd1-873a-e2c1bc1bf3a4 name=/runtime.v1.RuntimeService/StartContainer sandboxID=ec3edc56c8746a9e0cb7957dee198b30690ae358d25ff0ab80b41cc23ffbc033
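
The CRI-O lines above are one pod start end to end over the CRI: RunPodSandbox wires the sandbox into the kindnet CNI network, ImageStatus reports gcr.io/k8s-minikube/busybox:1.28 missing, PullImage fetches it by digest, and CreateContainer/StartContainer run the workload. As a rough sketch, the same ImageService endpoint can be driven directly over the socket shown in the log (assuming the k8s.io/cri-api and google.golang.org/grpc modules; run as root to reach the socket):

package main

import (
	"context"
	"fmt"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
	defer cancel()

	// Dial the CRI-O socket from the log; gRPC understands unix:// targets natively.
	conn, err := grpc.DialContext(ctx, "unix:///var/run/crio/crio.sock",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	// Same RPC surface as /runtime.v1.ImageService/PullImage in the log above.
	img := runtimeapi.NewImageServiceClient(conn)
	resp, err := img.PullImage(ctx, &runtimeapi.PullImageRequest{
		Image: &runtimeapi.ImageSpec{Image: "gcr.io/k8s-minikube/busybox:1.28"},
	})
	if err != nil {
		panic(err)
	}
	fmt.Println("pulled:", resp.ImageRef) // digest-pinned reference, as in the log
}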
	
	* 
	* ==> container status <==
	* CONTAINER           IMAGE                                                                                                 CREATED              STATE               NAME                      ATTEMPT             POD ID              POD
	e8b891e7b0f5c       gcr.io/k8s-minikube/busybox@sha256:74f634b1bc1bd74535d5209589734efbd44a25f4e2dc96d78784576a3eb5b335   4 seconds ago        Running             busybox                   0                   ec3edc56c8746       busybox-67b7f59bb-trlh5
	7b3ec5d6bfb40       ead0a4a53df89fd173874b46093b6e62d8c72967bbf606d672c9e8c9b601a4fc                                      56 seconds ago       Running             coredns                   0                   02bbc97b3a660       coredns-5d78c9869d-w86c5
	37f1df67678e6       6e38f40d628db3002f5617342c8872c935de530d867d0f709a2fbda1a302a562                                      56 seconds ago       Running             storage-provisioner       0                   33ddbf16b2696       storage-provisioner
	c510bda0e8b56       b0b1fa0f58c6e932b7f20bf208b2841317a1e8c88cc51b18358310bbd8ec95da                                      About a minute ago   Running             kindnet-cni               0                   325ded3790f81       kindnet-zrs4n
	d296d17e60eff       5780543258cf06f98595c003c0c6d22768d1fc8e9852e2839018a4bb3bfe163c                                      About a minute ago   Running             kube-proxy                0                   64714f7532d8a       kube-proxy-59xqp
	a8fe782430637       08a0c939e61b7340db53ebf07b4d0e908a35ad8d94e2cb7d0f958210e567079a                                      About a minute ago   Running             kube-apiserver            0                   8003d5c2b1bf3       kube-apiserver-multinode-776386
	e18d301a11bd5       7cffc01dba0e151e525544f87958d12c0fa62a9f173bbc930200ce815f2aaf3f                                      About a minute ago   Running             kube-controller-manager   0                   dbe1d1d41e540       kube-controller-manager-multinode-776386
	6e735c501188b       41697ceeb70b3f49e54ed46f2cf27ac5b3a201a7d9668ca327588b23fafdf36a                                      About a minute ago   Running             kube-scheduler            0                   90020a223e0b1       kube-scheduler-multinode-776386
	a41c3fe7ae459       86b6af7dd652c1b38118be1c338e9354b33469e69a218f7e290a0ca5304ad681                                      About a minute ago   Running             etcd                      0                   9baa0e1b5a7b8       etcd-multinode-776386
	
	* 
	* ==> coredns [7b3ec5d6bfb40147c31deeca1b16255e2571d8d60d41e392a6413a9de8091532] <==
	* [INFO] 10.244.0.3:47568 - 9 "PTR IN 1.0.96.10.in-addr.arpa. udp 40 false 512" NOERROR qr,aa,rd 112 0.000091273s
	[INFO] 10.244.1.2:55527 - 2 "PTR IN 10.0.96.10.in-addr.arpa. udp 41 false 512" NOERROR qr,aa,rd 116 0.000127894s
	[INFO] 10.244.1.2:33549 - 3 "AAAA IN kubernetes.default. udp 36 false 512" NXDOMAIN qr,rd,ra 36 0.001630622s
	[INFO] 10.244.1.2:51922 - 4 "AAAA IN kubernetes.default.default.svc.cluster.local. udp 62 false 512" NXDOMAIN qr,aa,rd 155 0.00008677s
	[INFO] 10.244.1.2:33142 - 5 "AAAA IN kubernetes.default.svc.cluster.local. udp 54 false 512" NOERROR qr,aa,rd 147 0.000047941s
	[INFO] 10.244.1.2:50930 - 6 "A IN kubernetes.default. udp 36 false 512" NXDOMAIN qr,rd,ra 36 0.001166987s
	[INFO] 10.244.1.2:57269 - 7 "A IN kubernetes.default.default.svc.cluster.local. udp 62 false 512" NXDOMAIN qr,aa,rd 155 0.000061488s
	[INFO] 10.244.1.2:42282 - 8 "A IN kubernetes.default.svc.cluster.local. udp 54 false 512" NOERROR qr,aa,rd 106 0.000067718s
	[INFO] 10.244.1.2:59363 - 9 "PTR IN 1.0.96.10.in-addr.arpa. udp 40 false 512" NOERROR qr,aa,rd 112 0.000044355s
	[INFO] 10.244.0.3:35571 - 2 "PTR IN 10.0.96.10.in-addr.arpa. udp 41 false 512" NOERROR qr,aa,rd 116 0.000089922s
	[INFO] 10.244.0.3:45883 - 3 "AAAA IN kubernetes.default.svc.cluster.local. udp 54 false 512" NOERROR qr,aa,rd 147 0.00004662s
	[INFO] 10.244.0.3:45926 - 4 "A IN kubernetes.default.svc.cluster.local. udp 54 false 512" NOERROR qr,aa,rd 106 0.000079715s
	[INFO] 10.244.0.3:47355 - 5 "PTR IN 1.0.96.10.in-addr.arpa. udp 40 false 512" NOERROR qr,aa,rd 112 0.000035453s
	[INFO] 10.244.1.2:52908 - 2 "PTR IN 10.0.96.10.in-addr.arpa. udp 41 false 512" NOERROR qr,aa,rd 116 0.000113257s
	[INFO] 10.244.1.2:45782 - 3 "AAAA IN kubernetes.default.svc.cluster.local. udp 54 false 512" NOERROR qr,aa,rd 147 0.000088588s
	[INFO] 10.244.1.2:50057 - 4 "A IN kubernetes.default.svc.cluster.local. udp 54 false 512" NOERROR qr,aa,rd 106 0.00005022s
	[INFO] 10.244.1.2:50465 - 5 "PTR IN 1.0.96.10.in-addr.arpa. udp 40 false 512" NOERROR qr,aa,rd 112 0.000046795s
	[INFO] 10.244.0.3:44127 - 2 "PTR IN 10.0.96.10.in-addr.arpa. udp 41 false 512" NOERROR qr,aa,rd 116 0.00010706s
	[INFO] 10.244.0.3:44024 - 3 "AAAA IN host.minikube.internal. udp 40 false 512" NOERROR qr,aa,rd 40 0.000110843s
	[INFO] 10.244.0.3:39709 - 4 "A IN host.minikube.internal. udp 40 false 512" NOERROR qr,aa,rd 78 0.000121794s
	[INFO] 10.244.0.3:50220 - 5 "PTR IN 1.58.168.192.in-addr.arpa. udp 43 false 512" NOERROR qr,aa,rd 104 0.000087506s
	[INFO] 10.244.1.2:50614 - 2 "PTR IN 10.0.96.10.in-addr.arpa. udp 41 false 512" NOERROR qr,aa,rd 116 0.000110445s
	[INFO] 10.244.1.2:36548 - 3 "AAAA IN host.minikube.internal. udp 40 false 512" NOERROR qr,aa,rd 40 0.000102608s
	[INFO] 10.244.1.2:60652 - 4 "A IN host.minikube.internal. udp 40 false 512" NOERROR qr,aa,rd 78 0.00007565s
	[INFO] 10.244.1.2:50946 - 5 "PTR IN 1.58.168.192.in-addr.arpa. udp 43 false 512" NOERROR qr,aa,rd 104 0.000054292s
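
The query mix above is resolv.conf search-list expansion: a pod asking for the bare name kubernetes.default walks the search domains (hence the NXDOMAIN for kubernetes.default.default.svc.cluster.local) until the fully qualified kubernetes.default.svc.cluster.local answers, and the PTR lookups reverse-map 10.96.0.1 (the API service) and 10.96.0.10 (the DNS service ClusterIP, inferred here from 10.0.96.10.in-addr.arpa). A small Go sketch that puts a query to the cluster DNS directly:

package main

import (
	"context"
	"fmt"
	"net"
	"time"
)

func main() {
	// Point the resolver straight at the cluster DNS service (address inferred
	// from the PTR records in this log, not stated explicitly anywhere).
	r := &net.Resolver{
		PreferGo: true,
		Dial: func(ctx context.Context, network, _ string) (net.Conn, error) {
			d := net.Dialer{Timeout: 2 * time.Second}
			return d.DialContext(ctx, network, "10.96.0.10:53")
		},
	}
	// Only the fully qualified form resolves; the bare "kubernetes.default"
	// is NXDOMAIN, which is exactly why pods rely on search-domain expansion.
	addrs, err := r.LookupHost(context.Background(), "kubernetes.default.svc.cluster.local")
	if err != nil {
		panic(err)
	}
	fmt.Println(addrs) // e.g. [10.96.0.1]
}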
	
	* 
	* ==> describe nodes <==
	* Name:               multinode-776386
	Roles:              control-plane
	Labels:             beta.kubernetes.io/arch=amd64
	                    beta.kubernetes.io/os=linux
	                    kubernetes.io/arch=amd64
	                    kubernetes.io/hostname=multinode-776386
	                    kubernetes.io/os=linux
	                    minikube.k8s.io/commit=a7848ba25aaaad8ebb50e721c0d343e471188fc7
	                    minikube.k8s.io/name=multinode-776386
	                    minikube.k8s.io/primary=true
	                    minikube.k8s.io/updated_at=2023_07_31T10_52_35_0700
	                    minikube.k8s.io/version=v1.31.1
	                    node-role.kubernetes.io/control-plane=
	                    node.kubernetes.io/exclude-from-external-load-balancers=
	Annotations:        kubeadm.alpha.kubernetes.io/cri-socket: unix:///var/run/crio/crio.sock
	                    node.alpha.kubernetes.io/ttl: 0
	                    volumes.kubernetes.io/controller-managed-attach-detach: true
	CreationTimestamp:  Mon, 31 Jul 2023 10:52:31 +0000
	Taints:             <none>
	Unschedulable:      false
	Lease:
	  HolderIdentity:  multinode-776386
	  AcquireTime:     <unset>
	  RenewTime:       Mon, 31 Jul 2023 10:54:07 +0000
	Conditions:
	  Type             Status  LastHeartbeatTime                 LastTransitionTime                Reason                       Message
	  ----             ------  -----------------                 ------------------                ------                       -------
	  MemoryPressure   False   Mon, 31 Jul 2023 10:53:19 +0000   Mon, 31 Jul 2023 10:52:30 +0000   KubeletHasSufficientMemory   kubelet has sufficient memory available
	  DiskPressure     False   Mon, 31 Jul 2023 10:53:19 +0000   Mon, 31 Jul 2023 10:52:30 +0000   KubeletHasNoDiskPressure     kubelet has no disk pressure
	  PIDPressure      False   Mon, 31 Jul 2023 10:53:19 +0000   Mon, 31 Jul 2023 10:52:30 +0000   KubeletHasSufficientPID      kubelet has sufficient PID available
	  Ready            True    Mon, 31 Jul 2023 10:53:19 +0000   Mon, 31 Jul 2023 10:53:19 +0000   KubeletReady                 kubelet is posting ready status
	Addresses:
	  InternalIP:  192.168.58.2
	  Hostname:    multinode-776386
	Capacity:
	  cpu:                8
	  ephemeral-storage:  304681132Ki
	  hugepages-1Gi:      0
	  hugepages-2Mi:      0
	  memory:             32859436Ki
	  pods:               110
	Allocatable:
	  cpu:                8
	  ephemeral-storage:  304681132Ki
	  hugepages-1Gi:      0
	  hugepages-2Mi:      0
	  memory:             32859436Ki
	  pods:               110
	System Info:
	  Machine ID:                 2bf69c14ed0e41a58129ac3a973ced24
	  System UUID:                01dd57e6-96d1-468c-98d3-16b27aaca462
	  Boot ID:                    29fc075f-138b-4be6-bf1b-3db3f063b35c
	  Kernel Version:             5.15.0-1038-gcp
	  OS Image:                   Ubuntu 22.04.2 LTS
	  Operating System:           linux
	  Architecture:               amd64
	  Container Runtime Version:  cri-o://1.24.6
	  Kubelet Version:            v1.27.3
	  Kube-Proxy Version:         v1.27.3
	PodCIDR:                      10.244.0.0/24
	PodCIDRs:                     10.244.0.0/24
	Non-terminated Pods:          (9 in total)
	  Namespace                   Name                                        CPU Requests  CPU Limits  Memory Requests  Memory Limits  Age
	  ---------                   ----                                        ------------  ----------  ---------------  -------------  ---
	  default                     busybox-67b7f59bb-trlh5                     0 (0%)        0 (0%)      0 (0%)           0 (0%)         5s
	  kube-system                 coredns-5d78c9869d-w86c5                    100m (1%)     0 (0%)      70Mi (0%)        170Mi (0%)     88s
	  kube-system                 etcd-multinode-776386                       100m (1%)     0 (0%)      100Mi (0%)       0 (0%)         102s
	  kube-system                 kindnet-zrs4n                               100m (1%)     100m (1%)   50Mi (0%)        50Mi (0%)      89s
	  kube-system                 kube-apiserver-multinode-776386             250m (3%)     0 (0%)      0 (0%)           0 (0%)         102s
	  kube-system                 kube-controller-manager-multinode-776386    200m (2%)     0 (0%)      0 (0%)           0 (0%)         104s
	  kube-system                 kube-proxy-59xqp                            0 (0%)        0 (0%)      0 (0%)           0 (0%)         89s
	  kube-system                 kube-scheduler-multinode-776386             100m (1%)     0 (0%)      0 (0%)           0 (0%)         102s
	  kube-system                 storage-provisioner                         0 (0%)        0 (0%)      0 (0%)           0 (0%)         87s
	Allocated resources:
	  (Total limits may be over 100 percent, i.e., overcommitted.)
	  Resource           Requests    Limits
	  --------           --------    ------
	  cpu                850m (10%)  100m (1%)
	  memory             220Mi (0%)  220Mi (0%)
	  ephemeral-storage  0 (0%)      0 (0%)
	  hugepages-1Gi      0 (0%)      0 (0%)
	  hugepages-2Mi      0 (0%)      0 (0%)
	Events:
	  Type    Reason                   Age                  From             Message
	  ----    ------                   ----                 ----             -------
	  Normal  Starting                 86s                  kube-proxy       
	  Normal  Starting                 108s                 kubelet          Starting kubelet.
	  Normal  NodeHasSufficientMemory  108s (x8 over 108s)  kubelet          Node multinode-776386 status is now: NodeHasSufficientMemory
	  Normal  NodeHasNoDiskPressure    108s (x8 over 108s)  kubelet          Node multinode-776386 status is now: NodeHasNoDiskPressure
	  Normal  NodeHasSufficientPID     108s (x8 over 108s)  kubelet          Node multinode-776386 status is now: NodeHasSufficientPID
	  Normal  Starting                 102s                 kubelet          Starting kubelet.
	  Normal  NodeHasSufficientMemory  102s                 kubelet          Node multinode-776386 status is now: NodeHasSufficientMemory
	  Normal  NodeHasNoDiskPressure    102s                 kubelet          Node multinode-776386 status is now: NodeHasNoDiskPressure
	  Normal  NodeHasSufficientPID     102s                 kubelet          Node multinode-776386 status is now: NodeHasSufficientPID
	  Normal  RegisteredNode           89s                  node-controller  Node multinode-776386 event: Registered Node multinode-776386 in Controller
	  Normal  NodeReady                57s                  kubelet          Node multinode-776386 status is now: NodeReady
	
	
	Name:               multinode-776386-m02
	Roles:              <none>
	Labels:             beta.kubernetes.io/arch=amd64
	                    beta.kubernetes.io/os=linux
	                    kubernetes.io/arch=amd64
	                    kubernetes.io/hostname=multinode-776386-m02
	                    kubernetes.io/os=linux
	Annotations:        kubeadm.alpha.kubernetes.io/cri-socket: /var/run/crio/crio.sock
	                    node.alpha.kubernetes.io/ttl: 0
	                    volumes.kubernetes.io/controller-managed-attach-detach: true
	CreationTimestamp:  Mon, 31 Jul 2023 10:53:36 +0000
	Taints:             <none>
	Unschedulable:      false
	Lease:
	  HolderIdentity:  multinode-776386-m02
	  AcquireTime:     <unset>
	  RenewTime:       Mon, 31 Jul 2023 10:54:06 +0000
	Conditions:
	  Type             Status  LastHeartbeatTime                 LastTransitionTime                Reason                       Message
	  ----             ------  -----------------                 ------------------                ------                       -------
	  MemoryPressure   False   Mon, 31 Jul 2023 10:54:08 +0000   Mon, 31 Jul 2023 10:53:36 +0000   KubeletHasSufficientMemory   kubelet has sufficient memory available
	  DiskPressure     False   Mon, 31 Jul 2023 10:54:08 +0000   Mon, 31 Jul 2023 10:53:36 +0000   KubeletHasNoDiskPressure     kubelet has no disk pressure
	  PIDPressure      False   Mon, 31 Jul 2023 10:54:08 +0000   Mon, 31 Jul 2023 10:53:36 +0000   KubeletHasSufficientPID      kubelet has sufficient PID available
	  Ready            True    Mon, 31 Jul 2023 10:54:08 +0000   Mon, 31 Jul 2023 10:54:08 +0000   KubeletReady                 kubelet is posting ready status
	Addresses:
	  InternalIP:  192.168.58.3
	  Hostname:    multinode-776386-m02
	Capacity:
	  cpu:                8
	  ephemeral-storage:  304681132Ki
	  hugepages-1Gi:      0
	  hugepages-2Mi:      0
	  memory:             32859436Ki
	  pods:               110
	Allocatable:
	  cpu:                8
	  ephemeral-storage:  304681132Ki
	  hugepages-1Gi:      0
	  hugepages-2Mi:      0
	  memory:             32859436Ki
	  pods:               110
	System Info:
	  Machine ID:                 bc649751224046c185df68a74ee9e945
	  System UUID:                ccd65b44-7cde-4054-acf4-ee3e2e93569b
	  Boot ID:                    29fc075f-138b-4be6-bf1b-3db3f063b35c
	  Kernel Version:             5.15.0-1038-gcp
	  OS Image:                   Ubuntu 22.04.2 LTS
	  Operating System:           linux
	  Architecture:               amd64
	  Container Runtime Version:  cri-o://1.24.6
	  Kubelet Version:            v1.27.3
	  Kube-Proxy Version:         v1.27.3
	PodCIDR:                      10.244.1.0/24
	PodCIDRs:                     10.244.1.0/24
	Non-terminated Pods:          (3 in total)
	  Namespace                   Name                       CPU Requests  CPU Limits  Memory Requests  Memory Limits  Age
	  ---------                   ----                       ------------  ----------  ---------------  -------------  ---
	  default                     busybox-67b7f59bb-tvf5p    0 (0%)        0 (0%)      0 (0%)           0 (0%)         5s
	  kube-system                 kindnet-mnhhf              100m (1%)     100m (1%)   50Mi (0%)        50Mi (0%)      40s
	  kube-system                 kube-proxy-95tkz           0 (0%)        0 (0%)      0 (0%)           0 (0%)         40s
	Allocated resources:
	  (Total limits may be over 100 percent, i.e., overcommitted.)
	  Resource           Requests   Limits
	  --------           --------   ------
	  cpu                100m (1%)  100m (1%)
	  memory             50Mi (0%)  50Mi (0%)
	  ephemeral-storage  0 (0%)     0 (0%)
	  hugepages-1Gi      0 (0%)     0 (0%)
	  hugepages-2Mi      0 (0%)     0 (0%)
	Events:
	  Type    Reason                   Age                From             Message
	  ----    ------                   ----               ----             -------
	  Normal  Starting                 38s                kube-proxy       
	  Normal  NodeHasSufficientMemory  40s (x5 over 42s)  kubelet          Node multinode-776386-m02 status is now: NodeHasSufficientMemory
	  Normal  NodeHasNoDiskPressure    40s (x5 over 42s)  kubelet          Node multinode-776386-m02 status is now: NodeHasNoDiskPressure
	  Normal  NodeHasSufficientPID     40s (x5 over 42s)  kubelet          Node multinode-776386-m02 status is now: NodeHasSufficientPID
	  Normal  RegisteredNode           39s                node-controller  Node multinode-776386-m02 event: Registered Node multinode-776386-m02 in Controller
	  Normal  NodeReady                8s                 kubelet          Node multinode-776386-m02 status is now: NodeReady
	
	* 
	* ==> dmesg <==
	* [  +0.004911] FS-Cache: N-cookie c=0000000f [p=00000003 fl=2 nc=0 na=1]
	[  +0.006641] FS-Cache: N-cookie d=000000003af63fe5{9p.inode} n=00000000d1059337
	[  +0.008699] FS-Cache: N-key=[8] '88a00f0200000000'
	[  +0.248824] FS-Cache: Duplicate cookie detected
	[  +0.004664] FS-Cache: O-cookie c=00000009 [p=00000003 fl=226 nc=0 na=1]
	[  +0.006749] FS-Cache: O-cookie d=000000003af63fe5{9p.inode} n=0000000050399130
	[  +0.007352] FS-Cache: O-key=[8] '92a00f0200000000'
	[  +0.004961] FS-Cache: N-cookie c=00000010 [p=00000003 fl=2 nc=0 na=1]
	[  +0.006590] FS-Cache: N-cookie d=000000003af63fe5{9p.inode} n=000000007a051ada
	[  +0.008734] FS-Cache: N-key=[8] '92a00f0200000000'
	[ +20.004980] kmem.limit_in_bytes is deprecated and will be removed. Please report your usecase to linux-mm@kvack.org if you depend on this functionality.
	[Jul31 10:44] IPv4: martian source 10.244.0.5 from 127.0.0.1, on dev eth0
	[  +0.000006] ll header: 00000000: 92 cf 61 ac 24 44 52 90 48 90 1a 74 08 00
	[  +1.020117] IPv4: martian source 10.244.0.5 from 127.0.0.1, on dev eth0
	[  +0.000005] ll header: 00000000: 92 cf 61 ac 24 44 52 90 48 90 1a 74 08 00
	[  +2.015807] IPv4: martian source 10.244.0.5 from 127.0.0.1, on dev eth0
	[  +0.000006] ll header: 00000000: 92 cf 61 ac 24 44 52 90 48 90 1a 74 08 00
	[  +4.127602] IPv4: martian source 10.244.0.5 from 127.0.0.1, on dev eth0
	[  +0.000008] ll header: 00000000: 92 cf 61 ac 24 44 52 90 48 90 1a 74 08 00
	[Jul31 10:45] IPv4: martian source 10.244.0.5 from 127.0.0.1, on dev eth0
	[  +0.000006] ll header: 00000000: 92 cf 61 ac 24 44 52 90 48 90 1a 74 08 00
	[ +16.126451] IPv4: martian source 10.244.0.5 from 127.0.0.1, on dev eth0
	[  +0.000006] ll header: 00000000: 92 cf 61 ac 24 44 52 90 48 90 1a 74 08 00
	[ +33.276809] IPv4: martian source 10.244.0.5 from 127.0.0.1, on dev eth0
	[  +0.000008] ll header: 00000000: 92 cf 61 ac 24 44 52 90 48 90 1a 74 08 00
	
	* 
	* ==> etcd [a41c3fe7ae459bb03c8e9a47ffa90d13efd8ab1d6298516f71e83d6a7443317f] <==
	* {"level":"info","ts":"2023-07-31T10:52:29.218Z","caller":"membership/cluster.go:421","msg":"added member","cluster-id":"3a56e4ca95e2355c","local-member-id":"b2c6679ac05f2cf1","added-peer-id":"b2c6679ac05f2cf1","added-peer-peer-urls":["https://192.168.58.2:2380"]}
	{"level":"info","ts":"2023-07-31T10:52:29.219Z","caller":"embed/etcd.go:687","msg":"starting with client TLS","tls-info":"cert = /var/lib/minikube/certs/etcd/server.crt, key = /var/lib/minikube/certs/etcd/server.key, client-cert=, client-key=, trusted-ca = /var/lib/minikube/certs/etcd/ca.crt, client-cert-auth = true, crl-file = ","cipher-suites":[]}
	{"level":"info","ts":"2023-07-31T10:52:29.219Z","caller":"embed/etcd.go:586","msg":"serving peer traffic","address":"192.168.58.2:2380"}
	{"level":"info","ts":"2023-07-31T10:52:29.219Z","caller":"embed/etcd.go:558","msg":"cmux::serve","address":"192.168.58.2:2380"}
	{"level":"info","ts":"2023-07-31T10:52:29.219Z","caller":"embed/etcd.go:275","msg":"now serving peer/client/metrics","local-member-id":"b2c6679ac05f2cf1","initial-advertise-peer-urls":["https://192.168.58.2:2380"],"listen-peer-urls":["https://192.168.58.2:2380"],"advertise-client-urls":["https://192.168.58.2:2379"],"listen-client-urls":["https://127.0.0.1:2379","https://192.168.58.2:2379"],"listen-metrics-urls":["http://127.0.0.1:2381"]}
	{"level":"info","ts":"2023-07-31T10:52:29.219Z","caller":"embed/etcd.go:762","msg":"serving metrics","address":"http://127.0.0.1:2381"}
	{"level":"info","ts":"2023-07-31T10:52:30.107Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"b2c6679ac05f2cf1 is starting a new election at term 1"}
	{"level":"info","ts":"2023-07-31T10:52:30.108Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"b2c6679ac05f2cf1 became pre-candidate at term 1"}
	{"level":"info","ts":"2023-07-31T10:52:30.108Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"b2c6679ac05f2cf1 received MsgPreVoteResp from b2c6679ac05f2cf1 at term 1"}
	{"level":"info","ts":"2023-07-31T10:52:30.108Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"b2c6679ac05f2cf1 became candidate at term 2"}
	{"level":"info","ts":"2023-07-31T10:52:30.108Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"b2c6679ac05f2cf1 received MsgVoteResp from b2c6679ac05f2cf1 at term 2"}
	{"level":"info","ts":"2023-07-31T10:52:30.108Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"b2c6679ac05f2cf1 became leader at term 2"}
	{"level":"info","ts":"2023-07-31T10:52:30.108Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"raft.node: b2c6679ac05f2cf1 elected leader b2c6679ac05f2cf1 at term 2"}
	{"level":"info","ts":"2023-07-31T10:52:30.109Z","caller":"etcdserver/server.go:2062","msg":"published local member to cluster through raft","local-member-id":"b2c6679ac05f2cf1","local-member-attributes":"{Name:multinode-776386 ClientURLs:[https://192.168.58.2:2379]}","request-path":"/0/members/b2c6679ac05f2cf1/attributes","cluster-id":"3a56e4ca95e2355c","publish-timeout":"7s"}
	{"level":"info","ts":"2023-07-31T10:52:30.109Z","caller":"embed/serve.go:100","msg":"ready to serve client requests"}
	{"level":"info","ts":"2023-07-31T10:52:30.109Z","caller":"embed/serve.go:100","msg":"ready to serve client requests"}
	{"level":"info","ts":"2023-07-31T10:52:30.109Z","caller":"etcdmain/main.go:44","msg":"notifying init daemon"}
	{"level":"info","ts":"2023-07-31T10:52:30.109Z","caller":"etcdserver/server.go:2571","msg":"setting up initial cluster version using v2 API","cluster-version":"3.5"}
	{"level":"info","ts":"2023-07-31T10:52:30.109Z","caller":"etcdmain/main.go:50","msg":"successfully notified init daemon"}
	{"level":"info","ts":"2023-07-31T10:52:30.110Z","caller":"membership/cluster.go:584","msg":"set initial cluster version","cluster-id":"3a56e4ca95e2355c","local-member-id":"b2c6679ac05f2cf1","cluster-version":"3.5"}
	{"level":"info","ts":"2023-07-31T10:52:30.110Z","caller":"api/capability.go:75","msg":"enabled capabilities for version","cluster-version":"3.5"}
	{"level":"info","ts":"2023-07-31T10:52:30.110Z","caller":"etcdserver/server.go:2595","msg":"cluster version is updated","cluster-version":"3.5"}
	{"level":"info","ts":"2023-07-31T10:52:30.110Z","caller":"embed/serve.go:198","msg":"serving client traffic securely","address":"127.0.0.1:2379"}
	{"level":"info","ts":"2023-07-31T10:52:30.110Z","caller":"embed/serve.go:198","msg":"serving client traffic securely","address":"192.168.58.2:2379"}
	{"level":"info","ts":"2023-07-31T10:53:26.378Z","caller":"traceutil/trace.go:171","msg":"trace[1239468638] transaction","detail":"{read_only:false; response_revision:454; number_of_response:1; }","duration":"132.470262ms","start":"2023-07-31T10:53:26.246Z","end":"2023-07-31T10:53:26.378Z","steps":["trace[1239468638] 'process raft request'  (duration: 132.372768ms)"],"step_count":1}
	
	* 
	* ==> kernel <==
	*  10:54:16 up 36 min,  0 users,  load average: 0.99, 1.16, 0.86
	Linux multinode-776386 5.15.0-1038-gcp #46~20.04.1-Ubuntu SMP Fri Jul 14 09:48:19 UTC 2023 x86_64 x86_64 x86_64 GNU/Linux
	PRETTY_NAME="Ubuntu 22.04.2 LTS"
	
	* 
	* ==> kindnet [c510bda0e8b56e709579cdac0d94f43f17c29cc7bb452d56dd96ad87c368089c] <==
	* I0731 10:52:49.306940       1 main.go:116] setting mtu 1500 for CNI 
	I0731 10:52:49.306990       1 main.go:146] kindnetd IP family: "ipv4"
	I0731 10:52:49.307020       1 main.go:150] noMask IPv4 subnets: [10.244.0.0/16]
	I0731 10:53:19.515174       1 main.go:191] Failed to get nodes, retrying after error: Get "https://10.96.0.1:443/api/v1/nodes": dial tcp 10.96.0.1:443: i/o timeout
	I0731 10:53:19.522758       1 main.go:223] Handling node with IPs: map[192.168.58.2:{}]
	I0731 10:53:19.522783       1 main.go:227] handling current node
	I0731 10:53:29.533539       1 main.go:223] Handling node with IPs: map[192.168.58.2:{}]
	I0731 10:53:29.533570       1 main.go:227] handling current node
	I0731 10:53:39.545580       1 main.go:223] Handling node with IPs: map[192.168.58.2:{}]
	I0731 10:53:39.545611       1 main.go:227] handling current node
	I0731 10:53:39.545621       1 main.go:223] Handling node with IPs: map[192.168.58.3:{}]
	I0731 10:53:39.545626       1 main.go:250] Node multinode-776386-m02 has CIDR [10.244.1.0/24] 
	I0731 10:53:39.545787       1 routes.go:62] Adding route {Ifindex: 0 Dst: 10.244.1.0/24 Src: <nil> Gw: 192.168.58.3 Flags: [] Table: 0} 
	I0731 10:53:49.557866       1 main.go:223] Handling node with IPs: map[192.168.58.2:{}]
	I0731 10:53:49.557897       1 main.go:227] handling current node
	I0731 10:53:49.557920       1 main.go:223] Handling node with IPs: map[192.168.58.3:{}]
	I0731 10:53:49.557927       1 main.go:250] Node multinode-776386-m02 has CIDR [10.244.1.0/24] 
	I0731 10:53:59.569666       1 main.go:223] Handling node with IPs: map[192.168.58.2:{}]
	I0731 10:53:59.569690       1 main.go:227] handling current node
	I0731 10:53:59.569699       1 main.go:223] Handling node with IPs: map[192.168.58.3:{}]
	I0731 10:53:59.569703       1 main.go:250] Node multinode-776386-m02 has CIDR [10.244.1.0/24] 
	I0731 10:54:09.573479       1 main.go:223] Handling node with IPs: map[192.168.58.2:{}]
	I0731 10:54:09.573503       1 main.go:227] handling current node
	I0731 10:54:09.573514       1 main.go:223] Handling node with IPs: map[192.168.58.3:{}]
	I0731 10:54:09.573521       1 main.go:250] Node multinode-776386-m02 has CIDR [10.244.1.0/24] 
	
	* 
	* ==> kube-apiserver [a8fe782430637030087d8bc1aed4923706c64e2538bc36712c92c7cd92726f0f] <==
	* I0731 10:52:31.416071       1 cache.go:39] Caches are synced for autoregister controller
	I0731 10:52:31.505509       1 cache.go:39] Caches are synced for APIServiceRegistrationController controller
	I0731 10:52:31.505914       1 shared_informer.go:318] Caches are synced for configmaps
	I0731 10:52:31.506352       1 cache.go:39] Caches are synced for AvailableConditionController controller
	I0731 10:52:31.506573       1 controller.go:624] quota admission added evaluator for: namespaces
	I0731 10:52:31.506582       1 shared_informer.go:318] Caches are synced for cluster_authentication_trust_controller
	E0731 10:52:31.510512       1 controller.go:146] "Failed to ensure lease exists, will retry" err="namespaces \"kube-system\" not found" interval="200ms"
	I0731 10:52:31.513119       1 shared_informer.go:318] Caches are synced for node_authorizer
	I0731 10:52:31.713597       1 controller.go:624] quota admission added evaluator for: leases.coordination.k8s.io
	I0731 10:52:32.076039       1 controller.go:132] OpenAPI AggregationController: action for item k8s_internal_local_delegation_chain_0000000000: Nothing (removed from the queue).
	I0731 10:52:32.312434       1 storage_scheduling.go:95] created PriorityClass system-node-critical with value 2000001000
	I0731 10:52:32.315936       1 storage_scheduling.go:95] created PriorityClass system-cluster-critical with value 2000000000
	I0731 10:52:32.315961       1 storage_scheduling.go:111] all system priority classes are created successfully or already exist.
	I0731 10:52:32.689100       1 controller.go:624] quota admission added evaluator for: roles.rbac.authorization.k8s.io
	I0731 10:52:32.727622       1 controller.go:624] quota admission added evaluator for: rolebindings.rbac.authorization.k8s.io
	I0731 10:52:32.825042       1 alloc.go:330] "allocated clusterIPs" service="default/kubernetes" clusterIPs=map[IPv4:10.96.0.1]
	W0731 10:52:32.829973       1 lease.go:251] Resetting endpoints for master service "kubernetes" to [192.168.58.2]
	I0731 10:52:32.830778       1 controller.go:624] quota admission added evaluator for: endpoints
	I0731 10:52:32.834079       1 controller.go:624] quota admission added evaluator for: endpointslices.discovery.k8s.io
	I0731 10:52:33.422161       1 controller.go:624] quota admission added evaluator for: serviceaccounts
	I0731 10:52:34.391029       1 controller.go:624] quota admission added evaluator for: deployments.apps
	I0731 10:52:34.400058       1 alloc.go:330] "allocated clusterIPs" service="kube-system/kube-dns" clusterIPs=map[IPv4:10.96.0.10]
	I0731 10:52:34.408686       1 controller.go:624] quota admission added evaluator for: daemonsets.apps
	I0731 10:52:47.731754       1 controller.go:624] quota admission added evaluator for: controllerrevisions.apps
	I0731 10:52:48.230658       1 controller.go:624] quota admission added evaluator for: replicasets.apps
	
	* 
	* ==> kube-controller-manager [e18d301a11bd5e83cb9fd84c02aac18614868d4996cd85e385da949fc989d842] <==
	* I0731 10:52:47.828440       1 shared_informer.go:318] Caches are synced for certificate-csrapproving
	I0731 10:52:47.829683       1 shared_informer.go:318] Caches are synced for certificate-csrsigning-kubelet-serving
	I0731 10:52:47.830817       1 shared_informer.go:318] Caches are synced for certificate-csrsigning-kubelet-client
	I0731 10:52:47.831998       1 shared_informer.go:318] Caches are synced for certificate-csrsigning-legacy-unknown
	I0731 10:52:47.832024       1 shared_informer.go:318] Caches are synced for certificate-csrsigning-kube-apiserver-client
	I0731 10:52:48.148168       1 shared_informer.go:318] Caches are synced for garbage collector
	I0731 10:52:48.179612       1 shared_informer.go:318] Caches are synced for garbage collector
	I0731 10:52:48.179640       1 garbagecollector.go:166] "All resource monitors have synced. Proceeding to collect garbage"
	I0731 10:52:48.233986       1 event.go:307] "Event occurred" object="kube-system/coredns" fieldPath="" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled up replica set coredns-5d78c9869d to 2"
	I0731 10:52:48.616735       1 event.go:307] "Event occurred" object="kube-system/coredns" fieldPath="" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled down replica set coredns-5d78c9869d to 1 from 2"
	I0731 10:52:48.638467       1 event.go:307] "Event occurred" object="kube-system/coredns-5d78c9869d" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: coredns-5d78c9869d-mjnjv"
	I0731 10:52:48.644773       1 event.go:307] "Event occurred" object="kube-system/coredns-5d78c9869d" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: coredns-5d78c9869d-w86c5"
	I0731 10:52:48.724126       1 event.go:307] "Event occurred" object="kube-system/coredns-5d78c9869d" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulDelete" message="Deleted pod: coredns-5d78c9869d-mjnjv"
	I0731 10:53:22.633197       1 node_lifecycle_controller.go:1046] "Controller detected that some Nodes are Ready. Exiting master disruption mode"
	I0731 10:53:36.303289       1 actual_state_of_world.go:547] "Failed to update statusUpdateNeeded field in actual state of world" err="Failed to set statusUpdateNeeded to needed true, because nodeName=\"multinode-776386-m02\" does not exist"
	I0731 10:53:36.309887       1 range_allocator.go:380] "Set node PodCIDR" node="multinode-776386-m02" podCIDRs=[10.244.1.0/24]
	I0731 10:53:36.312704       1 event.go:307] "Event occurred" object="kube-system/kube-proxy" fieldPath="" kind="DaemonSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: kube-proxy-95tkz"
	I0731 10:53:36.314242       1 event.go:307] "Event occurred" object="kube-system/kindnet" fieldPath="" kind="DaemonSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: kindnet-mnhhf"
	I0731 10:53:37.635575       1 event.go:307] "Event occurred" object="multinode-776386-m02" fieldPath="" kind="Node" apiVersion="v1" type="Normal" reason="RegisteredNode" message="Node multinode-776386-m02 event: Registered Node multinode-776386-m02 in Controller"
	I0731 10:53:37.635621       1 node_lifecycle_controller.go:875] "Missing timestamp for Node. Assuming now as a timestamp" node="multinode-776386-m02"
	W0731 10:54:08.648351       1 topologycache.go:232] Can't get CPU or zone information for multinode-776386-m02 node
	I0731 10:54:11.026486       1 event.go:307] "Event occurred" object="default/busybox" fieldPath="" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled up replica set busybox-67b7f59bb to 2"
	I0731 10:54:11.033578       1 event.go:307] "Event occurred" object="default/busybox-67b7f59bb" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: busybox-67b7f59bb-tvf5p"
	I0731 10:54:11.105211       1 event.go:307] "Event occurred" object="default/busybox-67b7f59bb" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: busybox-67b7f59bb-trlh5"
	I0731 10:54:12.648339       1 event.go:307] "Event occurred" object="default/busybox-67b7f59bb-tvf5p" fieldPath="" kind="Pod" apiVersion="" type="Normal" reason="TaintManagerEviction" message="Cancelling deletion of Pod default/busybox-67b7f59bb-tvf5p"
	
	* 
	* ==> kube-proxy [d296d17e60eff0f40e5095ae1e7dc24da34746772b86a9b71cf86a5e151476b1] <==
	* I0731 10:52:49.335299       1 node.go:141] Successfully retrieved node IP: 192.168.58.2
	I0731 10:52:49.335390       1 server_others.go:110] "Detected node IP" address="192.168.58.2"
	I0731 10:52:49.335414       1 server_others.go:554] "Using iptables proxy"
	I0731 10:52:49.424580       1 server_others.go:192] "Using iptables Proxier"
	I0731 10:52:49.424626       1 server_others.go:199] "kube-proxy running in dual-stack mode" ipFamily=IPv4
	I0731 10:52:49.424637       1 server_others.go:200] "Creating dualStackProxier for iptables"
	I0731 10:52:49.424654       1 server_others.go:484] "Detect-local-mode set to ClusterCIDR, but no IPv6 cluster CIDR defined, defaulting to no-op detect-local for IPv6"
	I0731 10:52:49.424690       1 proxier.go:253] "Setting route_localnet=1 to allow node-ports on localhost; to change this either disable iptables.localhostNodePorts (--iptables-localhost-nodeports) or set nodePortAddresses (--nodeport-addresses) to filter loopback addresses"
	I0731 10:52:49.425318       1 server.go:658] "Version info" version="v1.27.3"
	I0731 10:52:49.425335       1 server.go:660] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
	I0731 10:52:49.425880       1 config.go:188] "Starting service config controller"
	I0731 10:52:49.425907       1 shared_informer.go:311] Waiting for caches to sync for service config
	I0731 10:52:49.425925       1 config.go:97] "Starting endpoint slice config controller"
	I0731 10:52:49.425937       1 shared_informer.go:311] Waiting for caches to sync for endpoint slice config
	I0731 10:52:49.426402       1 config.go:315] "Starting node config controller"
	I0731 10:52:49.426414       1 shared_informer.go:311] Waiting for caches to sync for node config
	I0731 10:52:49.526448       1 shared_informer.go:318] Caches are synced for endpoint slice config
	I0731 10:52:49.526546       1 shared_informer.go:318] Caches are synced for node config
	I0731 10:52:49.526550       1 shared_informer.go:318] Caches are synced for service config
	
	* 
	* ==> kube-scheduler [6e735c501188bc64127298a67d4b164a34cea93296714c1f18b0f5e45432a42f] <==
	* W0731 10:52:31.515560       1 reflector.go:533] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.ReplicationController: replicationcontrollers is forbidden: User "system:kube-scheduler" cannot list resource "replicationcontrollers" in API group "" at the cluster scope
	E0731 10:52:31.515576       1 reflector.go:148] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.CSIStorageCapacity: failed to list *v1.CSIStorageCapacity: csistoragecapacities.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csistoragecapacities" in API group "storage.k8s.io" at the cluster scope
	E0731 10:52:31.515577       1 reflector.go:148] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.ReplicaSet: failed to list *v1.ReplicaSet: replicasets.apps is forbidden: User "system:kube-scheduler" cannot list resource "replicasets" in API group "apps" at the cluster scope
	E0731 10:52:31.515583       1 reflector.go:148] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.ReplicationController: failed to list *v1.ReplicationController: replicationcontrollers is forbidden: User "system:kube-scheduler" cannot list resource "replicationcontrollers" in API group "" at the cluster scope
	W0731 10:52:31.515599       1 reflector.go:533] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.Service: services is forbidden: User "system:kube-scheduler" cannot list resource "services" in API group "" at the cluster scope
	E0731 10:52:31.515622       1 reflector.go:148] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.Service: failed to list *v1.Service: services is forbidden: User "system:kube-scheduler" cannot list resource "services" in API group "" at the cluster scope
	W0731 10:52:31.515649       1 reflector.go:533] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumeclaims" in API group "" at the cluster scope
	E0731 10:52:31.515664       1 reflector.go:148] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.PersistentVolumeClaim: failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumeclaims" in API group "" at the cluster scope
	W0731 10:52:31.515685       1 reflector.go:533] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.CSINode: csinodes.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csinodes" in API group "storage.k8s.io" at the cluster scope
	E0731 10:52:31.515700       1 reflector.go:148] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.CSINode: failed to list *v1.CSINode: csinodes.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csinodes" in API group "storage.k8s.io" at the cluster scope
	W0731 10:52:31.515790       1 reflector.go:533] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.StatefulSet: statefulsets.apps is forbidden: User "system:kube-scheduler" cannot list resource "statefulsets" in API group "apps" at the cluster scope
	W0731 10:52:31.515845       1 reflector.go:533] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User "system:kube-scheduler" cannot list resource "poddisruptionbudgets" in API group "policy" at the cluster scope
	W0731 10:52:31.515853       1 reflector.go:533] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope
	E0731 10:52:31.515864       1 reflector.go:148] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.StatefulSet: failed to list *v1.StatefulSet: statefulsets.apps is forbidden: User "system:kube-scheduler" cannot list resource "statefulsets" in API group "apps" at the cluster scope
	E0731 10:52:31.515868       1 reflector.go:148] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.PodDisruptionBudget: failed to list *v1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User "system:kube-scheduler" cannot list resource "poddisruptionbudgets" in API group "policy" at the cluster scope
	E0731 10:52:31.515878       1 reflector.go:148] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.StorageClass: failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope
	W0731 10:52:31.515816       1 reflector.go:533] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.CSIDriver: csidrivers.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csidrivers" in API group "storage.k8s.io" at the cluster scope
	E0731 10:52:31.515907       1 reflector.go:148] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: csidrivers.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csidrivers" in API group "storage.k8s.io" at the cluster scope
	W0731 10:52:31.515974       1 reflector.go:533] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.Pod: pods is forbidden: User "system:kube-scheduler" cannot list resource "pods" in API group "" at the cluster scope
	E0731 10:52:31.515991       1 reflector.go:148] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.Pod: failed to list *v1.Pod: pods is forbidden: User "system:kube-scheduler" cannot list resource "pods" in API group "" at the cluster scope
	W0731 10:52:32.374177       1 reflector.go:533] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User "system:kube-scheduler" cannot list resource "poddisruptionbudgets" in API group "policy" at the cluster scope
	E0731 10:52:32.374223       1 reflector.go:148] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.PodDisruptionBudget: failed to list *v1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User "system:kube-scheduler" cannot list resource "poddisruptionbudgets" in API group "policy" at the cluster scope
	W0731 10:52:32.483835       1 reflector.go:533] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope
	E0731 10:52:32.483864       1 reflector.go:148] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.StorageClass: failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope
	I0731 10:52:33.110535       1 shared_informer.go:318] Caches are synced for client-ca::kube-system::extension-apiserver-authentication::client-ca-file
	
	* 
	* ==> kubelet <==
	* Jul 31 10:52:47 multinode-776386 kubelet[1601]: I0731 10:52:47.834541    1601 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/e086520b-af9b-4c2e-adc1-cecdf0026890-lib-modules\") pod \"kube-proxy-59xqp\" (UID: \"e086520b-af9b-4c2e-adc1-cecdf0026890\") " pod="kube-system/kube-proxy-59xqp"
	Jul 31 10:52:47 multinode-776386 kubelet[1601]: E0731 10:52:47.940435    1601 projected.go:292] Couldn't get configMap kube-system/kube-root-ca.crt: configmap "kube-root-ca.crt" not found
	Jul 31 10:52:47 multinode-776386 kubelet[1601]: E0731 10:52:47.940471    1601 projected.go:198] Error preparing data for projected volume kube-api-access-v975x for pod kube-system/kube-proxy-59xqp: configmap "kube-root-ca.crt" not found
	Jul 31 10:52:47 multinode-776386 kubelet[1601]: E0731 10:52:47.940512    1601 projected.go:292] Couldn't get configMap kube-system/kube-root-ca.crt: configmap "kube-root-ca.crt" not found
	Jul 31 10:52:47 multinode-776386 kubelet[1601]: E0731 10:52:47.940531    1601 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/e086520b-af9b-4c2e-adc1-cecdf0026890-kube-api-access-v975x podName:e086520b-af9b-4c2e-adc1-cecdf0026890 nodeName:}" failed. No retries permitted until 2023-07-31 10:52:48.440506234 +0000 UTC m=+14.071940032 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-v975x" (UniqueName: "kubernetes.io/projected/e086520b-af9b-4c2e-adc1-cecdf0026890-kube-api-access-v975x") pod "kube-proxy-59xqp" (UID: "e086520b-af9b-4c2e-adc1-cecdf0026890") : configmap "kube-root-ca.crt" not found
	Jul 31 10:52:47 multinode-776386 kubelet[1601]: E0731 10:52:47.940540    1601 projected.go:198] Error preparing data for projected volume kube-api-access-jvlkh for pod kube-system/kindnet-zrs4n: configmap "kube-root-ca.crt" not found
	Jul 31 10:52:47 multinode-776386 kubelet[1601]: E0731 10:52:47.940600    1601 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/f8716d13-387d-4ea9-a4f0-db398d7e89d8-kube-api-access-jvlkh podName:f8716d13-387d-4ea9-a4f0-db398d7e89d8 nodeName:}" failed. No retries permitted until 2023-07-31 10:52:48.440580514 +0000 UTC m=+14.072014312 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-jvlkh" (UniqueName: "kubernetes.io/projected/f8716d13-387d-4ea9-a4f0-db398d7e89d8-kube-api-access-jvlkh") pod "kindnet-zrs4n" (UID: "f8716d13-387d-4ea9-a4f0-db398d7e89d8") : configmap "kube-root-ca.crt" not found
	Jul 31 10:52:48 multinode-776386 kubelet[1601]: W0731 10:52:48.827012    1601 manager.go:1159] Failed to process watch event {EventType:0 Name:/docker/656908c1a0b09695a6320c09f205c4cc49aa9f1eeb5bc93be8b593553d25e518/crio-325ded3790f8198e3df729188554a2d03e3341e441a9da648798d3fcff30d2c9 WatchSource:0}: Error finding container 325ded3790f8198e3df729188554a2d03e3341e441a9da648798d3fcff30d2c9: Status 404 returned error can't find the container with id 325ded3790f8198e3df729188554a2d03e3341e441a9da648798d3fcff30d2c9
	Jul 31 10:52:49 multinode-776386 kubelet[1601]: I0731 10:52:49.615653    1601 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="kube-system/kindnet-zrs4n" podStartSLOduration=2.615623554 podCreationTimestamp="2023-07-31 10:52:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2023-07-31 10:52:49.615474159 +0000 UTC m=+15.246907966" watchObservedRunningTime="2023-07-31 10:52:49.615623554 +0000 UTC m=+15.247057359"
	Jul 31 10:52:49 multinode-776386 kubelet[1601]: I0731 10:52:49.624354    1601 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="kube-system/kube-proxy-59xqp" podStartSLOduration=2.624309368 podCreationTimestamp="2023-07-31 10:52:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2023-07-31 10:52:49.624183775 +0000 UTC m=+15.255617581" watchObservedRunningTime="2023-07-31 10:52:49.624309368 +0000 UTC m=+15.255743174"
	Jul 31 10:53:19 multinode-776386 kubelet[1601]: I0731 10:53:19.721555    1601 kubelet_node_status.go:493] "Fast updating node status as it just became ready"
	Jul 31 10:53:19 multinode-776386 kubelet[1601]: I0731 10:53:19.741965    1601 topology_manager.go:212] "Topology Admit Handler"
	Jul 31 10:53:19 multinode-776386 kubelet[1601]: I0731 10:53:19.743362    1601 topology_manager.go:212] "Topology Admit Handler"
	Jul 31 10:53:19 multinode-776386 kubelet[1601]: I0731 10:53:19.889636    1601 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ck6sb\" (UniqueName: \"kubernetes.io/projected/fcb57c8f-9276-4e70-a275-2865ac997394-kube-api-access-ck6sb\") pod \"coredns-5d78c9869d-w86c5\" (UID: \"fcb57c8f-9276-4e70-a275-2865ac997394\") " pod="kube-system/coredns-5d78c9869d-w86c5"
	Jul 31 10:53:19 multinode-776386 kubelet[1601]: I0731 10:53:19.889702    1601 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/host-path/c0c13dc5-a5eb-4156-af22-1a95ae2eedd9-tmp\") pod \"storage-provisioner\" (UID: \"c0c13dc5-a5eb-4156-af22-1a95ae2eedd9\") " pod="kube-system/storage-provisioner"
	Jul 31 10:53:19 multinode-776386 kubelet[1601]: I0731 10:53:19.889731    1601 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/fcb57c8f-9276-4e70-a275-2865ac997394-config-volume\") pod \"coredns-5d78c9869d-w86c5\" (UID: \"fcb57c8f-9276-4e70-a275-2865ac997394\") " pod="kube-system/coredns-5d78c9869d-w86c5"
	Jul 31 10:53:19 multinode-776386 kubelet[1601]: I0731 10:53:19.889791    1601 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ttbbm\" (UniqueName: \"kubernetes.io/projected/c0c13dc5-a5eb-4156-af22-1a95ae2eedd9-kube-api-access-ttbbm\") pod \"storage-provisioner\" (UID: \"c0c13dc5-a5eb-4156-af22-1a95ae2eedd9\") " pod="kube-system/storage-provisioner"
	Jul 31 10:53:20 multinode-776386 kubelet[1601]: W0731 10:53:20.091068    1601 manager.go:1159] Failed to process watch event {EventType:0 Name:/docker/656908c1a0b09695a6320c09f205c4cc49aa9f1eeb5bc93be8b593553d25e518/crio-33ddbf16b269627870fbb99833c45f963de5f5249a5f8366a472552c88692233 WatchSource:0}: Error finding container 33ddbf16b269627870fbb99833c45f963de5f5249a5f8366a472552c88692233: Status 404 returned error can't find the container with id 33ddbf16b269627870fbb99833c45f963de5f5249a5f8366a472552c88692233
	Jul 31 10:53:20 multinode-776386 kubelet[1601]: W0731 10:53:20.091377    1601 manager.go:1159] Failed to process watch event {EventType:0 Name:/docker/656908c1a0b09695a6320c09f205c4cc49aa9f1eeb5bc93be8b593553d25e518/crio-02bbc97b3a660fcb5dd79720e67830dd226dd403cb7ac6ddafc7a98bf49d09f5 WatchSource:0}: Error finding container 02bbc97b3a660fcb5dd79720e67830dd226dd403cb7ac6ddafc7a98bf49d09f5: Status 404 returned error can't find the container with id 02bbc97b3a660fcb5dd79720e67830dd226dd403cb7ac6ddafc7a98bf49d09f5
	Jul 31 10:53:20 multinode-776386 kubelet[1601]: I0731 10:53:20.667561    1601 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="kube-system/storage-provisioner" podStartSLOduration=31.667516235 podCreationTimestamp="2023-07-31 10:52:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2023-07-31 10:53:20.667358135 +0000 UTC m=+46.298791959" watchObservedRunningTime="2023-07-31 10:53:20.667516235 +0000 UTC m=+46.298950044"
	Jul 31 10:54:11 multinode-776386 kubelet[1601]: I0731 10:54:11.062601    1601 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="kube-system/coredns-5d78c9869d-w86c5" podStartSLOduration=83.062545125 podCreationTimestamp="2023-07-31 10:52:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2023-07-31 10:53:20.675867181 +0000 UTC m=+46.307300986" watchObservedRunningTime="2023-07-31 10:54:11.062545125 +0000 UTC m=+96.693978931"
	Jul 31 10:54:11 multinode-776386 kubelet[1601]: I0731 10:54:11.062758    1601 topology_manager.go:212] "Topology Admit Handler"
	Jul 31 10:54:11 multinode-776386 kubelet[1601]: I0731 10:54:11.162536    1601 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vf2lt\" (UniqueName: \"kubernetes.io/projected/125622f8-e899-4143-bb27-f36bbd3892ba-kube-api-access-vf2lt\") pod \"busybox-67b7f59bb-trlh5\" (UID: \"125622f8-e899-4143-bb27-f36bbd3892ba\") " pod="default/busybox-67b7f59bb-trlh5"
	Jul 31 10:54:11 multinode-776386 kubelet[1601]: W0731 10:54:11.407133    1601 manager.go:1159] Failed to process watch event {EventType:0 Name:/docker/656908c1a0b09695a6320c09f205c4cc49aa9f1eeb5bc93be8b593553d25e518/crio-ec3edc56c8746a9e0cb7957dee198b30690ae358d25ff0ab80b41cc23ffbc033 WatchSource:0}: Error finding container ec3edc56c8746a9e0cb7957dee198b30690ae358d25ff0ab80b41cc23ffbc033: Status 404 returned error can't find the container with id ec3edc56c8746a9e0cb7957dee198b30690ae358d25ff0ab80b41cc23ffbc033
	Jul 31 10:54:12 multinode-776386 kubelet[1601]: I0731 10:54:12.756339    1601 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="default/busybox-67b7f59bb-trlh5" podStartSLOduration=1.2011684790000001 podCreationTimestamp="2023-07-31 10:54:11 +0000 UTC" firstStartedPulling="2023-07-31 10:54:11.411150627 +0000 UTC m=+97.042584426" lastFinishedPulling="2023-07-31 10:54:11.966270932 +0000 UTC m=+97.597704924" observedRunningTime="2023-07-31 10:54:12.756128924 +0000 UTC m=+98.387562731" watchObservedRunningTime="2023-07-31 10:54:12.756288977 +0000 UTC m=+98.387722787"
	

-- /stdout --
helpers_test.go:254: (dbg) Run:  out/minikube-linux-amd64 status --format={{.APIServer}} -p multinode-776386 -n multinode-776386
helpers_test.go:261: (dbg) Run:  kubectl --context multinode-776386 get po -o=jsonpath={.items[*].metadata.name} -A --field-selector=status.phase!=Running
helpers_test.go:285: <<< TestMultiNode/serial/PingHostFrom2Pods FAILED: end of post-mortem logs <<<
helpers_test.go:286: ---------------------/post-mortem---------------------------------
--- FAIL: TestMultiNode/serial/PingHostFrom2Pods (3.23s)
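For reference, PingHostFrom2Pods exercises pod-to-host connectivity. A minimal manual sketch of the same check, assuming the busybox pods created in the logs above are still running and that minikube's host.minikube.internal entry is present in the node's /etc/hosts (the pod name is taken from the logs; exact names may differ on another run):

	# Resolve the host's IP as recorded in the node's /etc/hosts
	HOST_IP=$(minikube -p multinode-776386 ssh "grep host.minikube.internal /etc/hosts | cut -f1")
	# Ping the host once from inside a busybox pod
	kubectl --context multinode-776386 exec busybox-67b7f59bb-trlh5 -- ping -c 1 "$HOST_IP"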

TestRunningBinaryUpgrade (64.2s)

=== RUN   TestRunningBinaryUpgrade
=== PAUSE TestRunningBinaryUpgrade

=== CONT  TestRunningBinaryUpgrade
version_upgrade_test.go:132: (dbg) Run:  /tmp/minikube-v1.9.0.734396397.exe start -p running-upgrade-371853 --memory=2200 --vm-driver=docker  --container-runtime=crio
version_upgrade_test.go:132: (dbg) Done: /tmp/minikube-v1.9.0.734396397.exe start -p running-upgrade-371853 --memory=2200 --vm-driver=docker  --container-runtime=crio: (59.324880231s)
version_upgrade_test.go:142: (dbg) Run:  out/minikube-linux-amd64 start -p running-upgrade-371853 --memory=2200 --alsologtostderr -v=1 --driver=docker  --container-runtime=crio
version_upgrade_test.go:142: (dbg) Non-zero exit: out/minikube-linux-amd64 start -p running-upgrade-371853 --memory=2200 --alsologtostderr -v=1 --driver=docker  --container-runtime=crio: exit status 90 (1.987815307s)

-- stdout --
	* [running-upgrade-371853] minikube v1.31.1 on Ubuntu 20.04 (kvm/amd64)
	  - MINIKUBE_LOCATION=16969
	  - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
	  - KUBECONFIG=/home/jenkins/minikube-integration/16969-5799/kubeconfig
	  - MINIKUBE_HOME=/home/jenkins/minikube-integration/16969-5799/.minikube
	  - MINIKUBE_BIN=out/minikube-linux-amd64
	  - MINIKUBE_FORCE_SYSTEMD=
	* Kubernetes 1.27.3 is now available. If you would like to upgrade, specify: --kubernetes-version=v1.27.3
	* Using the docker driver based on existing profile
	* Starting control plane node running-upgrade-371853 in cluster running-upgrade-371853
	* Pulling base image ...
	* Updating the running docker "running-upgrade-371853" container ...
	
	

-- /stdout --
** stderr ** 
	I0731 11:07:04.851736  193139 out.go:296] Setting OutFile to fd 1 ...
	I0731 11:07:04.851888  193139 out.go:343] TERM=,COLORTERM=, which probably does not support color
	I0731 11:07:04.851903  193139 out.go:309] Setting ErrFile to fd 2...
	I0731 11:07:04.851910  193139 out.go:343] TERM=,COLORTERM=, which probably does not support color
	I0731 11:07:04.852105  193139 root.go:338] Updating PATH: /home/jenkins/minikube-integration/16969-5799/.minikube/bin
	I0731 11:07:04.852617  193139 out.go:303] Setting JSON to false
	I0731 11:07:04.853984  193139 start.go:128] hostinfo: {"hostname":"ubuntu-20-agent-15","uptime":2977,"bootTime":1690798648,"procs":516,"os":"linux","platform":"ubuntu","platformFamily":"debian","platformVersion":"20.04","kernelVersion":"5.15.0-1038-gcp","kernelArch":"x86_64","virtualizationSystem":"kvm","virtualizationRole":"guest","hostId":"591c9f12-2938-3743-e2bf-c56a050d43d1"}
	I0731 11:07:04.854044  193139 start.go:138] virtualization: kvm guest
	I0731 11:07:04.856249  193139 out.go:177] * [running-upgrade-371853] minikube v1.31.1 on Ubuntu 20.04 (kvm/amd64)
	I0731 11:07:04.857866  193139 out.go:177]   - MINIKUBE_LOCATION=16969
	I0731 11:07:04.857921  193139 notify.go:220] Checking for updates...
	I0731 11:07:04.859282  193139 out.go:177]   - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
	I0731 11:07:04.860830  193139 out.go:177]   - KUBECONFIG=/home/jenkins/minikube-integration/16969-5799/kubeconfig
	I0731 11:07:04.862284  193139 out.go:177]   - MINIKUBE_HOME=/home/jenkins/minikube-integration/16969-5799/.minikube
	I0731 11:07:04.863689  193139 out.go:177]   - MINIKUBE_BIN=out/minikube-linux-amd64
	I0731 11:07:04.865004  193139 out.go:177]   - MINIKUBE_FORCE_SYSTEMD=
	I0731 11:07:04.866671  193139 config.go:182] Loaded profile config "running-upgrade-371853": Driver=docker, ContainerRuntime=crio, KubernetesVersion=v1.18.0
	I0731 11:07:04.866703  193139 start_flags.go:695] config upgrade: KicBaseImage=gcr.io/k8s-minikube/kicbase:v0.0.40@sha256:8cadf23777709e43eca447c47a45f5a4635615129267ce025193040ec92a1631
	I0731 11:07:04.868446  193139 out.go:177] * Kubernetes 1.27.3 is now available. If you would like to upgrade, specify: --kubernetes-version=v1.27.3
	I0731 11:07:04.869750  193139 driver.go:373] Setting default libvirt URI to qemu:///system
	I0731 11:07:04.892962  193139 docker.go:121] docker version: linux-24.0.5:Docker Engine - Community
	I0731 11:07:04.893033  193139 cli_runner.go:164] Run: docker system info --format "{{json .}}"
	I0731 11:07:04.952138  193139 info.go:266] docker info: {ID:TS6T:UINC:MIYS:RZPA:KS6T:4JQK:7JHN:D6RA:LDP2:MHAE:G32M:C5NQ Containers:4 ContainersRunning:4 ContainersPaused:0 ContainersStopped:0 Images:4 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local logentries splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:true BridgeNfIP6Tables:true Debug:false NFd:95 OomKillDisable:true NGoroutines:80 SystemTime:2023-07-31 11:07:04.943929393 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1038-gcp OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Architecture:x86_64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:8 MemTotal:33648062464 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ubuntu-20-agent-15 Labels:[] ExperimentalBuild:false ServerVersion:24.0.5 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:3dce8eb055cbb6872793272b4f20ed16117344f8 Expected:3dce8eb055cbb6872793272b4f20ed16117344f8} RuncCommit:{ID:v1.1.7-0-g860f061 Expected:v1.1.7-0-g860f061} InitCommit:{ID:de40ad0 Expected:de40ad0} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.11.2] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.20.2] map[Name:scan Path:/usr/libexec/docker/cli-plugins/docker-scan SchemaVersion:0.1.0 ShortDescription:Docker Scan Vendor:Docker Inc. Version:v0.23.0]] Warnings:<nil>}}
	I0731 11:07:04.952235  193139 docker.go:294] overlay module found
	I0731 11:07:04.954823  193139 out.go:177] * Using the docker driver based on existing profile
	I0731 11:07:04.956240  193139 start.go:298] selected driver: docker
	I0731 11:07:04.956255  193139 start.go:898] validating driver "docker" against &{Name:running-upgrade-371853 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase:v0.0.40@sha256:8cadf23777709e43eca447c47a45f5a4635615129267ce025193040ec92a1631 Memory:2200 CPUs:2 DiskSize:20000 VMDriver: Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.99.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:0 APIServerPort:0 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser: SSHKey: SSHPort:0 KubernetesConfig:{KubernetesVersion:v1.18.0 ClusterName:running-upgrade-371853 Namespace: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:crio CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[{Component:kubeadm Key:pod-network-cidr Value:10.244.0.0/16}] ShouldLoadCachedImages:true EnableDefaultCNI:true CNI: NodeIP: NodePort:8443 NodeName:} Nodes:[{Name:m01 IP:172.17.0.2 Port:8443 KubernetesVersion:v1.18.0 ContainerRuntime: ControlPlane:true Worker:true}] Addons:map[default-storageclass:true storage-provisioner:true] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[] StartHostTimeout:0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString: Mount9PVersion: MountGID: MountIP: MountMSize:0 MountOptions:[] MountPort:0 MountType: MountUID: BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0}
	I0731 11:07:04.956354  193139 start.go:909] status for docker: {Installed:true Healthy:true Running:false NeedsImprovement:false Error:<nil> Reason: Fix: Doc: Version:}
	I0731 11:07:04.957419  193139 cli_runner.go:164] Run: docker system info --format "{{json .}}"
	I0731 11:07:05.028682  193139 info.go:266] docker info: {ID:TS6T:UINC:MIYS:RZPA:KS6T:4JQK:7JHN:D6RA:LDP2:MHAE:G32M:C5NQ Containers:4 ContainersRunning:4 ContainersPaused:0 ContainersStopped:0 Images:4 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local logentries splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:true BridgeNfIP6Tables:true Debug:false NFd:95 OomKillDisable:true NGoroutines:80 SystemTime:2023-07-31 11:07:05.020263147 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1038-gcp OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Architecture:x86_64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:8 MemTotal:33648062464 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ubuntu-20-agent-15 Labels:[] ExperimentalBuild:false ServerVersion:24.0.5 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:3dce8eb055cbb6872793272b4f20ed16117344f8 Expected:3dce8eb055cbb6872793272b4f20ed16117344f8} RuncCommit:{ID:v1.1.7-0-g860f061 Expected:v1.1.7-0-g860f061} InitCommit:{ID:de40ad0 Expected:de40ad0} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.11.2] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.20.2] map[Name:scan Path:/usr/libexec/docker/cli-plugins/docker-scan SchemaVersion:0.1.0 ShortDescription:Docker Scan Vendor:Docker Inc. Version:v0.23.0]] Warnings:<nil>}}
	I0731 11:07:05.028988  193139 cni.go:84] Creating CNI manager for ""
	I0731 11:07:05.029006  193139 cni.go:129] EnableDefaultCNI is true, recommending bridge
	I0731 11:07:05.029015  193139 start_flags.go:319] config:
	{Name:running-upgrade-371853 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase:v0.0.40@sha256:8cadf23777709e43eca447c47a45f5a4635615129267ce025193040ec92a1631 Memory:2200 CPUs:2 DiskSize:20000 VMDriver: Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.99.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:0 APIServerPort:0 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser: SSHKey: SSHPort:0 KubernetesConfig:{KubernetesVersion:v1.18.0 ClusterName:running-upgrade-371853 Namespace: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:crio CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[{Component:kubeadm Key:pod-network-cidr Value:10.244.0.0/16}] ShouldLoadCachedImages:true EnableDefaultCNI:true CNI: NodeIP: NodePort:8443 NodeName:} Nodes:[{Name:m01 IP:172.17.0.2 Port:8443 KubernetesVersion:v1.18.0 ContainerRuntime: ControlPlane:true Worker:true}] Addons:map[default-storageclass:true storage-provisioner:true] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[] StartHostTimeout:0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString: Mount9PVersion: MountGID: MountIP: MountMSize:0 MountOptions:[] MountPort:0 MountType: MountUID: BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0}
	I0731 11:07:05.030926  193139 out.go:177] * Starting control plane node running-upgrade-371853 in cluster running-upgrade-371853
	I0731 11:07:05.032052  193139 cache.go:122] Beginning downloading kic base image for docker with crio
	I0731 11:07:05.033431  193139 out.go:177] * Pulling base image ...
	I0731 11:07:05.034702  193139 preload.go:132] Checking if preload exists for k8s version v1.18.0 and runtime crio
	I0731 11:07:05.034736  193139 image.go:79] Checking for gcr.io/k8s-minikube/kicbase:v0.0.40@sha256:8cadf23777709e43eca447c47a45f5a4635615129267ce025193040ec92a1631 in local docker daemon
	I0731 11:07:05.050917  193139 image.go:83] Found gcr.io/k8s-minikube/kicbase:v0.0.40@sha256:8cadf23777709e43eca447c47a45f5a4635615129267ce025193040ec92a1631 in local docker daemon, skipping pull
	I0731 11:07:05.050940  193139 cache.go:145] gcr.io/k8s-minikube/kicbase:v0.0.40@sha256:8cadf23777709e43eca447c47a45f5a4635615129267ce025193040ec92a1631 exists in daemon, skipping load
	W0731 11:07:05.070806  193139 preload.go:115] https://storage.googleapis.com/minikube-preloaded-volume-tarballs/v18/v1.18.0/preloaded-images-k8s-v18-v1.18.0-cri-o-overlay-amd64.tar.lz4 status code: 404
	I0731 11:07:05.070939  193139 profile.go:148] Saving config to /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/running-upgrade-371853/config.json ...
	I0731 11:07:05.071058  193139 cache.go:107] acquiring lock: {Name:mkd056b10e42cfab4dad12d1164d9013dea375ed Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
	I0731 11:07:05.071067  193139 cache.go:107] acquiring lock: {Name:mk25285afec127bc86d07ebf6c026e302ef46776 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
	I0731 11:07:05.071097  193139 cache.go:107] acquiring lock: {Name:mk3b96290190a97fee50b550589aba2007b58fec Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
	I0731 11:07:05.071135  193139 cache.go:107] acquiring lock: {Name:mkdee17f59b804f8a793a7d20e36bf89565cdbf0 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
	I0731 11:07:05.071157  193139 cache.go:115] /home/jenkins/minikube-integration/16969-5799/.minikube/cache/images/amd64/registry.k8s.io/kube-proxy_v1.18.0 exists
	I0731 11:07:05.071165  193139 cache.go:115] /home/jenkins/minikube-integration/16969-5799/.minikube/cache/images/amd64/registry.k8s.io/coredns_1.6.7 exists
	I0731 11:07:05.071167  193139 cache.go:96] cache image "registry.k8s.io/kube-proxy:v1.18.0" -> "/home/jenkins/minikube-integration/16969-5799/.minikube/cache/images/amd64/registry.k8s.io/kube-proxy_v1.18.0" took 121.052µs
	I0731 11:07:05.071179  193139 cache.go:80] save to tar file registry.k8s.io/kube-proxy:v1.18.0 -> /home/jenkins/minikube-integration/16969-5799/.minikube/cache/images/amd64/registry.k8s.io/kube-proxy_v1.18.0 succeeded
	I0731 11:07:05.071181  193139 cache.go:96] cache image "registry.k8s.io/coredns:1.6.7" -> "/home/jenkins/minikube-integration/16969-5799/.minikube/cache/images/amd64/registry.k8s.io/coredns_1.6.7" took 87.263µs
	I0731 11:07:05.071186  193139 cache.go:115] /home/jenkins/minikube-integration/16969-5799/.minikube/cache/images/amd64/registry.k8s.io/kube-apiserver_v1.18.0 exists
	I0731 11:07:05.071192  193139 cache.go:80] save to tar file registry.k8s.io/coredns:1.6.7 -> /home/jenkins/minikube-integration/16969-5799/.minikube/cache/images/amd64/registry.k8s.io/coredns_1.6.7 succeeded
	I0731 11:07:05.071138  193139 cache.go:107] acquiring lock: {Name:mkb7c65b760e9a52dc2f2ff7078ac45013f41787 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
	I0731 11:07:05.071194  193139 cache.go:195] Successfully downloaded all kic artifacts
	I0731 11:07:05.071166  193139 cache.go:107] acquiring lock: {Name:mkbbcc54340a78d66229efc12136c6cc540f82df Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
	I0731 11:07:05.071199  193139 cache.go:107] acquiring lock: {Name:mk57bbb41e84bd86481012518ba5ca6d86ac8a3e Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
	I0731 11:07:05.071209  193139 cache.go:115] /home/jenkins/minikube-integration/16969-5799/.minikube/cache/images/amd64/registry.k8s.io/kube-controller-manager_v1.18.0 exists
	I0731 11:07:05.071221  193139 cache.go:115] /home/jenkins/minikube-integration/16969-5799/.minikube/cache/images/amd64/registry.k8s.io/etcd_3.4.3-0 exists
	I0731 11:07:05.071228  193139 cache.go:96] cache image "registry.k8s.io/etcd:3.4.3-0" -> "/home/jenkins/minikube-integration/16969-5799/.minikube/cache/images/amd64/registry.k8s.io/etcd_3.4.3-0" took 124.595µs
	I0731 11:07:05.071227  193139 cache.go:96] cache image "registry.k8s.io/kube-controller-manager:v1.18.0" -> "/home/jenkins/minikube-integration/16969-5799/.minikube/cache/images/amd64/registry.k8s.io/kube-controller-manager_v1.18.0" took 164.55µs
	I0731 11:07:05.071195  193139 cache.go:96] cache image "registry.k8s.io/kube-apiserver:v1.18.0" -> "/home/jenkins/minikube-integration/16969-5799/.minikube/cache/images/amd64/registry.k8s.io/kube-apiserver_v1.18.0" took 61.181µs
	I0731 11:07:05.071235  193139 cache.go:115] /home/jenkins/minikube-integration/16969-5799/.minikube/cache/images/amd64/registry.k8s.io/kube-scheduler_v1.18.0 exists
	I0731 11:07:05.071228  193139 start.go:365] acquiring machines lock for running-upgrade-371853: {Name:mkb87d652d9ab80b46b01a674e5c4f53dbafe4a6 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
	I0731 11:07:05.071241  193139 cache.go:80] save to tar file registry.k8s.io/kube-apiserver:v1.18.0 -> /home/jenkins/minikube-integration/16969-5799/.minikube/cache/images/amd64/registry.k8s.io/kube-apiserver_v1.18.0 succeeded
	I0731 11:07:05.071238  193139 cache.go:80] save to tar file registry.k8s.io/kube-controller-manager:v1.18.0 -> /home/jenkins/minikube-integration/16969-5799/.minikube/cache/images/amd64/registry.k8s.io/kube-controller-manager_v1.18.0 succeeded
	I0731 11:07:05.071245  193139 cache.go:96] cache image "registry.k8s.io/kube-scheduler:v1.18.0" -> "/home/jenkins/minikube-integration/16969-5799/.minikube/cache/images/amd64/registry.k8s.io/kube-scheduler_v1.18.0" took 48.706µs
	I0731 11:07:05.071263  193139 cache.go:115] /home/jenkins/minikube-integration/16969-5799/.minikube/cache/images/amd64/registry.k8s.io/pause_3.2 exists
	I0731 11:07:05.071267  193139 cache.go:80] save to tar file registry.k8s.io/kube-scheduler:v1.18.0 -> /home/jenkins/minikube-integration/16969-5799/.minikube/cache/images/amd64/registry.k8s.io/kube-scheduler_v1.18.0 succeeded
	I0731 11:07:05.071237  193139 cache.go:80] save to tar file registry.k8s.io/etcd:3.4.3-0 -> /home/jenkins/minikube-integration/16969-5799/.minikube/cache/images/amd64/registry.k8s.io/etcd_3.4.3-0 succeeded
	I0731 11:07:05.071037  193139 cache.go:107] acquiring lock: {Name:mk6dff368363ef234211aaf6a35e33c120b6a6ba Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
	I0731 11:07:05.071272  193139 cache.go:96] cache image "registry.k8s.io/pause:3.2" -> "/home/jenkins/minikube-integration/16969-5799/.minikube/cache/images/amd64/registry.k8s.io/pause_3.2" took 141.958µs
	I0731 11:07:05.071288  193139 cache.go:80] save to tar file registry.k8s.io/pause:3.2 -> /home/jenkins/minikube-integration/16969-5799/.minikube/cache/images/amd64/registry.k8s.io/pause_3.2 succeeded
	I0731 11:07:05.071331  193139 cache.go:115] /home/jenkins/minikube-integration/16969-5799/.minikube/cache/images/amd64/gcr.io/k8s-minikube/storage-provisioner_v5 exists
	I0731 11:07:05.071349  193139 cache.go:96] cache image "gcr.io/k8s-minikube/storage-provisioner:v5" -> "/home/jenkins/minikube-integration/16969-5799/.minikube/cache/images/amd64/gcr.io/k8s-minikube/storage-provisioner_v5" took 320.889µs
	I0731 11:07:05.071361  193139 cache.go:80] save to tar file gcr.io/k8s-minikube/storage-provisioner:v5 -> /home/jenkins/minikube-integration/16969-5799/.minikube/cache/images/amd64/gcr.io/k8s-minikube/storage-provisioner_v5 succeeded
	I0731 11:07:05.071368  193139 cache.go:87] Successfully saved all images to host disk.
	I0731 11:07:05.071331  193139 start.go:369] acquired machines lock for "running-upgrade-371853" in 79.006µs
	I0731 11:07:05.071387  193139 start.go:96] Skipping create...Using existing machine configuration
	I0731 11:07:05.071396  193139 fix.go:54] fixHost starting: m01
	I0731 11:07:05.071673  193139 cli_runner.go:164] Run: docker container inspect running-upgrade-371853 --format={{.State.Status}}
	I0731 11:07:05.087143  193139 fix.go:102] recreateIfNeeded on running-upgrade-371853: state=Running err=<nil>
	W0731 11:07:05.087178  193139 fix.go:128] unexpected machine state, will restart: <nil>
	I0731 11:07:05.090021  193139 out.go:177] * Updating the running docker "running-upgrade-371853" container ...
	I0731 11:07:05.091178  193139 machine.go:88] provisioning docker machine ...
	I0731 11:07:05.091216  193139 ubuntu.go:169] provisioning hostname "running-upgrade-371853"
	I0731 11:07:05.091279  193139 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" running-upgrade-371853
	I0731 11:07:05.108484  193139 main.go:141] libmachine: Using SSH client type: native
	I0731 11:07:05.109190  193139 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x80eb00] 0x811ba0 <nil>  [] 0s} 127.0.0.1 32974 <nil> <nil>}
	I0731 11:07:05.109212  193139 main.go:141] libmachine: About to run SSH command:
	sudo hostname running-upgrade-371853 && echo "running-upgrade-371853" | sudo tee /etc/hostname
	I0731 11:07:05.229306  193139 main.go:141] libmachine: SSH cmd err, output: <nil>: running-upgrade-371853
	
	I0731 11:07:05.229408  193139 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" running-upgrade-371853
	I0731 11:07:05.250737  193139 main.go:141] libmachine: Using SSH client type: native
	I0731 11:07:05.251156  193139 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x80eb00] 0x811ba0 <nil>  [] 0s} 127.0.0.1 32974 <nil> <nil>}
	I0731 11:07:05.251181  193139 main.go:141] libmachine: About to run SSH command:
	
			if ! grep -xq '.*\srunning-upgrade-371853' /etc/hosts; then
				if grep -xq '127.0.1.1\s.*' /etc/hosts; then
					sudo sed -i 's/^127.0.1.1\s.*/127.0.1.1 running-upgrade-371853/g' /etc/hosts;
				else 
					echo '127.0.1.1 running-upgrade-371853' | sudo tee -a /etc/hosts; 
				fi
			fi
	I0731 11:07:05.353970  193139 main.go:141] libmachine: SSH cmd err, output: <nil>: 
	I0731 11:07:05.354012  193139 ubuntu.go:175] set auth options {CertDir:/home/jenkins/minikube-integration/16969-5799/.minikube CaCertPath:/home/jenkins/minikube-integration/16969-5799/.minikube/certs/ca.pem CaPrivateKeyPath:/home/jenkins/minikube-integration/16969-5799/.minikube/certs/ca-key.pem CaCertRemotePath:/etc/docker/ca.pem ServerCertPath:/home/jenkins/minikube-integration/16969-5799/.minikube/machines/server.pem ServerKeyPath:/home/jenkins/minikube-integration/16969-5799/.minikube/machines/server-key.pem ClientKeyPath:/home/jenkins/minikube-integration/16969-5799/.minikube/certs/key.pem ServerCertRemotePath:/etc/docker/server.pem ServerKeyRemotePath:/etc/docker/server-key.pem ClientCertPath:/home/jenkins/minikube-integration/16969-5799/.minikube/certs/cert.pem ServerCertSANs:[] StorePath:/home/jenkins/minikube-integration/16969-5799/.minikube}
	I0731 11:07:05.354055  193139 ubuntu.go:177] setting up certificates
	I0731 11:07:05.354070  193139 provision.go:83] configureAuth start
	I0731 11:07:05.354136  193139 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" running-upgrade-371853
	I0731 11:07:05.369915  193139 provision.go:138] copyHostCerts
	I0731 11:07:05.369973  193139 exec_runner.go:144] found /home/jenkins/minikube-integration/16969-5799/.minikube/key.pem, removing ...
	I0731 11:07:05.369985  193139 exec_runner.go:203] rm: /home/jenkins/minikube-integration/16969-5799/.minikube/key.pem
	I0731 11:07:05.370047  193139 exec_runner.go:151] cp: /home/jenkins/minikube-integration/16969-5799/.minikube/certs/key.pem --> /home/jenkins/minikube-integration/16969-5799/.minikube/key.pem (1675 bytes)
	I0731 11:07:05.370682  193139 exec_runner.go:144] found /home/jenkins/minikube-integration/16969-5799/.minikube/ca.pem, removing ...
	I0731 11:07:05.370695  193139 exec_runner.go:203] rm: /home/jenkins/minikube-integration/16969-5799/.minikube/ca.pem
	I0731 11:07:05.370732  193139 exec_runner.go:151] cp: /home/jenkins/minikube-integration/16969-5799/.minikube/certs/ca.pem --> /home/jenkins/minikube-integration/16969-5799/.minikube/ca.pem (1082 bytes)
	I0731 11:07:05.370804  193139 exec_runner.go:144] found /home/jenkins/minikube-integration/16969-5799/.minikube/cert.pem, removing ...
	I0731 11:07:05.370813  193139 exec_runner.go:203] rm: /home/jenkins/minikube-integration/16969-5799/.minikube/cert.pem
	I0731 11:07:05.370837  193139 exec_runner.go:151] cp: /home/jenkins/minikube-integration/16969-5799/.minikube/certs/cert.pem --> /home/jenkins/minikube-integration/16969-5799/.minikube/cert.pem (1123 bytes)
	I0731 11:07:05.370894  193139 provision.go:112] generating server cert: /home/jenkins/minikube-integration/16969-5799/.minikube/machines/server.pem ca-key=/home/jenkins/minikube-integration/16969-5799/.minikube/certs/ca.pem private-key=/home/jenkins/minikube-integration/16969-5799/.minikube/certs/ca-key.pem org=jenkins.running-upgrade-371853 san=[172.17.0.2 127.0.0.1 localhost 127.0.0.1 minikube running-upgrade-371853]
	I0731 11:07:05.461216  193139 provision.go:172] copyRemoteCerts
	I0731 11:07:05.461272  193139 ssh_runner.go:195] Run: sudo mkdir -p /etc/docker /etc/docker /etc/docker
	I0731 11:07:05.461311  193139 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" running-upgrade-371853
	I0731 11:07:05.477939  193139 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32974 SSHKeyPath:/home/jenkins/minikube-integration/16969-5799/.minikube/machines/running-upgrade-371853/id_rsa Username:docker}
	I0731 11:07:05.557260  193139 ssh_runner.go:362] scp /home/jenkins/minikube-integration/16969-5799/.minikube/certs/ca.pem --> /etc/docker/ca.pem (1082 bytes)
	I0731 11:07:05.574053  193139 ssh_runner.go:362] scp /home/jenkins/minikube-integration/16969-5799/.minikube/machines/server.pem --> /etc/docker/server.pem (1241 bytes)
	I0731 11:07:05.590150  193139 ssh_runner.go:362] scp /home/jenkins/minikube-integration/16969-5799/.minikube/machines/server-key.pem --> /etc/docker/server-key.pem (1679 bytes)
	I0731 11:07:05.605879  193139 provision.go:86] duration metric: configureAuth took 251.789082ms
	I0731 11:07:05.605904  193139 ubuntu.go:193] setting minikube options for container-runtime
	I0731 11:07:05.606101  193139 config.go:182] Loaded profile config "running-upgrade-371853": Driver=docker, ContainerRuntime=crio, KubernetesVersion=v1.18.0
	I0731 11:07:05.606258  193139 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" running-upgrade-371853
	I0731 11:07:05.622844  193139 main.go:141] libmachine: Using SSH client type: native
	I0731 11:07:05.623230  193139 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x80eb00] 0x811ba0 <nil>  [] 0s} 127.0.0.1 32974 <nil> <nil>}
	I0731 11:07:05.623248  193139 main.go:141] libmachine: About to run SSH command:
	sudo mkdir -p /etc/sysconfig && printf %s "
	CRIO_MINIKUBE_OPTIONS='--insecure-registry 10.96.0.0/12 '
	" | sudo tee /etc/sysconfig/crio.minikube && sudo systemctl restart crio
	I0731 11:07:06.011221  193139 main.go:141] libmachine: SSH cmd err, output: <nil>: 
	CRIO_MINIKUBE_OPTIONS='--insecure-registry 10.96.0.0/12 '
	
	I0731 11:07:06.011255  193139 machine.go:91] provisioned docker machine in 920.06352ms
	I0731 11:07:06.011267  193139 start.go:300] post-start starting for "running-upgrade-371853" (driver="docker")
	I0731 11:07:06.011279  193139 start.go:329] creating required directories: [/etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs]
	I0731 11:07:06.011337  193139 ssh_runner.go:195] Run: sudo mkdir -p /etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs
	I0731 11:07:06.011392  193139 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" running-upgrade-371853
	I0731 11:07:06.027525  193139 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32974 SSHKeyPath:/home/jenkins/minikube-integration/16969-5799/.minikube/machines/running-upgrade-371853/id_rsa Username:docker}
	I0731 11:07:06.109236  193139 ssh_runner.go:195] Run: cat /etc/os-release
	I0731 11:07:06.111978  193139 main.go:141] libmachine: Couldn't set key PRIVACY_POLICY_URL, no corresponding struct field found
	I0731 11:07:06.112015  193139 main.go:141] libmachine: Couldn't set key VERSION_CODENAME, no corresponding struct field found
	I0731 11:07:06.112028  193139 main.go:141] libmachine: Couldn't set key UBUNTU_CODENAME, no corresponding struct field found
	I0731 11:07:06.112038  193139 info.go:137] Remote host: Ubuntu 19.10
	I0731 11:07:06.112047  193139 filesync.go:126] Scanning /home/jenkins/minikube-integration/16969-5799/.minikube/addons for local assets ...
	I0731 11:07:06.112107  193139 filesync.go:126] Scanning /home/jenkins/minikube-integration/16969-5799/.minikube/files for local assets ...
	I0731 11:07:06.112206  193139 filesync.go:149] local asset: /home/jenkins/minikube-integration/16969-5799/.minikube/files/etc/ssl/certs/125372.pem -> 125372.pem in /etc/ssl/certs
	I0731 11:07:06.112322  193139 ssh_runner.go:195] Run: sudo mkdir -p /etc/ssl/certs
	I0731 11:07:06.119900  193139 ssh_runner.go:362] scp /home/jenkins/minikube-integration/16969-5799/.minikube/files/etc/ssl/certs/125372.pem --> /etc/ssl/certs/125372.pem (1708 bytes)
	I0731 11:07:06.139485  193139 start.go:303] post-start completed in 128.201585ms
	I0731 11:07:06.139562  193139 ssh_runner.go:195] Run: sh -c "df -h /var | awk 'NR==2{print $5}'"
	I0731 11:07:06.139606  193139 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" running-upgrade-371853
	I0731 11:07:06.160382  193139 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32974 SSHKeyPath:/home/jenkins/minikube-integration/16969-5799/.minikube/machines/running-upgrade-371853/id_rsa Username:docker}
	I0731 11:07:06.239417  193139 ssh_runner.go:195] Run: sh -c "df -BG /var | awk 'NR==2{print $4}'"
	I0731 11:07:06.244179  193139 fix.go:56] fixHost completed within 1.172778504s
	I0731 11:07:06.244201  193139 start.go:83] releasing machines lock for "running-upgrade-371853", held for 1.172820393s
	I0731 11:07:06.244263  193139 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" running-upgrade-371853
	I0731 11:07:06.262751  193139 ssh_runner.go:195] Run: cat /version.json
	I0731 11:07:06.262816  193139 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" running-upgrade-371853
	I0731 11:07:06.262869  193139 ssh_runner.go:195] Run: curl -sS -m 2 https://registry.k8s.io/
	I0731 11:07:06.262948  193139 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" running-upgrade-371853
	I0731 11:07:06.279827  193139 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32974 SSHKeyPath:/home/jenkins/minikube-integration/16969-5799/.minikube/machines/running-upgrade-371853/id_rsa Username:docker}
	I0731 11:07:06.283524  193139 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32974 SSHKeyPath:/home/jenkins/minikube-integration/16969-5799/.minikube/machines/running-upgrade-371853/id_rsa Username:docker}
	W0731 11:07:06.387659  193139 start.go:419] Unable to open version.json: cat /version.json: Process exited with status 1
	stdout:
	
	stderr:
	cat: /version.json: No such file or directory
	I0731 11:07:06.387739  193139 ssh_runner.go:195] Run: systemctl --version
	I0731 11:07:06.392075  193139 ssh_runner.go:195] Run: sudo sh -c "podman version >/dev/null"
	I0731 11:07:06.442616  193139 ssh_runner.go:195] Run: sh -c "stat /etc/cni/net.d/*loopback.conf*"
	I0731 11:07:06.446892  193139 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f -name *loopback.conf* -not -name *.mk_disabled -exec sh -c "sudo mv {} {}.mk_disabled" ;
	I0731 11:07:06.461067  193139 cni.go:221] loopback cni configuration disabled: "/etc/cni/net.d/*loopback.conf*" found
	I0731 11:07:06.461165  193139 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f ( ( -name *bridge* -or -name *podman* ) -and -not -name *.mk_disabled ) -printf "%p, " -exec sh -c "sudo mv {} {}.mk_disabled" ;
	I0731 11:07:06.481877  193139 cni.go:262] disabled [/etc/cni/net.d/100-crio-bridge.conf, /etc/cni/net.d/87-podman-bridge.conflist] bridge cni config(s)
	I0731 11:07:06.481898  193139 start.go:466] detecting cgroup driver to use...
	I0731 11:07:06.481928  193139 detect.go:196] detected "cgroupfs" cgroup driver on host os
	I0731 11:07:06.481960  193139 ssh_runner.go:195] Run: sudo systemctl stop -f containerd
	I0731 11:07:06.503838  193139 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service containerd
	I0731 11:07:06.512484  193139 docker.go:196] disabling cri-docker service (if available) ...
	I0731 11:07:06.512524  193139 ssh_runner.go:195] Run: sudo systemctl stop -f cri-docker.socket
	I0731 11:07:06.520877  193139 ssh_runner.go:195] Run: sudo systemctl stop -f cri-docker.service
	I0731 11:07:06.528860  193139 ssh_runner.go:195] Run: sudo systemctl disable cri-docker.socket
	W0731 11:07:06.537284  193139 docker.go:206] Failed to disable socket "cri-docker.socket" (might be ok): sudo systemctl disable cri-docker.socket: Process exited with status 1
	stdout:
	
	stderr:
	Failed to disable unit: Unit file cri-docker.socket does not exist.
	I0731 11:07:06.537333  193139 ssh_runner.go:195] Run: sudo systemctl mask cri-docker.service
	I0731 11:07:06.604388  193139 docker.go:212] disabling docker service ...
	I0731 11:07:06.604442  193139 ssh_runner.go:195] Run: sudo systemctl stop -f docker.socket
	I0731 11:07:06.613587  193139 ssh_runner.go:195] Run: sudo systemctl stop -f docker.service
	I0731 11:07:06.622312  193139 ssh_runner.go:195] Run: sudo systemctl disable docker.socket
	I0731 11:07:06.688186  193139 ssh_runner.go:195] Run: sudo systemctl mask docker.service
	I0731 11:07:06.761417  193139 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service docker
	I0731 11:07:06.771572  193139 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///var/run/crio/crio.sock
	" | sudo tee /etc/crictl.yaml"
	I0731 11:07:06.784879  193139 crio.go:59] configure cri-o to use "registry.k8s.io/pause:3.2" pause image...
	I0731 11:07:06.784941  193139 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|^.*pause_image = .*$|pause_image = "registry.k8s.io/pause:3.2"|' /etc/crio/crio.conf.d/02-crio.conf"
	I0731 11:07:06.794405  193139 out.go:177] 
	W0731 11:07:06.795808  193139 out.go:239] X Exiting due to RUNTIME_ENABLE: Failed to enable container runtime: update pause_image: sh -c "sudo sed -i 's|^.*pause_image = .*$|pause_image = "registry.k8s.io/pause:3.2"|' /etc/crio/crio.conf.d/02-crio.conf": Process exited with status 2
	stdout:
	
	stderr:
	sed: can't read /etc/crio/crio.conf.d/02-crio.conf: No such file or directory
	
	W0731 11:07:06.795825  193139 out.go:239] * 
	W0731 11:07:06.796689  193139 out.go:239] ╭─────────────────────────────────────────────────────────────────────────────────────────────╮
	│                                                                                             │
	│    * If the above advice does not help, please let us know:                                 │
	│      https://github.com/kubernetes/minikube/issues/new/choose                               │
	│                                                                                             │
	│    * Please run `minikube logs --file=logs.txt` and attach logs.txt to the GitHub issue.    │
	│                                                                                             │
	╰─────────────────────────────────────────────────────────────────────────────────────────────╯
	I0731 11:07:06.798803  193139 out.go:177] 

                                                
                                                
** /stderr **
version_upgrade_test.go:144: upgrade from v1.9.0 to HEAD failed: out/minikube-linux-amd64 start -p running-upgrade-371853 --memory=2200 --alsologtostderr -v=1 --driver=docker  --container-runtime=crio: exit status 90
panic.go:522: *** TestRunningBinaryUpgrade FAILED at 2023-07-31 11:07:06.816007478 +0000 UTC m=+2012.924378462
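Root cause, for the record: the upgraded binary rewrites the CRI-O pause image in place via sed on /etc/crio/crio.conf.d/02-crio.conf, but the v1.9.0-era kicbase image used by this profile (kicbase:v0.0.8, Ubuntu 19.10) predates CRI-O's drop-in config directory, so the file is absent and sed exits with status 2. A minimal sketch of a more defensive update, assuming pause_image lives under the [crio.image] table and that this CRI-O build reads /etc/crio/crio.conf.d at all (an older release may instead require editing /etc/crio/crio.conf directly):

	# create the drop-in dir and write a well-formed drop-in instead of sed-editing a file that may not exist
	sudo mkdir -p /etc/crio/crio.conf.d
	printf '[crio.image]\npause_image = "registry.k8s.io/pause:3.2"\n' | sudo tee /etc/crio/crio.conf.d/02-crio.conf
	sudo systemctl restart crio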
helpers_test.go:222: -----------------------post-mortem--------------------------------
helpers_test.go:230: ======>  post-mortem[TestRunningBinaryUpgrade]: docker inspect <======
helpers_test.go:231: (dbg) Run:  docker inspect running-upgrade-371853
helpers_test.go:235: (dbg) docker inspect running-upgrade-371853:

                                                
                                                
-- stdout --
	[
	    {
	        "Id": "8ee9f75b5ed867f65e54c775dc4c814b532c3759edd5c56265c52739eb5e9311",
	        "Created": "2023-07-31T11:06:05.817773072Z",
	        "Path": "/usr/local/bin/entrypoint",
	        "Args": [
	            "/sbin/init"
	        ],
	        "State": {
	            "Status": "running",
	            "Running": true,
	            "Paused": false,
	            "Restarting": false,
	            "OOMKilled": false,
	            "Dead": false,
	            "Pid": 184271,
	            "ExitCode": 0,
	            "Error": "",
	            "StartedAt": "2023-07-31T11:06:06.269141584Z",
	            "FinishedAt": "0001-01-01T00:00:00Z"
	        },
	        "Image": "sha256:11589cdc9ef4b67a64cc243dd3cf013e81ad02bbed105fc37dc07aa272044680",
	        "ResolvConfPath": "/var/lib/docker/containers/8ee9f75b5ed867f65e54c775dc4c814b532c3759edd5c56265c52739eb5e9311/resolv.conf",
	        "HostnamePath": "/var/lib/docker/containers/8ee9f75b5ed867f65e54c775dc4c814b532c3759edd5c56265c52739eb5e9311/hostname",
	        "HostsPath": "/var/lib/docker/containers/8ee9f75b5ed867f65e54c775dc4c814b532c3759edd5c56265c52739eb5e9311/hosts",
	        "LogPath": "/var/lib/docker/containers/8ee9f75b5ed867f65e54c775dc4c814b532c3759edd5c56265c52739eb5e9311/8ee9f75b5ed867f65e54c775dc4c814b532c3759edd5c56265c52739eb5e9311-json.log",
	        "Name": "/running-upgrade-371853",
	        "RestartCount": 0,
	        "Driver": "overlay2",
	        "Platform": "linux",
	        "MountLabel": "",
	        "ProcessLabel": "",
	        "AppArmorProfile": "unconfined",
	        "ExecIDs": null,
	        "HostConfig": {
	            "Binds": [
	                "running-upgrade-371853:/var",
	                "/lib/modules:/lib/modules:ro"
	            ],
	            "ContainerIDFile": "",
	            "LogConfig": {
	                "Type": "json-file",
	                "Config": {
	                    "max-size": "100m"
	                }
	            },
	            "NetworkMode": "default",
	            "PortBindings": {
	                "22/tcp": [
	                    {
	                        "HostIp": "127.0.0.1",
	                        "HostPort": ""
	                    }
	                ],
	                "2376/tcp": [
	                    {
	                        "HostIp": "127.0.0.1",
	                        "HostPort": ""
	                    }
	                ],
	                "8443/tcp": [
	                    {
	                        "HostIp": "127.0.0.1",
	                        "HostPort": ""
	                    }
	                ]
	            },
	            "RestartPolicy": {
	                "Name": "no",
	                "MaximumRetryCount": 0
	            },
	            "AutoRemove": false,
	            "VolumeDriver": "",
	            "VolumesFrom": null,
	            "ConsoleSize": [
	                0,
	                0
	            ],
	            "CapAdd": null,
	            "CapDrop": null,
	            "CgroupnsMode": "host",
	            "Dns": [],
	            "DnsOptions": [],
	            "DnsSearch": [],
	            "ExtraHosts": null,
	            "GroupAdd": null,
	            "IpcMode": "private",
	            "Cgroup": "",
	            "Links": null,
	            "OomScoreAdj": 0,
	            "PidMode": "",
	            "Privileged": true,
	            "PublishAllPorts": false,
	            "ReadonlyRootfs": false,
	            "SecurityOpt": [
	                "seccomp=unconfined",
	                "label=disable"
	            ],
	            "Tmpfs": {
	                "/run": "",
	                "/tmp": ""
	            },
	            "UTSMode": "",
	            "UsernsMode": "",
	            "ShmSize": 67108864,
	            "Runtime": "runc",
	            "Isolation": "",
	            "CpuShares": 0,
	            "Memory": 2306867200,
	            "NanoCpus": 2000000000,
	            "CgroupParent": "",
	            "BlkioWeight": 0,
	            "BlkioWeightDevice": [],
	            "BlkioDeviceReadBps": [],
	            "BlkioDeviceWriteBps": [],
	            "BlkioDeviceReadIOps": [],
	            "BlkioDeviceWriteIOps": [],
	            "CpuPeriod": 0,
	            "CpuQuota": 0,
	            "CpuRealtimePeriod": 0,
	            "CpuRealtimeRuntime": 0,
	            "CpusetCpus": "",
	            "CpusetMems": "",
	            "Devices": [],
	            "DeviceCgroupRules": null,
	            "DeviceRequests": null,
	            "MemoryReservation": 0,
	            "MemorySwap": 4613734400,
	            "MemorySwappiness": null,
	            "OomKillDisable": false,
	            "PidsLimit": null,
	            "Ulimits": null,
	            "CpuCount": 0,
	            "CpuPercent": 0,
	            "IOMaximumIOps": 0,
	            "IOMaximumBandwidth": 0,
	            "MaskedPaths": null,
	            "ReadonlyPaths": null
	        },
	        "GraphDriver": {
	            "Data": {
	                "LowerDir": "/var/lib/docker/overlay2/4c953d0efb81992333064680c5508f12a33ab8521e4f353f4d25740c7fc7f319-init/diff:/var/lib/docker/overlay2/b21c9eab72c74442ca57d7d5c3eecad2a725c71f57243123375192c0cb12de1e/diff:/var/lib/docker/overlay2/49070b8deb88566cd0ae8fe3ee45ef9140ca85805365a39596b82f8818359acf/diff:/var/lib/docker/overlay2/a8e20e7bf7c24ab67e214b00c2e921e73fe8b0d79efe5b6d66c726b01ebde011/diff:/var/lib/docker/overlay2/4abedfd70afa76c8fc4cef2dacc96c08edec119f44559bc66de8aa6fbee097e0/diff:/var/lib/docker/overlay2/8d908e4fb400f644bb02ef5815c58d9579112a6e56cc74733120e286db05797e/diff:/var/lib/docker/overlay2/c98f896ee049bbbe8dead5e9886065a2cb3c10692b88b80d0d5694886d05c39b/diff:/var/lib/docker/overlay2/a0177b18b5bccfb1ac9b4a9586a74330952431c57a26ca000c6b44bc74a450e9/diff:/var/lib/docker/overlay2/8c391fa1e1b46b64ce810cc2845340bdae15286e9735056ad51420eed6ae6636/diff:/var/lib/docker/overlay2/ba0a9b6937a2a6f5fdfee816b757047216eb0b6a5baaf6464edcbf1ae9e2626f/diff:/var/lib/docker/overlay2/016746
3970a95ce465e791cf70b56fdaa11e18062970e287f2468ed243cd10a4/diff:/var/lib/docker/overlay2/4d985a841f19dcd1dff3774c343f6562342edff36f6e51a61121f2bfac7d2654/diff:/var/lib/docker/overlay2/3fde3f4ba896f5d3276058453086875370156367de7c7641fdc5e2681c6ac129/diff:/var/lib/docker/overlay2/053bf3c8ba323cc5922da6ab82c134c423ce504952c63dc85880076e48c96e4c/diff:/var/lib/docker/overlay2/11a9e499b023b1d69489a049cbd748c56a0d0cff739eabfa529d457be5b8ebd2/diff:/var/lib/docker/overlay2/25858ac06f535e72a075c9d0a60bd3c121b6a030e0fd36245b59bc56bd0cd943/diff:/var/lib/docker/overlay2/af0ad5b8ca61fa0d9e747732f0a227fcbfc82375abfc8bcec03192cbaad9c818/diff:/var/lib/docker/overlay2/55cfab0b3505e7aa9da25a3ab42b8aaa129a86f27a500d825b3643631ac061ab/diff:/var/lib/docker/overlay2/37bce0fcc4054a160b8c9b9b33af076051b5014bdd207aeb98874238bff9127c/diff:/var/lib/docker/overlay2/44012bb7d8c28166b9841a6cda9bba59c623cf55be2da59783b1d3ce0f9fdf88/diff:/var/lib/docker/overlay2/99a7dfec06d265f6b5f9385c9dfb4f4985c77e3d837e56ee67d9bf43356e6bf2/diff:/var/lib/d
ocker/overlay2/9d01e77a5bbd5f9afdc8a21e0e8d7daa8a24aec27a8c456a526f91560be6cacf/diff",
	                "MergedDir": "/var/lib/docker/overlay2/4c953d0efb81992333064680c5508f12a33ab8521e4f353f4d25740c7fc7f319/merged",
	                "UpperDir": "/var/lib/docker/overlay2/4c953d0efb81992333064680c5508f12a33ab8521e4f353f4d25740c7fc7f319/diff",
	                "WorkDir": "/var/lib/docker/overlay2/4c953d0efb81992333064680c5508f12a33ab8521e4f353f4d25740c7fc7f319/work"
	            },
	            "Name": "overlay2"
	        },
	        "Mounts": [
	            {
	                "Type": "bind",
	                "Source": "/lib/modules",
	                "Destination": "/lib/modules",
	                "Mode": "ro",
	                "RW": false,
	                "Propagation": "rprivate"
	            },
	            {
	                "Type": "volume",
	                "Name": "running-upgrade-371853",
	                "Source": "/var/lib/docker/volumes/running-upgrade-371853/_data",
	                "Destination": "/var",
	                "Driver": "local",
	                "Mode": "z",
	                "RW": true,
	                "Propagation": ""
	            }
	        ],
	        "Config": {
	            "Hostname": "running-upgrade-371853",
	            "Domainname": "",
	            "User": "root",
	            "AttachStdin": false,
	            "AttachStdout": false,
	            "AttachStderr": false,
	            "ExposedPorts": {
	                "22/tcp": {},
	                "2376/tcp": {},
	                "8443/tcp": {}
	            },
	            "Tty": true,
	            "OpenStdin": false,
	            "StdinOnce": false,
	            "Env": [
	                "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
	                "container=docker"
	            ],
	            "Cmd": null,
	            "Image": "gcr.io/k8s-minikube/kicbase:v0.0.8@sha256:2f3380ebf1bb0c75b0b47160fd4e61b7b8fef0f1f32f9def108d3eada50a7a81",
	            "Volumes": null,
	            "WorkingDir": "",
	            "Entrypoint": [
	                "/usr/local/bin/entrypoint",
	                "/sbin/init"
	            ],
	            "OnBuild": null,
	            "Labels": {
	                "created_by.minikube.sigs.k8s.io": "true",
	                "mode.minikube.sigs.k8s.io": "running-upgrade-371853",
	                "name.minikube.sigs.k8s.io": "running-upgrade-371853",
	                "role.minikube.sigs.k8s.io": ""
	            },
	            "StopSignal": "SIGRTMIN+3"
	        },
	        "NetworkSettings": {
	            "Bridge": "",
	            "SandboxID": "be65ca71f625480d12ccd6db0516b3e21dea4331e2b69d1d3a243da04eb051bc",
	            "HairpinMode": false,
	            "LinkLocalIPv6Address": "",
	            "LinkLocalIPv6PrefixLen": 0,
	            "Ports": {
	                "22/tcp": [
	                    {
	                        "HostIp": "127.0.0.1",
	                        "HostPort": "32974"
	                    }
	                ],
	                "2376/tcp": [
	                    {
	                        "HostIp": "127.0.0.1",
	                        "HostPort": "32973"
	                    }
	                ],
	                "8443/tcp": [
	                    {
	                        "HostIp": "127.0.0.1",
	                        "HostPort": "32972"
	                    }
	                ]
	            },
	            "SandboxKey": "/var/run/docker/netns/be65ca71f625",
	            "SecondaryIPAddresses": null,
	            "SecondaryIPv6Addresses": null,
	            "EndpointID": "c8904264a144d5fdacd278b615d97041b4d43b7f9bc5bf2a10b7c570edcfc42e",
	            "Gateway": "172.17.0.1",
	            "GlobalIPv6Address": "",
	            "GlobalIPv6PrefixLen": 0,
	            "IPAddress": "172.17.0.2",
	            "IPPrefixLen": 16,
	            "IPv6Gateway": "",
	            "MacAddress": "02:42:ac:11:00:02",
	            "Networks": {
	                "bridge": {
	                    "IPAMConfig": null,
	                    "Links": null,
	                    "Aliases": null,
	                    "NetworkID": "fc353249a7c55c91673515fe7f97fd268a3f4b3dd893144e861b86fd809037c7",
	                    "EndpointID": "c8904264a144d5fdacd278b615d97041b4d43b7f9bc5bf2a10b7c570edcfc42e",
	                    "Gateway": "172.17.0.1",
	                    "IPAddress": "172.17.0.2",
	                    "IPPrefixLen": 16,
	                    "IPv6Gateway": "",
	                    "GlobalIPv6Address": "",
	                    "GlobalIPv6PrefixLen": 0,
	                    "MacAddress": "02:42:ac:11:00:02",
	                    "DriverOpts": null
	                }
	            }
	        }
	    }
	]

                                                
                                                
-- /stdout --
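The inspect output above is also how the harness resolves the container's forwarded SSH port: each cli_runner call earlier in the log applies the same Go template. The equivalent one-liner, with output taken from the "Ports" block above:

	docker container inspect -f '{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}' running-upgrade-371853
	# prints 32974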
helpers_test.go:239: (dbg) Run:  out/minikube-linux-amd64 status --format={{.Host}} -p running-upgrade-371853 -n running-upgrade-371853
helpers_test.go:239: (dbg) Non-zero exit: out/minikube-linux-amd64 status --format={{.Host}} -p running-upgrade-371853 -n running-upgrade-371853: exit status 4 (308.797046ms)

                                                
                                                
-- stdout --
	Running
	WARNING: Your kubectl is pointing to stale minikube-vm.
	To fix the kubectl context, run `minikube update-context`

                                                
                                                
-- /stdout --
** stderr ** 
	E0731 11:07:07.111195  193717 status.go:415] kubeconfig endpoint: extract IP: "running-upgrade-371853" does not appear in /home/jenkins/minikube-integration/16969-5799/kubeconfig

                                                
                                                
** /stderr **
helpers_test.go:239: status error: exit status 4 (may be ok)
helpers_test.go:241: "running-upgrade-371853" host is not running, skipping log retrieval (state="Running\nWARNING: Your kubectl is pointing to stale minikube-vm.\nTo fix the kubectl context, run `minikube update-context`")
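The exit status 4 here is fallout from the aborted upgrade rather than an independent failure: the profile's endpoint was never written into the kubeconfig, so status cannot extract an IP. On a healthy cluster, the warning's suggested fix would be run against this profile, e.g.:

	minikube update-context -p running-upgrade-371853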
helpers_test.go:175: Cleaning up "running-upgrade-371853" profile ...
helpers_test.go:178: (dbg) Run:  out/minikube-linux-amd64 delete -p running-upgrade-371853
helpers_test.go:178: (dbg) Done: out/minikube-linux-amd64 delete -p running-upgrade-371853: (2.111554109s)
--- FAIL: TestRunningBinaryUpgrade (64.20s)
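To iterate on this failure locally, the test should be reproducible in isolation with Go's test filter against a freshly built out/minikube-linux-amd64; note the harness may additionally need its own flags (such as driver and container-runtime selection) to match this job's configuration:

	go test ./test/integration -run TestRunningBinaryUpgrade -timeout 90m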

                                                
                                    
TestStoppedBinaryUpgrade/Upgrade (89.53s)

                                                
                                                
=== RUN   TestStoppedBinaryUpgrade/Upgrade
version_upgrade_test.go:195: (dbg) Run:  /tmp/minikube-v1.9.0.3327598081.exe start -p stopped-upgrade-039423 --memory=2200 --vm-driver=docker  --container-runtime=crio
version_upgrade_test.go:195: (dbg) Done: /tmp/minikube-v1.9.0.3327598081.exe start -p stopped-upgrade-039423 --memory=2200 --vm-driver=docker  --container-runtime=crio: (1m20.315657731s)
version_upgrade_test.go:204: (dbg) Run:  /tmp/minikube-v1.9.0.3327598081.exe -p stopped-upgrade-039423 stop
E0731 11:05:56.015165   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/ingress-addon-legacy-538476/client.crt: no such file or directory
version_upgrade_test.go:204: (dbg) Done: /tmp/minikube-v1.9.0.3327598081.exe -p stopped-upgrade-039423 stop: (3.478662136s)
version_upgrade_test.go:210: (dbg) Run:  out/minikube-linux-amd64 start -p stopped-upgrade-039423 --memory=2200 --alsologtostderr -v=1 --driver=docker  --container-runtime=crio
E0731 11:05:58.881507   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/addons-764200/client.crt: no such file or directory
version_upgrade_test.go:210: (dbg) Non-zero exit: out/minikube-linux-amd64 start -p stopped-upgrade-039423 --memory=2200 --alsologtostderr -v=1 --driver=docker  --container-runtime=crio: exit status 90 (5.73020867s)

                                                
                                                
-- stdout --
	* [stopped-upgrade-039423] minikube v1.31.1 on Ubuntu 20.04 (kvm/amd64)
	  - MINIKUBE_LOCATION=16969
	  - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
	  - KUBECONFIG=/home/jenkins/minikube-integration/16969-5799/kubeconfig
	  - MINIKUBE_HOME=/home/jenkins/minikube-integration/16969-5799/.minikube
	  - MINIKUBE_BIN=out/minikube-linux-amd64
	  - MINIKUBE_FORCE_SYSTEMD=
	* Kubernetes 1.27.3 is now available. If you would like to upgrade, specify: --kubernetes-version=v1.27.3
	* Using the docker driver based on existing profile
	* Starting control plane node stopped-upgrade-039423 in cluster stopped-upgrade-039423
	* Pulling base image ...
	* Restarting existing docker container for "stopped-upgrade-039423" ...
	
	

                                                
                                                
-- /stdout --
** stderr ** 
	I0731 11:05:56.933125  182250 out.go:296] Setting OutFile to fd 1 ...
	I0731 11:05:56.933261  182250 out.go:343] TERM=,COLORTERM=, which probably does not support color
	I0731 11:05:56.933273  182250 out.go:309] Setting ErrFile to fd 2...
	I0731 11:05:56.933280  182250 out.go:343] TERM=,COLORTERM=, which probably does not support color
	I0731 11:05:56.933502  182250 root.go:338] Updating PATH: /home/jenkins/minikube-integration/16969-5799/.minikube/bin
	I0731 11:05:56.934231  182250 out.go:303] Setting JSON to false
	I0731 11:05:56.935759  182250 start.go:128] hostinfo: {"hostname":"ubuntu-20-agent-15","uptime":2909,"bootTime":1690798648,"procs":483,"os":"linux","platform":"ubuntu","platformFamily":"debian","platformVersion":"20.04","kernelVersion":"5.15.0-1038-gcp","kernelArch":"x86_64","virtualizationSystem":"kvm","virtualizationRole":"guest","hostId":"591c9f12-2938-3743-e2bf-c56a050d43d1"}
	I0731 11:05:56.935839  182250 start.go:138] virtualization: kvm guest
	I0731 11:05:56.938067  182250 out.go:177] * [stopped-upgrade-039423] minikube v1.31.1 on Ubuntu 20.04 (kvm/amd64)
	I0731 11:05:56.941026  182250 out.go:177]   - MINIKUBE_LOCATION=16969
	I0731 11:05:56.941122  182250 notify.go:220] Checking for updates...
	I0731 11:05:56.942555  182250 out.go:177]   - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
	I0731 11:05:56.944012  182250 out.go:177]   - KUBECONFIG=/home/jenkins/minikube-integration/16969-5799/kubeconfig
	I0731 11:05:56.945559  182250 out.go:177]   - MINIKUBE_HOME=/home/jenkins/minikube-integration/16969-5799/.minikube
	I0731 11:05:56.947059  182250 out.go:177]   - MINIKUBE_BIN=out/minikube-linux-amd64
	I0731 11:05:56.948368  182250 out.go:177]   - MINIKUBE_FORCE_SYSTEMD=
	I0731 11:05:56.950117  182250 config.go:182] Loaded profile config "stopped-upgrade-039423": Driver=docker, ContainerRuntime=crio, KubernetesVersion=v1.18.0
	I0731 11:05:56.950159  182250 start_flags.go:695] config upgrade: KicBaseImage=gcr.io/k8s-minikube/kicbase:v0.0.40@sha256:8cadf23777709e43eca447c47a45f5a4635615129267ce025193040ec92a1631
	I0731 11:05:56.952269  182250 out.go:177] * Kubernetes 1.27.3 is now available. If you would like to upgrade, specify: --kubernetes-version=v1.27.3
	I0731 11:05:56.953625  182250 driver.go:373] Setting default libvirt URI to qemu:///system
	I0731 11:05:56.987789  182250 docker.go:121] docker version: linux-24.0.5:Docker Engine - Community
	I0731 11:05:56.987860  182250 cli_runner.go:164] Run: docker system info --format "{{json .}}"
	I0731 11:05:57.065884  182250 info.go:266] docker info: {ID:TS6T:UINC:MIYS:RZPA:KS6T:4JQK:7JHN:D6RA:LDP2:MHAE:G32M:C5NQ Containers:4 ContainersRunning:3 ContainersPaused:0 ContainersStopped:1 Images:4 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local logentries splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:true BridgeNfIP6Tables:true Debug:false NFd:69 OomKillDisable:true NGoroutines:82 SystemTime:2023-07-31 11:05:57.053564201 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1038-gcp OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Architecture:x86_64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:8 MemTotal:33648062464 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ubuntu-20-agent-15 Labels:[] ExperimentalBuild:false ServerVersion:24.0.5 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:3dce8eb055cbb6872793272b4f20ed16117344f8 Expected:3dce8eb055cbb6872793272b4f20ed16117344f8} RuncCommit:{ID:v1.1.7-0-g860f061 Expected:v1.1.7-0-g860f061} InitCommit:{ID:de40ad0 Expected:de40ad0} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.11.2] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.20.2] map[Name:scan Path:/usr/libexec/docker/cli-plugins/docker-scan SchemaVersion:0.1.0 ShortDescription:Docker Scan Vendor:Docker Inc. Version:v0.23.0]] Warnings:<nil>}}
	I0731 11:05:57.065972  182250 docker.go:294] overlay module found
	I0731 11:05:57.067563  182250 out.go:177] * Using the docker driver based on existing profile
	I0731 11:05:57.068759  182250 start.go:298] selected driver: docker
	I0731 11:05:57.068767  182250 start.go:898] validating driver "docker" against &{Name:stopped-upgrade-039423 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase:v0.0.40@sha256:8cadf23777709e43eca447c47a45f5a4635615129267ce025193040ec92a1631 Memory:2200 CPUs:2 DiskSize:20000 VMDriver: Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.99.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:0 APIServerPort:0 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser: SSHKey: SSHPort:0 KubernetesConfig:{KubernetesVersion:v1.18.0 ClusterName:stopped-upgrade-039423 Namespace: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:crio CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[{Component:kubeadm Key:pod-network-cidr Value:10.244.0.0/16}] ShouldLoadCachedImages:true EnableDefaultCNI:true CNI: NodeIP: NodePort:8443 NodeName:} Nodes:[{Name:m01 IP:172.17.0.2 Port:8443 KubernetesVersion:v1.18.0 ContainerRuntime: ControlPlane:true Worker:true}] Addons:map[default-storageclass:true storage-provisioner:true] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[] StartHostTimeout:0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString: Mount9PVersion: MountGID: MountIP: MountMSize:0 MountOptions:[] MountPort:0 MountType: MountUID: BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0}
	I0731 11:05:57.068839  182250 start.go:909] status for docker: {Installed:true Healthy:true Running:false NeedsImprovement:false Error:<nil> Reason: Fix: Doc: Version:}
	I0731 11:05:57.069546  182250 cli_runner.go:164] Run: docker system info --format "{{json .}}"
	I0731 11:05:57.121250  182250 info.go:266] docker info: {ID:TS6T:UINC:MIYS:RZPA:KS6T:4JQK:7JHN:D6RA:LDP2:MHAE:G32M:C5NQ Containers:4 ContainersRunning:3 ContainersPaused:0 ContainersStopped:1 Images:4 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local logentries splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:true BridgeNfIP6Tables:true Debug:false NFd:69 OomKillDisable:true NGoroutines:82 SystemTime:2023-07-31 11:05:57.112130079 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1038-gcp OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Architecture:x86_64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:8 MemTotal:33648062464 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ubuntu-20-agent-15 Labels:[] ExperimentalBuild:false ServerVersion:24.0.5 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:3dce8eb055cbb6872793272b4f20ed16117344f8 Expected:3dce8eb055cbb6872793272b4f20ed16117344f8} RuncCommit:{ID:v1.1.7-0-g860f061 Expected:v1.1.7-0-g860f061} InitCommit:{ID:de40ad0 Expected:de40ad0} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.11.2] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.20.2] map[Name:scan Path:/usr/libexec/docker/cli-plugins/docker-scan SchemaVersion:0.1.0 ShortDescription:Docker Scan Vendor:Docker Inc. Version:v0.23.0]] Warnings:<nil>}}
	I0731 11:05:57.121526  182250 cni.go:84] Creating CNI manager for ""
	I0731 11:05:57.121543  182250 cni.go:129] EnableDefaultCNI is true, recommending bridge
	I0731 11:05:57.121549  182250 start_flags.go:319] config:
	{Name:stopped-upgrade-039423 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase:v0.0.40@sha256:8cadf23777709e43eca447c47a45f5a4635615129267ce025193040ec92a1631 Memory:2200 CPUs:2 DiskSize:20000 VMDriver: Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.99.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:0 APIServerPort:0 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser: SSHKey: SSHPort:0 KubernetesConfig:{KubernetesVersion:v1.18.0 ClusterName:stopped-upgrade-039423 Namespace: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:crio CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[{Component:kubeadm Key:pod-network-cidr Value:10.244.0.0/16}] ShouldLoadCachedImages:true EnableDefaultCNI:true CNI: NodeIP: NodePort:8443 NodeName:} Nodes:[{Name:m01 IP:172.17.0.2 Port:8443 KubernetesVersion:v1.18.0 ContainerRuntime: ControlPlane:true Worker:true}] Addons:map[default-storageclass:true storage-provisioner:true] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[] StartHostTimeout:0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString: Mount9PVersion: MountGID: MountIP: MountMSize:0 MountOptions:[] MountPort:0 MountType: MountUID: BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0}
	I0731 11:05:57.124453  182250 out.go:177] * Starting control plane node stopped-upgrade-039423 in cluster stopped-upgrade-039423
	I0731 11:05:57.125722  182250 cache.go:122] Beginning downloading kic base image for docker with crio
	I0731 11:05:57.127018  182250 out.go:177] * Pulling base image ...
	I0731 11:05:57.128248  182250 preload.go:132] Checking if preload exists for k8s version v1.18.0 and runtime crio
	I0731 11:05:57.128356  182250 image.go:79] Checking for gcr.io/k8s-minikube/kicbase:v0.0.40@sha256:8cadf23777709e43eca447c47a45f5a4635615129267ce025193040ec92a1631 in local docker daemon
	I0731 11:05:57.143365  182250 image.go:83] Found gcr.io/k8s-minikube/kicbase:v0.0.40@sha256:8cadf23777709e43eca447c47a45f5a4635615129267ce025193040ec92a1631 in local docker daemon, skipping pull
	I0731 11:05:57.143392  182250 cache.go:145] gcr.io/k8s-minikube/kicbase:v0.0.40@sha256:8cadf23777709e43eca447c47a45f5a4635615129267ce025193040ec92a1631 exists in daemon, skipping load
	W0731 11:05:57.160613  182250 preload.go:115] https://storage.googleapis.com/minikube-preloaded-volume-tarballs/v18/v1.18.0/preloaded-images-k8s-v18-v1.18.0-cri-o-overlay-amd64.tar.lz4 status code: 404
	I0731 11:05:57.160764  182250 profile.go:148] Saving config to /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/stopped-upgrade-039423/config.json ...
	I0731 11:05:57.160843  182250 cache.go:107] acquiring lock: {Name:mk25285afec127bc86d07ebf6c026e302ef46776 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
	I0731 11:05:57.160854  182250 cache.go:107] acquiring lock: {Name:mkbbcc54340a78d66229efc12136c6cc540f82df Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
	I0731 11:05:57.160923  182250 cache.go:107] acquiring lock: {Name:mkb7c65b760e9a52dc2f2ff7078ac45013f41787 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
	I0731 11:05:57.160934  182250 cache.go:107] acquiring lock: {Name:mk57bbb41e84bd86481012518ba5ca6d86ac8a3e Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
	I0731 11:05:57.160941  182250 cache.go:107] acquiring lock: {Name:mkd056b10e42cfab4dad12d1164d9013dea375ed Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
	I0731 11:05:57.161008  182250 cache.go:107] acquiring lock: {Name:mkdee17f59b804f8a793a7d20e36bf89565cdbf0 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
	I0731 11:05:57.161036  182250 cache.go:195] Successfully downloaded all kic artifacts
	I0731 11:05:57.160844  182250 cache.go:107] acquiring lock: {Name:mk6dff368363ef234211aaf6a35e33c120b6a6ba Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
	I0731 11:05:57.161080  182250 image.go:134] retrieving image: registry.k8s.io/kube-proxy:v1.18.0
	I0731 11:05:57.161094  182250 image.go:134] retrieving image: registry.k8s.io/kube-apiserver:v1.18.0
	I0731 11:05:57.161088  182250 cache.go:107] acquiring lock: {Name:mk3b96290190a97fee50b550589aba2007b58fec Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
	I0731 11:05:57.161187  182250 image.go:134] retrieving image: registry.k8s.io/kube-scheduler:v1.18.0
	I0731 11:05:57.161231  182250 cache.go:115] /home/jenkins/minikube-integration/16969-5799/.minikube/cache/images/amd64/gcr.io/k8s-minikube/storage-provisioner_v5 exists
	I0731 11:05:57.161243  182250 cache.go:96] cache image "gcr.io/k8s-minikube/storage-provisioner:v5" -> "/home/jenkins/minikube-integration/16969-5799/.minikube/cache/images/amd64/gcr.io/k8s-minikube/storage-provisioner_v5" took 405.437µs
	I0731 11:05:57.161253  182250 cache.go:80] save to tar file gcr.io/k8s-minikube/storage-provisioner:v5 -> /home/jenkins/minikube-integration/16969-5799/.minikube/cache/images/amd64/gcr.io/k8s-minikube/storage-provisioner_v5 succeeded
	I0731 11:05:57.161050  182250 image.go:134] retrieving image: registry.k8s.io/etcd:3.4.3-0
	I0731 11:05:57.161274  182250 image.go:134] retrieving image: registry.k8s.io/kube-controller-manager:v1.18.0
	I0731 11:05:57.161319  182250 image.go:134] retrieving image: registry.k8s.io/coredns:1.6.7
	I0731 11:05:57.161068  182250 start.go:365] acquiring machines lock for stopped-upgrade-039423: {Name:mk0fe340bc5a5613877b930eaa775e202321fbef Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
	I0731 11:05:57.161381  182250 start.go:369] acquired machines lock for "stopped-upgrade-039423" in 43.44µs
	I0731 11:05:57.161397  182250 start.go:96] Skipping create...Using existing machine configuration
	I0731 11:05:57.161403  182250 fix.go:54] fixHost starting: m01
	I0731 11:05:57.161602  182250 cli_runner.go:164] Run: docker container inspect stopped-upgrade-039423 --format={{.State.Status}}
	I0731 11:05:57.161051  182250 image.go:134] retrieving image: registry.k8s.io/pause:3.2
	I0731 11:05:57.162276  182250 image.go:177] daemon lookup for registry.k8s.io/kube-apiserver:v1.18.0: Error response from daemon: No such image: registry.k8s.io/kube-apiserver:v1.18.0
	I0731 11:05:57.162526  182250 image.go:177] daemon lookup for registry.k8s.io/coredns:1.6.7: Error response from daemon: No such image: registry.k8s.io/coredns:1.6.7
	I0731 11:05:57.162599  182250 image.go:177] daemon lookup for registry.k8s.io/kube-scheduler:v1.18.0: Error response from daemon: No such image: registry.k8s.io/kube-scheduler:v1.18.0
	I0731 11:05:57.162606  182250 image.go:177] daemon lookup for registry.k8s.io/kube-proxy:v1.18.0: Error response from daemon: No such image: registry.k8s.io/kube-proxy:v1.18.0
	I0731 11:05:57.162688  182250 image.go:177] daemon lookup for registry.k8s.io/etcd:3.4.3-0: Error response from daemon: No such image: registry.k8s.io/etcd:3.4.3-0
	I0731 11:05:57.162781  182250 image.go:177] daemon lookup for registry.k8s.io/pause:3.2: Error response from daemon: No such image: registry.k8s.io/pause:3.2
	I0731 11:05:57.162933  182250 image.go:177] daemon lookup for registry.k8s.io/kube-controller-manager:v1.18.0: Error response from daemon: No such image: registry.k8s.io/kube-controller-manager:v1.18.0
	I0731 11:05:57.180725  182250 fix.go:102] recreateIfNeeded on stopped-upgrade-039423: state=Stopped err=<nil>
	W0731 11:05:57.180744  182250 fix.go:128] unexpected machine state, will restart: <nil>
	I0731 11:05:57.183646  182250 out.go:177] * Restarting existing docker container for "stopped-upgrade-039423" ...
	I0731 11:05:57.184861  182250 cli_runner.go:164] Run: docker start stopped-upgrade-039423
	I0731 11:05:57.367626  182250 cache.go:162] opening:  /home/jenkins/minikube-integration/16969-5799/.minikube/cache/images/amd64/registry.k8s.io/etcd_3.4.3-0
	I0731 11:05:57.371134  182250 cache.go:162] opening:  /home/jenkins/minikube-integration/16969-5799/.minikube/cache/images/amd64/registry.k8s.io/kube-controller-manager_v1.18.0
	I0731 11:05:57.373657  182250 cache.go:162] opening:  /home/jenkins/minikube-integration/16969-5799/.minikube/cache/images/amd64/registry.k8s.io/pause_3.2
	I0731 11:05:57.389933  182250 cache.go:162] opening:  /home/jenkins/minikube-integration/16969-5799/.minikube/cache/images/amd64/registry.k8s.io/coredns_1.6.7
	I0731 11:05:57.406598  182250 cache.go:162] opening:  /home/jenkins/minikube-integration/16969-5799/.minikube/cache/images/amd64/registry.k8s.io/kube-proxy_v1.18.0
	I0731 11:05:57.450148  182250 cli_runner.go:164] Run: docker container inspect stopped-upgrade-039423 --format={{.State.Status}}
	I0731 11:05:57.454360  182250 cache.go:162] opening:  /home/jenkins/minikube-integration/16969-5799/.minikube/cache/images/amd64/registry.k8s.io/kube-scheduler_v1.18.0
	I0731 11:05:57.472118  182250 cache.go:162] opening:  /home/jenkins/minikube-integration/16969-5799/.minikube/cache/images/amd64/registry.k8s.io/kube-apiserver_v1.18.0
	I0731 11:05:57.473667  182250 cache.go:157] /home/jenkins/minikube-integration/16969-5799/.minikube/cache/images/amd64/registry.k8s.io/pause_3.2 exists
	I0731 11:05:57.473692  182250 cache.go:96] cache image "registry.k8s.io/pause:3.2" -> "/home/jenkins/minikube-integration/16969-5799/.minikube/cache/images/amd64/registry.k8s.io/pause_3.2" took 312.851402ms
	I0731 11:05:57.473705  182250 cache.go:80] save to tar file registry.k8s.io/pause:3.2 -> /home/jenkins/minikube-integration/16969-5799/.minikube/cache/images/amd64/registry.k8s.io/pause_3.2 succeeded
	I0731 11:05:57.483861  182250 kic.go:426] container "stopped-upgrade-039423" state is running.
	I0731 11:05:57.484171  182250 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" stopped-upgrade-039423
	I0731 11:05:57.501092  182250 profile.go:148] Saving config to /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/stopped-upgrade-039423/config.json ...
	I0731 11:05:57.501348  182250 machine.go:88] provisioning docker machine ...
	I0731 11:05:57.501383  182250 ubuntu.go:169] provisioning hostname "stopped-upgrade-039423"
	I0731 11:05:57.501442  182250 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" stopped-upgrade-039423
	I0731 11:05:57.545986  182250 main.go:141] libmachine: Using SSH client type: native
	I0731 11:05:57.546693  182250 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x80eb00] 0x811ba0 <nil>  [] 0s} 127.0.0.1 32971 <nil> <nil>}
	I0731 11:05:57.546715  182250 main.go:141] libmachine: About to run SSH command:
	sudo hostname stopped-upgrade-039423 && echo "stopped-upgrade-039423" | sudo tee /etc/hostname
	I0731 11:05:57.547291  182250 main.go:141] libmachine: Error dialing TCP: ssh: handshake failed: read tcp 127.0.0.1:37108->127.0.0.1:32971: read: connection reset by peer
	I0731 11:05:58.001355  182250 cache.go:157] /home/jenkins/minikube-integration/16969-5799/.minikube/cache/images/amd64/registry.k8s.io/coredns_1.6.7 exists
	I0731 11:05:58.001378  182250 cache.go:96] cache image "registry.k8s.io/coredns:1.6.7" -> "/home/jenkins/minikube-integration/16969-5799/.minikube/cache/images/amd64/registry.k8s.io/coredns_1.6.7" took 840.293498ms
	I0731 11:05:58.001403  182250 cache.go:80] save to tar file registry.k8s.io/coredns:1.6.7 -> /home/jenkins/minikube-integration/16969-5799/.minikube/cache/images/amd64/registry.k8s.io/coredns_1.6.7 succeeded
	I0731 11:05:58.215884  182250 cache.go:157] /home/jenkins/minikube-integration/16969-5799/.minikube/cache/images/amd64/registry.k8s.io/kube-controller-manager_v1.18.0 exists
	I0731 11:05:58.215914  182250 cache.go:96] cache image "registry.k8s.io/kube-controller-manager:v1.18.0" -> "/home/jenkins/minikube-integration/16969-5799/.minikube/cache/images/amd64/registry.k8s.io/kube-controller-manager_v1.18.0" took 1.055080051s
	I0731 11:05:58.215929  182250 cache.go:80] save to tar file registry.k8s.io/kube-controller-manager:v1.18.0 -> /home/jenkins/minikube-integration/16969-5799/.minikube/cache/images/amd64/registry.k8s.io/kube-controller-manager_v1.18.0 succeeded
	I0731 11:05:58.356470  182250 cache.go:157] /home/jenkins/minikube-integration/16969-5799/.minikube/cache/images/amd64/registry.k8s.io/kube-scheduler_v1.18.0 exists
	I0731 11:05:58.356504  182250 cache.go:96] cache image "registry.k8s.io/kube-scheduler:v1.18.0" -> "/home/jenkins/minikube-integration/16969-5799/.minikube/cache/images/amd64/registry.k8s.io/kube-scheduler_v1.18.0" took 1.195592348s
	I0731 11:05:58.356525  182250 cache.go:80] save to tar file registry.k8s.io/kube-scheduler:v1.18.0 -> /home/jenkins/minikube-integration/16969-5799/.minikube/cache/images/amd64/registry.k8s.io/kube-scheduler_v1.18.0 succeeded
	I0731 11:05:58.436690  182250 cache.go:157] /home/jenkins/minikube-integration/16969-5799/.minikube/cache/images/amd64/registry.k8s.io/kube-apiserver_v1.18.0 exists
	I0731 11:05:58.436719  182250 cache.go:96] cache image "registry.k8s.io/kube-apiserver:v1.18.0" -> "/home/jenkins/minikube-integration/16969-5799/.minikube/cache/images/amd64/registry.k8s.io/kube-apiserver_v1.18.0" took 1.275712589s
	I0731 11:05:58.436734  182250 cache.go:80] save to tar file registry.k8s.io/kube-apiserver:v1.18.0 -> /home/jenkins/minikube-integration/16969-5799/.minikube/cache/images/amd64/registry.k8s.io/kube-apiserver_v1.18.0 succeeded
	I0731 11:05:59.080603  182250 cache.go:157] /home/jenkins/minikube-integration/16969-5799/.minikube/cache/images/amd64/registry.k8s.io/kube-proxy_v1.18.0 exists
	I0731 11:05:59.080628  182250 cache.go:96] cache image "registry.k8s.io/kube-proxy:v1.18.0" -> "/home/jenkins/minikube-integration/16969-5799/.minikube/cache/images/amd64/registry.k8s.io/kube-proxy_v1.18.0" took 1.919689866s
	I0731 11:05:59.080642  182250 cache.go:80] save to tar file registry.k8s.io/kube-proxy:v1.18.0 -> /home/jenkins/minikube-integration/16969-5799/.minikube/cache/images/amd64/registry.k8s.io/kube-proxy_v1.18.0 succeeded
	I0731 11:05:59.166200  182250 cache.go:157] /home/jenkins/minikube-integration/16969-5799/.minikube/cache/images/amd64/registry.k8s.io/etcd_3.4.3-0 exists
	I0731 11:05:59.166231  182250 cache.go:96] cache image "registry.k8s.io/etcd:3.4.3-0" -> "/home/jenkins/minikube-integration/16969-5799/.minikube/cache/images/amd64/registry.k8s.io/etcd_3.4.3-0" took 2.005311901s
	I0731 11:05:59.166247  182250 cache.go:80] save to tar file registry.k8s.io/etcd:3.4.3-0 -> /home/jenkins/minikube-integration/16969-5799/.minikube/cache/images/amd64/registry.k8s.io/etcd_3.4.3-0 succeeded
	I0731 11:05:59.166266  182250 cache.go:87] Successfully saved all images to host disk.
	I0731 11:06:00.662951  182250 main.go:141] libmachine: SSH cmd err, output: <nil>: stopped-upgrade-039423
	
	I0731 11:06:00.663032  182250 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" stopped-upgrade-039423
	I0731 11:06:00.679581  182250 main.go:141] libmachine: Using SSH client type: native
	I0731 11:06:00.679975  182250 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x80eb00] 0x811ba0 <nil>  [] 0s} 127.0.0.1 32971 <nil> <nil>}
	I0731 11:06:00.679995  182250 main.go:141] libmachine: About to run SSH command:
	
			if ! grep -xq '.*\sstopped-upgrade-039423' /etc/hosts; then
				if grep -xq '127.0.1.1\s.*' /etc/hosts; then
					sudo sed -i 's/^127.0.1.1\s.*/127.0.1.1 stopped-upgrade-039423/g' /etc/hosts;
				else 
					echo '127.0.1.1 stopped-upgrade-039423' | sudo tee -a /etc/hosts; 
				fi
			fi
	I0731 11:06:00.785789  182250 main.go:141] libmachine: SSH cmd err, output: <nil>: 
	I0731 11:06:00.785821  182250 ubuntu.go:175] set auth options {CertDir:/home/jenkins/minikube-integration/16969-5799/.minikube CaCertPath:/home/jenkins/minikube-integration/16969-5799/.minikube/certs/ca.pem CaPrivateKeyPath:/home/jenkins/minikube-integration/16969-5799/.minikube/certs/ca-key.pem CaCertRemotePath:/etc/docker/ca.pem ServerCertPath:/home/jenkins/minikube-integration/16969-5799/.minikube/machines/server.pem ServerKeyPath:/home/jenkins/minikube-integration/16969-5799/.minikube/machines/server-key.pem ClientKeyPath:/home/jenkins/minikube-integration/16969-5799/.minikube/certs/key.pem ServerCertRemotePath:/etc/docker/server.pem ServerKeyRemotePath:/etc/docker/server-key.pem ClientCertPath:/home/jenkins/minikube-integration/16969-5799/.minikube/certs/cert.pem ServerCertSANs:[] StorePath:/home/jenkins/minikube-integration/16969-5799/.minikube}
	I0731 11:06:00.785850  182250 ubuntu.go:177] setting up certificates
	I0731 11:06:00.785860  182250 provision.go:83] configureAuth start
	I0731 11:06:00.785906  182250 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" stopped-upgrade-039423
	I0731 11:06:00.801477  182250 provision.go:138] copyHostCerts
	I0731 11:06:00.801530  182250 exec_runner.go:144] found /home/jenkins/minikube-integration/16969-5799/.minikube/ca.pem, removing ...
	I0731 11:06:00.801538  182250 exec_runner.go:203] rm: /home/jenkins/minikube-integration/16969-5799/.minikube/ca.pem
	I0731 11:06:00.801606  182250 exec_runner.go:151] cp: /home/jenkins/minikube-integration/16969-5799/.minikube/certs/ca.pem --> /home/jenkins/minikube-integration/16969-5799/.minikube/ca.pem (1082 bytes)
	I0731 11:06:00.801685  182250 exec_runner.go:144] found /home/jenkins/minikube-integration/16969-5799/.minikube/cert.pem, removing ...
	I0731 11:06:00.801693  182250 exec_runner.go:203] rm: /home/jenkins/minikube-integration/16969-5799/.minikube/cert.pem
	I0731 11:06:00.801716  182250 exec_runner.go:151] cp: /home/jenkins/minikube-integration/16969-5799/.minikube/certs/cert.pem --> /home/jenkins/minikube-integration/16969-5799/.minikube/cert.pem (1123 bytes)
	I0731 11:06:00.801767  182250 exec_runner.go:144] found /home/jenkins/minikube-integration/16969-5799/.minikube/key.pem, removing ...
	I0731 11:06:00.801775  182250 exec_runner.go:203] rm: /home/jenkins/minikube-integration/16969-5799/.minikube/key.pem
	I0731 11:06:00.801795  182250 exec_runner.go:151] cp: /home/jenkins/minikube-integration/16969-5799/.minikube/certs/key.pem --> /home/jenkins/minikube-integration/16969-5799/.minikube/key.pem (1675 bytes)
	I0731 11:06:00.801841  182250 provision.go:112] generating server cert: /home/jenkins/minikube-integration/16969-5799/.minikube/machines/server.pem ca-key=/home/jenkins/minikube-integration/16969-5799/.minikube/certs/ca.pem private-key=/home/jenkins/minikube-integration/16969-5799/.minikube/certs/ca-key.pem org=jenkins.stopped-upgrade-039423 san=[172.17.0.2 127.0.0.1 localhost 127.0.0.1 minikube stopped-upgrade-039423]
	I0731 11:06:01.089290  182250 provision.go:172] copyRemoteCerts
	I0731 11:06:01.089357  182250 ssh_runner.go:195] Run: sudo mkdir -p /etc/docker /etc/docker /etc/docker
	I0731 11:06:01.089389  182250 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" stopped-upgrade-039423
	I0731 11:06:01.106403  182250 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32971 SSHKeyPath:/home/jenkins/minikube-integration/16969-5799/.minikube/machines/stopped-upgrade-039423/id_rsa Username:docker}
	I0731 11:06:01.189235  182250 ssh_runner.go:362] scp /home/jenkins/minikube-integration/16969-5799/.minikube/certs/ca.pem --> /etc/docker/ca.pem (1082 bytes)
	I0731 11:06:01.205760  182250 ssh_runner.go:362] scp /home/jenkins/minikube-integration/16969-5799/.minikube/machines/server.pem --> /etc/docker/server.pem (1241 bytes)
	I0731 11:06:01.221831  182250 ssh_runner.go:362] scp /home/jenkins/minikube-integration/16969-5799/.minikube/machines/server-key.pem --> /etc/docker/server-key.pem (1679 bytes)
	I0731 11:06:01.237651  182250 provision.go:86] duration metric: configureAuth took 451.778701ms
	I0731 11:06:01.237674  182250 ubuntu.go:193] setting minikube options for container-runtime
	I0731 11:06:01.237837  182250 config.go:182] Loaded profile config "stopped-upgrade-039423": Driver=docker, ContainerRuntime=crio, KubernetesVersion=v1.18.0
	I0731 11:06:01.237938  182250 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" stopped-upgrade-039423
	I0731 11:06:01.254992  182250 main.go:141] libmachine: Using SSH client type: native
	I0731 11:06:01.255396  182250 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x80eb00] 0x811ba0 <nil>  [] 0s} 127.0.0.1 32971 <nil> <nil>}
	I0731 11:06:01.255420  182250 main.go:141] libmachine: About to run SSH command:
	sudo mkdir -p /etc/sysconfig && printf %s "
	CRIO_MINIKUBE_OPTIONS='--insecure-registry 10.96.0.0/12 '
	" | sudo tee /etc/sysconfig/crio.minikube && sudo systemctl restart crio
	I0731 11:06:01.849484  182250 main.go:141] libmachine: SSH cmd err, output: <nil>: 
	CRIO_MINIKUBE_OPTIONS='--insecure-registry 10.96.0.0/12 '
	
	I0731 11:06:01.849512  182250 machine.go:91] provisioned docker machine in 4.348145506s
	I0731 11:06:01.849522  182250 start.go:300] post-start starting for "stopped-upgrade-039423" (driver="docker")
	I0731 11:06:01.849531  182250 start.go:329] creating required directories: [/etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs]
	I0731 11:06:01.849585  182250 ssh_runner.go:195] Run: sudo mkdir -p /etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs
	I0731 11:06:01.849617  182250 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" stopped-upgrade-039423
	I0731 11:06:01.866083  182250 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32971 SSHKeyPath:/home/jenkins/minikube-integration/16969-5799/.minikube/machines/stopped-upgrade-039423/id_rsa Username:docker}
	I0731 11:06:01.949178  182250 ssh_runner.go:195] Run: cat /etc/os-release
	I0731 11:06:01.951790  182250 main.go:141] libmachine: Couldn't set key PRIVACY_POLICY_URL, no corresponding struct field found
	I0731 11:06:01.951809  182250 main.go:141] libmachine: Couldn't set key VERSION_CODENAME, no corresponding struct field found
	I0731 11:06:01.951817  182250 main.go:141] libmachine: Couldn't set key UBUNTU_CODENAME, no corresponding struct field found
	I0731 11:06:01.951823  182250 info.go:137] Remote host: Ubuntu 19.10
	I0731 11:06:01.951832  182250 filesync.go:126] Scanning /home/jenkins/minikube-integration/16969-5799/.minikube/addons for local assets ...
	I0731 11:06:01.951875  182250 filesync.go:126] Scanning /home/jenkins/minikube-integration/16969-5799/.minikube/files for local assets ...
	I0731 11:06:01.951940  182250 filesync.go:149] local asset: /home/jenkins/minikube-integration/16969-5799/.minikube/files/etc/ssl/certs/125372.pem -> 125372.pem in /etc/ssl/certs
	I0731 11:06:01.952030  182250 ssh_runner.go:195] Run: sudo mkdir -p /etc/ssl/certs
	I0731 11:06:01.957976  182250 ssh_runner.go:362] scp /home/jenkins/minikube-integration/16969-5799/.minikube/files/etc/ssl/certs/125372.pem --> /etc/ssl/certs/125372.pem (1708 bytes)
	I0731 11:06:01.973971  182250 start.go:303] post-start completed in 124.438032ms
	I0731 11:06:01.974037  182250 ssh_runner.go:195] Run: sh -c "df -h /var | awk 'NR==2{print $5}'"
	I0731 11:06:01.974071  182250 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" stopped-upgrade-039423
	I0731 11:06:01.989855  182250 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32971 SSHKeyPath:/home/jenkins/minikube-integration/16969-5799/.minikube/machines/stopped-upgrade-039423/id_rsa Username:docker}
	I0731 11:06:02.066365  182250 ssh_runner.go:195] Run: sh -c "df -BG /var | awk 'NR==2{print $4}'"
	I0731 11:06:02.069942  182250 fix.go:56] fixHost completed within 4.90853381s
	I0731 11:06:02.069961  182250 start.go:83] releasing machines lock for "stopped-upgrade-039423", held for 4.908570901s
	I0731 11:06:02.070021  182250 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" stopped-upgrade-039423
	I0731 11:06:02.085900  182250 ssh_runner.go:195] Run: cat /version.json
	I0731 11:06:02.085951  182250 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" stopped-upgrade-039423
	I0731 11:06:02.085993  182250 ssh_runner.go:195] Run: curl -sS -m 2 https://registry.k8s.io/
	I0731 11:06:02.086067  182250 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" stopped-upgrade-039423
	I0731 11:06:02.101781  182250 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32971 SSHKeyPath:/home/jenkins/minikube-integration/16969-5799/.minikube/machines/stopped-upgrade-039423/id_rsa Username:docker}
	I0731 11:06:02.103052  182250 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32971 SSHKeyPath:/home/jenkins/minikube-integration/16969-5799/.minikube/machines/stopped-upgrade-039423/id_rsa Username:docker}
	W0731 11:06:02.212246  182250 start.go:419] Unable to open version.json: cat /version.json: Process exited with status 1
	stdout:
	
	stderr:
	cat: /version.json: No such file or directory
	I0731 11:06:02.212305  182250 ssh_runner.go:195] Run: systemctl --version
	I0731 11:06:02.215886  182250 ssh_runner.go:195] Run: sudo sh -c "podman version >/dev/null"
	I0731 11:06:02.264368  182250 ssh_runner.go:195] Run: sh -c "stat /etc/cni/net.d/*loopback.conf*"
	I0731 11:06:02.268485  182250 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f -name *loopback.conf* -not -name *.mk_disabled -exec sh -c "sudo mv {} {}.mk_disabled" ;
	I0731 11:06:02.282797  182250 cni.go:221] loopback cni configuration disabled: "/etc/cni/net.d/*loopback.conf*" found
	I0731 11:06:02.282871  182250 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f ( ( -name *bridge* -or -name *podman* ) -and -not -name *.mk_disabled ) -printf "%p, " -exec sh -c "sudo mv {} {}.mk_disabled" ;
	I0731 11:06:02.303725  182250 cni.go:262] disabled [/etc/cni/net.d/100-crio-bridge.conf, /etc/cni/net.d/87-podman-bridge.conflist] bridge cni config(s)
	I0731 11:06:02.303746  182250 start.go:466] detecting cgroup driver to use...
	I0731 11:06:02.303779  182250 detect.go:196] detected "cgroupfs" cgroup driver on host os
	I0731 11:06:02.303815  182250 ssh_runner.go:195] Run: sudo systemctl stop -f containerd
	I0731 11:06:02.322545  182250 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service containerd
	I0731 11:06:02.331023  182250 docker.go:196] disabling cri-docker service (if available) ...
	I0731 11:06:02.331064  182250 ssh_runner.go:195] Run: sudo systemctl stop -f cri-docker.socket
	I0731 11:06:02.339051  182250 ssh_runner.go:195] Run: sudo systemctl stop -f cri-docker.service
	I0731 11:06:02.347224  182250 ssh_runner.go:195] Run: sudo systemctl disable cri-docker.socket
	W0731 11:06:02.355337  182250 docker.go:206] Failed to disable socket "cri-docker.socket" (might be ok): sudo systemctl disable cri-docker.socket: Process exited with status 1
	stdout:
	
	stderr:
	Failed to disable unit: Unit file cri-docker.socket does not exist.
	I0731 11:06:02.355376  182250 ssh_runner.go:195] Run: sudo systemctl mask cri-docker.service
	I0731 11:06:02.419188  182250 docker.go:212] disabling docker service ...
	I0731 11:06:02.419245  182250 ssh_runner.go:195] Run: sudo systemctl stop -f docker.socket
	I0731 11:06:02.428348  182250 ssh_runner.go:195] Run: sudo systemctl stop -f docker.service
	I0731 11:06:02.437454  182250 ssh_runner.go:195] Run: sudo systemctl disable docker.socket
	I0731 11:06:02.496148  182250 ssh_runner.go:195] Run: sudo systemctl mask docker.service
	I0731 11:06:02.568322  182250 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service docker
	I0731 11:06:02.577416  182250 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///var/run/crio/crio.sock
	" | sudo tee /etc/crictl.yaml"
	I0731 11:06:02.589955  182250 crio.go:59] configure cri-o to use "registry.k8s.io/pause:3.2" pause image...
	I0731 11:06:02.590004  182250 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|^.*pause_image = .*$|pause_image = "registry.k8s.io/pause:3.2"|' /etc/crio/crio.conf.d/02-crio.conf"
	I0731 11:06:02.599731  182250 out.go:177] 
	W0731 11:06:02.601433  182250 out.go:239] X Exiting due to RUNTIME_ENABLE: Failed to enable container runtime: update pause_image: sh -c "sudo sed -i 's|^.*pause_image = .*$|pause_image = "registry.k8s.io/pause:3.2"|' /etc/crio/crio.conf.d/02-crio.conf": Process exited with status 2
	stdout:
	
	stderr:
	sed: can't read /etc/crio/crio.conf.d/02-crio.conf: No such file or directory
	
	W0731 11:06:02.601448  182250 out.go:239] * 
	W0731 11:06:02.602287  182250 out.go:239] ╭─────────────────────────────────────────────────────────────────────────────────────────────╮
	│                                                                                             │
	│    * If the above advice does not help, please let us know:                                 │
	│      https://github.com/kubernetes/minikube/issues/new/choose                               │
	│                                                                                             │
	│    * Please run `minikube logs --file=logs.txt` and attach logs.txt to the GitHub issue.    │
	│                                                                                             │
	╰─────────────────────────────────────────────────────────────────────────────────────────────╯
	I0731 11:06:02.603929  182250 out.go:177] 

                                                
                                                
** /stderr **
version_upgrade_test.go:212: upgrade from v1.9.0 to HEAD failed: out/minikube-linux-amd64 start -p stopped-upgrade-039423 --memory=2200 --alsologtostderr -v=1 --driver=docker  --container-runtime=crio: exit status 90
--- FAIL: TestStoppedBinaryUpgrade/Upgrade (89.53s)
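Root cause, for readers triaging this failure: the stderr above shows sed failing because /etc/crio/crio.conf.d/02-crio.conf does not exist; the machine provisioned by the old v1.9.0 binary runs Ubuntu 19.10 (per the log), an image that evidently lacks that CRI-O drop-in file, so the pause_image rewrite has nothing to edit. A minimal defensive sketch of the same step, reusing the path and pause-image tag from the log above (illustrative only, not the fix that shipped in minikube; CONF is just a local shorthand):

	# Ensure the CRI-O drop-in exists before rewriting pause_image.
	CONF=/etc/crio/crio.conf.d/02-crio.conf
	sudo mkdir -p /etc/crio/crio.conf.d
	[ -f "$CONF" ] || printf '[crio.image]\n' | sudo tee "$CONF" >/dev/null
	if grep -q 'pause_image = ' "$CONF"; then
	  sudo sed -i 's|^.*pause_image = .*$|pause_image = "registry.k8s.io/pause:3.2"|' "$CONF"
	else
	  echo 'pause_image = "registry.k8s.io/pause:3.2"' | sudo tee -a "$CONF" >/dev/null
	fi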

                                                
                                    

Test pass (273/304)

Order  Passed test  Duration (s)
3 TestDownloadOnly/v1.16.0/json-events 8.59
4 TestDownloadOnly/v1.16.0/preload-exists 0
8 TestDownloadOnly/v1.16.0/LogsDuration 0.05
10 TestDownloadOnly/v1.27.3/json-events 8.99
11 TestDownloadOnly/v1.27.3/preload-exists 0
15 TestDownloadOnly/v1.27.3/LogsDuration 0.05
16 TestDownloadOnly/DeleteAll 0.18
17 TestDownloadOnly/DeleteAlwaysSucceeds 0.11
18 TestDownloadOnlyKic 1.15
19 TestBinaryMirror 0.67
20 TestOffline 84.84
22 TestAddons/Setup 125.04
24 TestAddons/parallel/Registry 13.44
26 TestAddons/parallel/InspektorGadget 10.93
27 TestAddons/parallel/MetricsServer 5.76
28 TestAddons/parallel/HelmTiller 9.72
30 TestAddons/parallel/CSI 70.44
31 TestAddons/parallel/Headlamp 14.99
32 TestAddons/parallel/CloudSpanner 5.54
35 TestAddons/serial/GCPAuth/Namespaces 0.11
36 TestAddons/StoppedEnableDisable 12.07
37 TestCertOptions 26.61
38 TestCertExpiration 232.03
40 TestForceSystemdFlag 30.73
41 TestForceSystemdEnv 37.97
43 TestKVMDriverInstallOrUpdate 3.05
47 TestErrorSpam/setup 23.22
48 TestErrorSpam/start 0.55
49 TestErrorSpam/status 0.82
50 TestErrorSpam/pause 1.43
51 TestErrorSpam/unpause 1.4
52 TestErrorSpam/stop 1.34
55 TestFunctional/serial/CopySyncFile 0
56 TestFunctional/serial/StartWithProxy 68.39
57 TestFunctional/serial/AuditLog 0
58 TestFunctional/serial/SoftStart 25.13
59 TestFunctional/serial/KubeContext 0.04
60 TestFunctional/serial/KubectlGetPods 0.07
63 TestFunctional/serial/CacheCmd/cache/add_remote 2.76
64 TestFunctional/serial/CacheCmd/cache/add_local 1.09
65 TestFunctional/serial/CacheCmd/cache/CacheDelete 0.04
66 TestFunctional/serial/CacheCmd/cache/list 0.04
67 TestFunctional/serial/CacheCmd/cache/verify_cache_inside_node 0.25
68 TestFunctional/serial/CacheCmd/cache/cache_reload 1.54
69 TestFunctional/serial/CacheCmd/cache/delete 0.08
70 TestFunctional/serial/MinikubeKubectlCmd 0.1
71 TestFunctional/serial/MinikubeKubectlCmdDirectly 0.1
72 TestFunctional/serial/ExtraConfig 32.03
73 TestFunctional/serial/ComponentHealth 0.07
74 TestFunctional/serial/LogsCmd 1.26
75 TestFunctional/serial/LogsFileCmd 1.29
76 TestFunctional/serial/InvalidService 4.13
78 TestFunctional/parallel/ConfigCmd 0.33
79 TestFunctional/parallel/DashboardCmd 9.21
80 TestFunctional/parallel/DryRun 0.95
81 TestFunctional/parallel/InternationalLanguage 0.15
82 TestFunctional/parallel/StatusCmd 0.96
86 TestFunctional/parallel/ServiceCmdConnect 11.62
87 TestFunctional/parallel/AddonsCmd 0.16
88 TestFunctional/parallel/PersistentVolumeClaim 30.95
90 TestFunctional/parallel/SSHCmd 0.57
91 TestFunctional/parallel/CpCmd 1.14
92 TestFunctional/parallel/MySQL 21.83
93 TestFunctional/parallel/FileSync 0.26
94 TestFunctional/parallel/CertSync 1.53
98 TestFunctional/parallel/NodeLabels 0.08
100 TestFunctional/parallel/NonActiveRuntimeDisabled 0.57
102 TestFunctional/parallel/License 0.17
103 TestFunctional/parallel/Version/short 0.04
104 TestFunctional/parallel/Version/components 1.16
105 TestFunctional/parallel/ServiceCmd/DeployApp 9.17
107 TestFunctional/parallel/TunnelCmd/serial/RunSecondTunnel 0.44
108 TestFunctional/parallel/TunnelCmd/serial/StartTunnel 0
110 TestFunctional/parallel/TunnelCmd/serial/WaitService/Setup 11.42
111 TestFunctional/parallel/ServiceCmd/List 0.63
112 TestFunctional/parallel/ServiceCmd/JSONOutput 0.5
113 TestFunctional/parallel/ServiceCmd/HTTPS 0.33
114 TestFunctional/parallel/ServiceCmd/Format 0.37
115 TestFunctional/parallel/ServiceCmd/URL 0.33
116 TestFunctional/parallel/TunnelCmd/serial/WaitService/IngressIP 0.06
117 TestFunctional/parallel/TunnelCmd/serial/AccessDirect 0
121 TestFunctional/parallel/TunnelCmd/serial/DeleteTunnel 0.11
122 TestFunctional/parallel/UpdateContextCmd/no_changes 0.12
123 TestFunctional/parallel/UpdateContextCmd/no_minikube_cluster 0.13
124 TestFunctional/parallel/UpdateContextCmd/no_clusters 0.13
125 TestFunctional/parallel/ImageCommands/ImageListShort 0.22
126 TestFunctional/parallel/ImageCommands/ImageListTable 0.22
127 TestFunctional/parallel/ImageCommands/ImageListJson 0.22
128 TestFunctional/parallel/ImageCommands/ImageListYaml 0.22
129 TestFunctional/parallel/ImageCommands/ImageBuild 1.74
130 TestFunctional/parallel/ImageCommands/Setup 0.9
131 TestFunctional/parallel/ProfileCmd/profile_not_create 0.38
132 TestFunctional/parallel/ProfileCmd/profile_list 0.34
133 TestFunctional/parallel/ImageCommands/ImageLoadDaemon 5.03
134 TestFunctional/parallel/ProfileCmd/profile_json_output 0.42
135 TestFunctional/parallel/MountCmd/any-port 6.89
136 TestFunctional/parallel/ImageCommands/ImageReloadDaemon 2.94
138 TestFunctional/parallel/ImageCommands/ImageTagAndLoadDaemon 4.92
139 TestFunctional/parallel/ImageCommands/ImageSaveToFile 1.23
140 TestFunctional/parallel/ImageCommands/ImageRemove 0.91
142 TestFunctional/parallel/MountCmd/VerifyCleanup 1.91
143 TestFunctional/parallel/ImageCommands/ImageSaveDaemon 3.53
144 TestFunctional/delete_addon-resizer_images 0.07
145 TestFunctional/delete_my-image_image 0.02
146 TestFunctional/delete_minikube_cached_images 0.01
150 TestIngressAddonLegacy/StartLegacyK8sCluster 78.32
152 TestIngressAddonLegacy/serial/ValidateIngressAddonActivation 10.72
153 TestIngressAddonLegacy/serial/ValidateIngressDNSAddonActivation 0.51
157 TestJSONOutput/start/Command 66.81
158 TestJSONOutput/start/Audit 0
160 TestJSONOutput/start/parallel/DistinctCurrentSteps 0
161 TestJSONOutput/start/parallel/IncreasingCurrentSteps 0
163 TestJSONOutput/pause/Command 0.62
164 TestJSONOutput/pause/Audit 0
166 TestJSONOutput/pause/parallel/DistinctCurrentSteps 0
167 TestJSONOutput/pause/parallel/IncreasingCurrentSteps 0
169 TestJSONOutput/unpause/Command 0.57
170 TestJSONOutput/unpause/Audit 0
172 TestJSONOutput/unpause/parallel/DistinctCurrentSteps 0
173 TestJSONOutput/unpause/parallel/IncreasingCurrentSteps 0
175 TestJSONOutput/stop/Command 5.64
176 TestJSONOutput/stop/Audit 0
178 TestJSONOutput/stop/parallel/DistinctCurrentSteps 0
179 TestJSONOutput/stop/parallel/IncreasingCurrentSteps 0
180 TestErrorJSONOutput 0.18
182 TestKicCustomNetwork/create_custom_network 31.24
183 TestKicCustomNetwork/use_default_bridge_network 23.22
184 TestKicExistingNetwork 27.13
185 TestKicCustomSubnet 23.75
186 TestKicStaticIP 26.03
187 TestMainNoArgs 0.04
188 TestMinikubeProfile 51.42
191 TestMountStart/serial/StartWithMountFirst 7.91
192 TestMountStart/serial/VerifyMountFirst 0.23
193 TestMountStart/serial/StartWithMountSecond 5.04
194 TestMountStart/serial/VerifyMountSecond 0.23
195 TestMountStart/serial/DeleteFirst 1.59
196 TestMountStart/serial/VerifyMountPostDelete 0.23
197 TestMountStart/serial/Stop 1.2
198 TestMountStart/serial/RestartStopped 6.94
199 TestMountStart/serial/VerifyMountPostStop 0.23
202 TestMultiNode/serial/FreshStart2Nodes 114.53
203 TestMultiNode/serial/DeployApp2Nodes 3.6
205 TestMultiNode/serial/AddNode 48.77
206 TestMultiNode/serial/ProfileList 0.26
207 TestMultiNode/serial/CopyFile 8.49
208 TestMultiNode/serial/StopNode 2.04
209 TestMultiNode/serial/StartAfterStop 10.45
210 TestMultiNode/serial/RestartKeepsNodes 109.82
211 TestMultiNode/serial/DeleteNode 4.57
212 TestMultiNode/serial/StopMultiNode 23.71
213 TestMultiNode/serial/RestartMultiNode 79.16
214 TestMultiNode/serial/ValidateNameConflict 25.89
219 TestPreload 125.05
221 TestScheduledStopUnix 98.06
224 TestInsufficientStorage 12.68
227 TestKubernetesUpgrade 359.78
228 TestMissingContainerUpgrade 133.12
231 TestNoKubernetes/serial/StartNoK8sWithVersion 0.07
234 TestNoKubernetes/serial/StartWithK8s 35.26
239 TestNetworkPlugins/group/false 8.67
243 TestNoKubernetes/serial/StartWithStopK8s 5.76
244 TestNoKubernetes/serial/Start 6.78
245 TestNoKubernetes/serial/VerifyK8sNotRunning 0.27
246 TestNoKubernetes/serial/ProfileList 1.16
247 TestNoKubernetes/serial/Stop 2.93
248 TestNoKubernetes/serial/StartNoArgs 6.28
249 TestNoKubernetes/serial/VerifyK8sNotRunningSecond 0.27
250 TestStoppedBinaryUpgrade/Setup 0.6
252 TestStoppedBinaryUpgrade/MinikubeLogs 0.49
261 TestPause/serial/Start 38.14
262 TestNetworkPlugins/group/auto/Start 46.17
263 TestPause/serial/SecondStartNoReconfiguration 29.59
264 TestNetworkPlugins/group/auto/KubeletFlags 0.35
265 TestNetworkPlugins/group/auto/NetCatPod 10.49
266 TestNetworkPlugins/group/kindnet/Start 67.39
267 TestNetworkPlugins/group/auto/DNS 0.16
268 TestNetworkPlugins/group/auto/Localhost 0.14
269 TestNetworkPlugins/group/auto/HairPin 0.14
270 TestPause/serial/Pause 0.75
271 TestPause/serial/VerifyStatus 0.3
272 TestPause/serial/Unpause 0.69
273 TestPause/serial/PauseAgain 0.89
274 TestPause/serial/DeletePaused 2.9
275 TestPause/serial/VerifyDeletedResources 1.81
276 TestNetworkPlugins/group/calico/Start 62.14
277 TestNetworkPlugins/group/custom-flannel/Start 56.79
278 TestNetworkPlugins/group/kindnet/ControllerPod 5.02
279 TestNetworkPlugins/group/kindnet/KubeletFlags 0.24
280 TestNetworkPlugins/group/kindnet/NetCatPod 10.33
281 TestNetworkPlugins/group/custom-flannel/KubeletFlags 0.28
282 TestNetworkPlugins/group/custom-flannel/NetCatPod 10.29
283 TestNetworkPlugins/group/calico/ControllerPod 5.03
284 TestNetworkPlugins/group/kindnet/DNS 0.15
285 TestNetworkPlugins/group/kindnet/Localhost 0.12
286 TestNetworkPlugins/group/kindnet/HairPin 0.13
287 TestNetworkPlugins/group/calico/KubeletFlags 0.25
288 TestNetworkPlugins/group/calico/NetCatPod 10.39
289 TestNetworkPlugins/group/custom-flannel/DNS 0.2
290 TestNetworkPlugins/group/custom-flannel/Localhost 0.2
291 TestNetworkPlugins/group/custom-flannel/HairPin 0.19
292 TestNetworkPlugins/group/calico/DNS 0.19
293 TestNetworkPlugins/group/calico/Localhost 0.15
294 TestNetworkPlugins/group/calico/HairPin 0.16
295 TestNetworkPlugins/group/enable-default-cni/Start 81.35
296 TestNetworkPlugins/group/flannel/Start 63.21
297 TestNetworkPlugins/group/bridge/Start 40.59
299 TestStartStop/group/old-k8s-version/serial/FirstStart 113.1
300 TestNetworkPlugins/group/bridge/KubeletFlags 0.35
301 TestNetworkPlugins/group/bridge/NetCatPod 12.36
302 TestNetworkPlugins/group/bridge/DNS 0.18
303 TestNetworkPlugins/group/bridge/Localhost 0.18
304 TestNetworkPlugins/group/bridge/HairPin 0.21
305 TestNetworkPlugins/group/flannel/ControllerPod 5.02
306 TestNetworkPlugins/group/flannel/KubeletFlags 0.29
307 TestNetworkPlugins/group/flannel/NetCatPod 10.33
308 TestNetworkPlugins/group/enable-default-cni/KubeletFlags 0.28
309 TestNetworkPlugins/group/enable-default-cni/NetCatPod 10.34
310 TestNetworkPlugins/group/flannel/DNS 0.21
311 TestNetworkPlugins/group/flannel/Localhost 0.15
312 TestNetworkPlugins/group/flannel/HairPin 0.16
313 TestNetworkPlugins/group/enable-default-cni/DNS 0.17
315 TestStartStop/group/no-preload/serial/FirstStart 57.35
316 TestNetworkPlugins/group/enable-default-cni/Localhost 0.15
317 TestNetworkPlugins/group/enable-default-cni/HairPin 0.15
319 TestStartStop/group/embed-certs/serial/FirstStart 72.27
321 TestStartStop/group/default-k8s-diff-port/serial/FirstStart 67.54
322 TestStartStop/group/no-preload/serial/DeployApp 8.39
323 TestStartStop/group/no-preload/serial/EnableAddonWhileActive 1.02
324 TestStartStop/group/no-preload/serial/Stop 11.91
325 TestStartStop/group/old-k8s-version/serial/DeployApp 7.41
326 TestStartStop/group/no-preload/serial/EnableAddonAfterStop 0.19
327 TestStartStop/group/no-preload/serial/SecondStart 334.11
328 TestStartStop/group/old-k8s-version/serial/EnableAddonWhileActive 0.74
329 TestStartStop/group/old-k8s-version/serial/Stop 11.94
330 TestStartStop/group/embed-certs/serial/DeployApp 7.42
331 TestStartStop/group/default-k8s-diff-port/serial/DeployApp 7.4
332 TestStartStop/group/old-k8s-version/serial/EnableAddonAfterStop 0.21
333 TestStartStop/group/old-k8s-version/serial/SecondStart 36.45
334 TestStartStop/group/embed-certs/serial/EnableAddonWhileActive 1.3
335 TestStartStop/group/embed-certs/serial/Stop 14.55
336 TestStartStop/group/default-k8s-diff-port/serial/EnableAddonWhileActive 1.08
337 TestStartStop/group/default-k8s-diff-port/serial/Stop 13
338 TestStartStop/group/embed-certs/serial/EnableAddonAfterStop 0.2
339 TestStartStop/group/default-k8s-diff-port/serial/EnableAddonAfterStop 0.19
340 TestStartStop/group/default-k8s-diff-port/serial/SecondStart 341.46
341 TestStartStop/group/embed-certs/serial/SecondStart 341.49
342 TestStartStop/group/old-k8s-version/serial/UserAppExistsAfterStop 20.02
343 TestStartStop/group/old-k8s-version/serial/AddonExistsAfterStop 5.07
344 TestStartStop/group/old-k8s-version/serial/VerifyKubernetesImages 0.3
345 TestStartStop/group/old-k8s-version/serial/Pause 2.62
347 TestStartStop/group/newest-cni/serial/FirstStart 37.64
348 TestStartStop/group/newest-cni/serial/DeployApp 0
349 TestStartStop/group/newest-cni/serial/EnableAddonWhileActive 0.84
350 TestStartStop/group/newest-cni/serial/Stop 1.2
351 TestStartStop/group/newest-cni/serial/EnableAddonAfterStop 0.16
352 TestStartStop/group/newest-cni/serial/SecondStart 25.57
353 TestStartStop/group/newest-cni/serial/UserAppExistsAfterStop 0
354 TestStartStop/group/newest-cni/serial/AddonExistsAfterStop 0
355 TestStartStop/group/newest-cni/serial/VerifyKubernetesImages 0.28
356 TestStartStop/group/newest-cni/serial/Pause 2.46
357 TestStartStop/group/no-preload/serial/UserAppExistsAfterStop 14.07
358 TestStartStop/group/no-preload/serial/AddonExistsAfterStop 5.1
359 TestStartStop/group/no-preload/serial/VerifyKubernetesImages 0.38
360 TestStartStop/group/no-preload/serial/Pause 3.11
361 TestStartStop/group/default-k8s-diff-port/serial/UserAppExistsAfterStop 14.03
362 TestStartStop/group/embed-certs/serial/UserAppExistsAfterStop 14.06
363 TestStartStop/group/default-k8s-diff-port/serial/AddonExistsAfterStop 5.08
364 TestStartStop/group/embed-certs/serial/AddonExistsAfterStop 5.09
365 TestStartStop/group/default-k8s-diff-port/serial/VerifyKubernetesImages 0.29
366 TestStartStop/group/embed-certs/serial/VerifyKubernetesImages 0.3
367 TestStartStop/group/default-k8s-diff-port/serial/Pause 2.67
368 TestStartStop/group/embed-certs/serial/Pause 2.63
TestDownloadOnly/v1.16.0/json-events (8.59s)

                                                
                                                
=== RUN   TestDownloadOnly/v1.16.0/json-events
aaa_download_only_test.go:69: (dbg) Run:  out/minikube-linux-amd64 start -o=json --download-only -p download-only-301760 --force --alsologtostderr --kubernetes-version=v1.16.0 --container-runtime=crio --driver=docker  --container-runtime=crio
aaa_download_only_test.go:69: (dbg) Done: out/minikube-linux-amd64 start -o=json --download-only -p download-only-301760 --force --alsologtostderr --kubernetes-version=v1.16.0 --container-runtime=crio --driver=docker  --container-runtime=crio: (8.586477467s)
--- PASS: TestDownloadOnly/v1.16.0/json-events (8.59s)

                                                
                                    
TestDownloadOnly/v1.16.0/preload-exists (0s)

                                                
                                                
=== RUN   TestDownloadOnly/v1.16.0/preload-exists
--- PASS: TestDownloadOnly/v1.16.0/preload-exists (0.00s)

                                                
                                    
TestDownloadOnly/v1.16.0/LogsDuration (0.05s)

                                                
                                                
=== RUN   TestDownloadOnly/v1.16.0/LogsDuration
aaa_download_only_test.go:169: (dbg) Run:  out/minikube-linux-amd64 logs -p download-only-301760
aaa_download_only_test.go:169: (dbg) Non-zero exit: out/minikube-linux-amd64 logs -p download-only-301760: exit status 85 (54.238528ms)

                                                
                                                
-- stdout --
	* 
	* ==> Audit <==
	* |---------|--------------------------------|----------------------|---------|---------|---------------------|----------|
	| Command |              Args              |       Profile        |  User   | Version |     Start Time      | End Time |
	|---------|--------------------------------|----------------------|---------|---------|---------------------|----------|
	| start   | -o=json --download-only        | download-only-301760 | jenkins | v1.31.1 | 31 Jul 23 10:33 UTC |          |
	|         | -p download-only-301760        |                      |         |         |                     |          |
	|         | --force --alsologtostderr      |                      |         |         |                     |          |
	|         | --kubernetes-version=v1.16.0   |                      |         |         |                     |          |
	|         | --container-runtime=crio       |                      |         |         |                     |          |
	|         | --driver=docker                |                      |         |         |                     |          |
	|         | --container-runtime=crio       |                      |         |         |                     |          |
	|---------|--------------------------------|----------------------|---------|---------|---------------------|----------|
	
	* 
	* ==> Last Start <==
	* Log file created at: 2023/07/31 10:33:33
	Running on machine: ubuntu-20-agent-15
	Binary: Built with gc go1.20.6 for linux/amd64
	Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg
	I0731 10:33:33.958063   12549 out.go:296] Setting OutFile to fd 1 ...
	I0731 10:33:33.958229   12549 out.go:343] TERM=,COLORTERM=, which probably does not support color
	I0731 10:33:33.958241   12549 out.go:309] Setting ErrFile to fd 2...
	I0731 10:33:33.958248   12549 out.go:343] TERM=,COLORTERM=, which probably does not support color
	I0731 10:33:33.958445   12549 root.go:338] Updating PATH: /home/jenkins/minikube-integration/16969-5799/.minikube/bin
	W0731 10:33:33.958564   12549 root.go:314] Error reading config file at /home/jenkins/minikube-integration/16969-5799/.minikube/config/config.json: open /home/jenkins/minikube-integration/16969-5799/.minikube/config/config.json: no such file or directory
	I0731 10:33:33.959108   12549 out.go:303] Setting JSON to true
	I0731 10:33:33.959967   12549 start.go:128] hostinfo: {"hostname":"ubuntu-20-agent-15","uptime":966,"bootTime":1690798648,"procs":174,"os":"linux","platform":"ubuntu","platformFamily":"debian","platformVersion":"20.04","kernelVersion":"5.15.0-1038-gcp","kernelArch":"x86_64","virtualizationSystem":"kvm","virtualizationRole":"guest","hostId":"591c9f12-2938-3743-e2bf-c56a050d43d1"}
	I0731 10:33:33.960027   12549 start.go:138] virtualization: kvm guest
	I0731 10:33:33.962399   12549 out.go:97] [download-only-301760] minikube v1.31.1 on Ubuntu 20.04 (kvm/amd64)
	I0731 10:33:33.964009   12549 out.go:169] MINIKUBE_LOCATION=16969
	W0731 10:33:33.962655   12549 preload.go:295] Failed to list preload files: open /home/jenkins/minikube-integration/16969-5799/.minikube/cache/preloaded-tarball: no such file or directory
	I0731 10:33:33.962801   12549 notify.go:220] Checking for updates...
	I0731 10:33:33.966665   12549 out.go:169] MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
	I0731 10:33:33.967882   12549 out.go:169] KUBECONFIG=/home/jenkins/minikube-integration/16969-5799/kubeconfig
	I0731 10:33:33.969207   12549 out.go:169] MINIKUBE_HOME=/home/jenkins/minikube-integration/16969-5799/.minikube
	I0731 10:33:33.970503   12549 out.go:169] MINIKUBE_BIN=out/minikube-linux-amd64
	W0731 10:33:33.972950   12549 out.go:272] minikube skips various validations when --force is supplied; this may lead to unexpected behavior
	I0731 10:33:33.973149   12549 driver.go:373] Setting default libvirt URI to qemu:///system
	I0731 10:33:33.994214   12549 docker.go:121] docker version: linux-24.0.5:Docker Engine - Community
	I0731 10:33:33.994278   12549 cli_runner.go:164] Run: docker system info --format "{{json .}}"
	I0731 10:33:34.322853   12549 info.go:266] docker info: {ID:TS6T:UINC:MIYS:RZPA:KS6T:4JQK:7JHN:D6RA:LDP2:MHAE:G32M:C5NQ Containers:0 ContainersRunning:0 ContainersPaused:0 ContainersStopped:0 Images:1 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local logentries splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:true BridgeNfIP6Tables:true Debug:false NFd:30 OomKillDisable:true NGoroutines:45 SystemTime:2023-07-31 10:33:34.31431867 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1038-gcp OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Architecture:x86_64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:8 MemTotal:33648062464 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ubuntu-20-agent-15 Labels:[] ExperimentalBuild:false ServerVersion:24.0.5 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:3dce8eb055cbb6872793272b4f20ed16117344f8 Expected:3dce8eb055cbb6872793272b4f20ed16117344f8} RuncCommit:{ID:v1.1.7-0-g860f061 Expected:v1.1.7-0-g860f061} InitCommit:{ID:de40ad0 Expected:de40ad0} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.11.2] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.20.2] map[Name:scan Path:/usr/libexec/docker/cli-plugins/docker-scan SchemaVersion:0.1.0 ShortDescription:Docker Scan Vendor:Docker Inc. Version:v0.23.0]] Warnings:<nil>}}
	I0731 10:33:34.322947   12549 docker.go:294] overlay module found
	I0731 10:33:34.324502   12549 out.go:97] Using the docker driver based on user configuration
	I0731 10:33:34.324519   12549 start.go:298] selected driver: docker
	I0731 10:33:34.324525   12549 start.go:898] validating driver "docker" against <nil>
	I0731 10:33:34.324588   12549 cli_runner.go:164] Run: docker system info --format "{{json .}}"
	I0731 10:33:34.376813   12549 info.go:266] docker info: {ID:TS6T:UINC:MIYS:RZPA:KS6T:4JQK:7JHN:D6RA:LDP2:MHAE:G32M:C5NQ Containers:0 ContainersRunning:0 ContainersPaused:0 ContainersStopped:0 Images:1 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local logentries splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:true BridgeNfIP6Tables:true Debug:false NFd:30 OomKillDisable:true NGoroutines:45 SystemTime:2023-07-31 10:33:34.369508346 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1038-gcp OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Architecture:x86_64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:8 MemTotal:33648062464 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ubuntu-20-agent-15 Labels:[] ExperimentalBuild:false ServerVersion:24.0.5 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:3dce8eb055cbb6872793272b4f20ed16117344f8 Expected:3dce8eb055cbb6872793272b4f20ed16117344f8} RuncCommit:{ID:v1.1.7-0-g860f061 Expected:v1.1.7-0-g860f061} InitCommit:{ID:de40ad0 Expected:de40ad0} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.11.2] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.20.2] map[Name:scan Path:/usr/libexec/docker/cli-plugins/docker-scan SchemaVersion:0.1.0 ShortDescription:Docker Scan Vendor:Docker Inc. Version:v0.23.0]] Warnings:<nil>}}
	I0731 10:33:34.377061   12549 start_flags.go:305] no existing cluster config was found, will generate one from the flags 
	I0731 10:33:34.377711   12549 start_flags.go:382] Using suggested 8000MB memory alloc based on sys=32089MB, container=32089MB
	I0731 10:33:34.377915   12549 start_flags.go:901] Wait components to verify : map[apiserver:true system_pods:true]
	I0731 10:33:34.379492   12549 out.go:169] Using Docker driver with root privileges
	I0731 10:33:34.380769   12549 cni.go:84] Creating CNI manager for ""
	I0731 10:33:34.380783   12549 cni.go:143] "docker" driver + "crio" runtime found, recommending kindnet
	I0731 10:33:34.380790   12549 start_flags.go:314] Found "CNI" CNI - setting NetworkPlugin=cni
	I0731 10:33:34.380801   12549 start_flags.go:319] config:
	{Name:download-only-301760 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase:v0.0.40@sha256:8cadf23777709e43eca447c47a45f5a4635615129267ce025193040ec92a1631 Memory:8000 CPUs:2 DiskSize:20000 VMDriver: Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:0 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.16.0 ClusterName:download-only-301760 Namespace:default APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:crio CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI: NodeIP: NodePort:8443 NodeName:} Nodes:[] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0}
	I0731 10:33:34.382177   12549 out.go:97] Starting control plane node download-only-301760 in cluster download-only-301760
	I0731 10:33:34.382226   12549 cache.go:122] Beginning downloading kic base image for docker with crio
	I0731 10:33:34.383358   12549 out.go:97] Pulling base image ...
	I0731 10:33:34.383384   12549 preload.go:132] Checking if preload exists for k8s version v1.16.0 and runtime crio
	I0731 10:33:34.383421   12549 image.go:79] Checking for gcr.io/k8s-minikube/kicbase:v0.0.40@sha256:8cadf23777709e43eca447c47a45f5a4635615129267ce025193040ec92a1631 in local docker daemon
	I0731 10:33:34.397222   12549 cache.go:150] Downloading gcr.io/k8s-minikube/kicbase:v0.0.40@sha256:8cadf23777709e43eca447c47a45f5a4635615129267ce025193040ec92a1631 to local cache
	I0731 10:33:34.397392   12549 image.go:63] Checking for gcr.io/k8s-minikube/kicbase:v0.0.40@sha256:8cadf23777709e43eca447c47a45f5a4635615129267ce025193040ec92a1631 in local cache directory
	I0731 10:33:34.397493   12549 image.go:118] Writing gcr.io/k8s-minikube/kicbase:v0.0.40@sha256:8cadf23777709e43eca447c47a45f5a4635615129267ce025193040ec92a1631 to local cache
	I0731 10:33:34.412146   12549 preload.go:119] Found remote preload: https://storage.googleapis.com/minikube-preloaded-volume-tarballs/v18/v1.16.0/preloaded-images-k8s-v18-v1.16.0-cri-o-overlay-amd64.tar.lz4
	I0731 10:33:34.412168   12549 cache.go:57] Caching tarball of preloaded images
	I0731 10:33:34.412265   12549 preload.go:132] Checking if preload exists for k8s version v1.16.0 and runtime crio
	I0731 10:33:34.413939   12549 out.go:97] Downloading Kubernetes v1.16.0 preload ...
	I0731 10:33:34.413956   12549 preload.go:238] getting checksum for preloaded-images-k8s-v18-v1.16.0-cri-o-overlay-amd64.tar.lz4 ...
	I0731 10:33:34.445287   12549 download.go:107] Downloading: https://storage.googleapis.com/minikube-preloaded-volume-tarballs/v18/v1.16.0/preloaded-images-k8s-v18-v1.16.0-cri-o-overlay-amd64.tar.lz4?checksum=md5:432b600409d778ea7a21214e83948570 -> /home/jenkins/minikube-integration/16969-5799/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.16.0-cri-o-overlay-amd64.tar.lz4
	I0731 10:33:37.890438   12549 cache.go:153] successfully saved gcr.io/k8s-minikube/kicbase:v0.0.40@sha256:8cadf23777709e43eca447c47a45f5a4635615129267ce025193040ec92a1631 as a tarball
	I0731 10:33:38.615154   12549 preload.go:249] saving checksum for preloaded-images-k8s-v18-v1.16.0-cri-o-overlay-amd64.tar.lz4 ...
	I0731 10:33:38.615246   12549 preload.go:256] verifying checksum of /home/jenkins/minikube-integration/16969-5799/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.16.0-cri-o-overlay-amd64.tar.lz4 ...
	
	* 
	* The control plane node "" does not exist.
	  To start a cluster, run: "minikube start -p download-only-301760"

                                                
                                                
-- /stdout --
aaa_download_only_test.go:170: minikube logs failed with error: exit status 85
--- PASS: TestDownloadOnly/v1.16.0/LogsDuration (0.05s)
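Note on the non-zero exit above: a --download-only run never starts a cluster, and the captured output itself reports that the control plane node "" does not exist, so "minikube logs" has nothing to collect and exits with status 85. The subtest records that error yet still passes, which indicates the non-zero exit is expected for a download-only profile. To observe the same behavior, re-run the exact command from the test output (binary path as used throughout this report):

	out/minikube-linux-amd64 logs -p download-only-301760
	echo "exit status: $?"   # 85 in this run: no control plane to read logs from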

                                                
                                    
TestDownloadOnly/v1.27.3/json-events (8.99s)

                                                
                                                
=== RUN   TestDownloadOnly/v1.27.3/json-events
aaa_download_only_test.go:69: (dbg) Run:  out/minikube-linux-amd64 start -o=json --download-only -p download-only-301760 --force --alsologtostderr --kubernetes-version=v1.27.3 --container-runtime=crio --driver=docker  --container-runtime=crio
aaa_download_only_test.go:69: (dbg) Done: out/minikube-linux-amd64 start -o=json --download-only -p download-only-301760 --force --alsologtostderr --kubernetes-version=v1.27.3 --container-runtime=crio --driver=docker  --container-runtime=crio: (8.993975696s)
--- PASS: TestDownloadOnly/v1.27.3/json-events (8.99s)

                                                
                                    
TestDownloadOnly/v1.27.3/preload-exists (0s)

                                                
                                                
=== RUN   TestDownloadOnly/v1.27.3/preload-exists
--- PASS: TestDownloadOnly/v1.27.3/preload-exists (0.00s)

                                                
                                    
TestDownloadOnly/v1.27.3/LogsDuration (0.05s)

                                                
                                                
=== RUN   TestDownloadOnly/v1.27.3/LogsDuration
aaa_download_only_test.go:169: (dbg) Run:  out/minikube-linux-amd64 logs -p download-only-301760
aaa_download_only_test.go:169: (dbg) Non-zero exit: out/minikube-linux-amd64 logs -p download-only-301760: exit status 85 (53.240178ms)

                                                
                                                
-- stdout --
	* 
	* ==> Audit <==
	* |---------|--------------------------------|----------------------|---------|---------|---------------------|----------|
	| Command |              Args              |       Profile        |  User   | Version |     Start Time      | End Time |
	|---------|--------------------------------|----------------------|---------|---------|---------------------|----------|
	| start   | -o=json --download-only        | download-only-301760 | jenkins | v1.31.1 | 31 Jul 23 10:33 UTC |          |
	|         | -p download-only-301760        |                      |         |         |                     |          |
	|         | --force --alsologtostderr      |                      |         |         |                     |          |
	|         | --kubernetes-version=v1.16.0   |                      |         |         |                     |          |
	|         | --container-runtime=crio       |                      |         |         |                     |          |
	|         | --driver=docker                |                      |         |         |                     |          |
	|         | --container-runtime=crio       |                      |         |         |                     |          |
	| start   | -o=json --download-only        | download-only-301760 | jenkins | v1.31.1 | 31 Jul 23 10:33 UTC |          |
	|         | -p download-only-301760        |                      |         |         |                     |          |
	|         | --force --alsologtostderr      |                      |         |         |                     |          |
	|         | --kubernetes-version=v1.27.3   |                      |         |         |                     |          |
	|         | --container-runtime=crio       |                      |         |         |                     |          |
	|         | --driver=docker                |                      |         |         |                     |          |
	|         | --container-runtime=crio       |                      |         |         |                     |          |
	|---------|--------------------------------|----------------------|---------|---------|---------------------|----------|
	
	* 
	* ==> Last Start <==
	* Log file created at: 2023/07/31 10:33:42
	Running on machine: ubuntu-20-agent-15
	Binary: Built with gc go1.20.6 for linux/amd64
	Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg
	I0731 10:33:42.602534   12706 out.go:296] Setting OutFile to fd 1 ...
	I0731 10:33:42.602618   12706 out.go:343] TERM=,COLORTERM=, which probably does not support color
	I0731 10:33:42.602625   12706 out.go:309] Setting ErrFile to fd 2...
	I0731 10:33:42.602629   12706 out.go:343] TERM=,COLORTERM=, which probably does not support color
	I0731 10:33:42.602809   12706 root.go:338] Updating PATH: /home/jenkins/minikube-integration/16969-5799/.minikube/bin
	W0731 10:33:42.602916   12706 root.go:314] Error reading config file at /home/jenkins/minikube-integration/16969-5799/.minikube/config/config.json: open /home/jenkins/minikube-integration/16969-5799/.minikube/config/config.json: no such file or directory
	I0731 10:33:42.603284   12706 out.go:303] Setting JSON to true
	I0731 10:33:42.604072   12706 start.go:128] hostinfo: {"hostname":"ubuntu-20-agent-15","uptime":975,"bootTime":1690798648,"procs":170,"os":"linux","platform":"ubuntu","platformFamily":"debian","platformVersion":"20.04","kernelVersion":"5.15.0-1038-gcp","kernelArch":"x86_64","virtualizationSystem":"kvm","virtualizationRole":"guest","hostId":"591c9f12-2938-3743-e2bf-c56a050d43d1"}
	I0731 10:33:42.604122   12706 start.go:138] virtualization: kvm guest
	I0731 10:33:42.606448   12706 out.go:97] [download-only-301760] minikube v1.31.1 on Ubuntu 20.04 (kvm/amd64)
	I0731 10:33:42.608046   12706 out.go:169] MINIKUBE_LOCATION=16969
	I0731 10:33:42.606547   12706 notify.go:220] Checking for updates...
	I0731 10:33:42.610898   12706 out.go:169] MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
	I0731 10:33:42.612576   12706 out.go:169] KUBECONFIG=/home/jenkins/minikube-integration/16969-5799/kubeconfig
	I0731 10:33:42.614126   12706 out.go:169] MINIKUBE_HOME=/home/jenkins/minikube-integration/16969-5799/.minikube
	I0731 10:33:42.615472   12706 out.go:169] MINIKUBE_BIN=out/minikube-linux-amd64
	W0731 10:33:42.618052   12706 out.go:272] minikube skips various validations when --force is supplied; this may lead to unexpected behavior
	I0731 10:33:42.618657   12706 config.go:182] Loaded profile config "download-only-301760": Driver=docker, ContainerRuntime=crio, KubernetesVersion=v1.16.0
	W0731 10:33:42.618715   12706 start.go:806] api.Load failed for download-only-301760: filestore "download-only-301760": Docker machine "download-only-301760" does not exist. Use "docker-machine ls" to list machines. Use "docker-machine create" to add a new one.
	I0731 10:33:42.618856   12706 driver.go:373] Setting default libvirt URI to qemu:///system
	W0731 10:33:42.618896   12706 start.go:806] api.Load failed for download-only-301760: filestore "download-only-301760": Docker machine "download-only-301760" does not exist. Use "docker-machine ls" to list machines. Use "docker-machine create" to add a new one.
	I0731 10:33:42.642332   12706 docker.go:121] docker version: linux-24.0.5:Docker Engine - Community
	I0731 10:33:42.642422   12706 cli_runner.go:164] Run: docker system info --format "{{json .}}"
	I0731 10:33:42.693559   12706 info.go:266] docker info: {ID:TS6T:UINC:MIYS:RZPA:KS6T:4JQK:7JHN:D6RA:LDP2:MHAE:G32M:C5NQ Containers:0 ContainersRunning:0 ContainersPaused:0 ContainersStopped:0 Images:1 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local logentries splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:true BridgeNfIP6Tables:true Debug:false NFd:30 OomKillDisable:true NGoroutines:42 SystemTime:2023-07-31 10:33:42.685491497 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1038-gcp OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Architecture:x86_64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:8 MemTotal:33648062464 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ubuntu-20-agent-15 Labels:[] ExperimentalBuild:false ServerVersion:24.0.5 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:3dce8eb055cbb6872793272b4f20ed16117344f8 Expected:3dce8eb055cbb6872793272b4f20ed16117344f8} RuncCommit:{ID:v1.1.7-0-g860f061 Expected:v1.1.7-0-g860f061} InitCommit:{ID:de40ad0 Expected:de40ad0} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.11.2] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.20.2] map[Name:scan Path:/usr/libexec/docker/cli-plugins/docker-scan SchemaVersion:0.1.0 ShortDescription:Docker Scan Vendor:Docker Inc. Version:v0.23.0]] Warnings:<nil>}}
	I0731 10:33:42.693686   12706 docker.go:294] overlay module found
	I0731 10:33:42.695474   12706 out.go:97] Using the docker driver based on existing profile
	I0731 10:33:42.695501   12706 start.go:298] selected driver: docker
	I0731 10:33:42.695508   12706 start.go:898] validating driver "docker" against &{Name:download-only-301760 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase:v0.0.40@sha256:8cadf23777709e43eca447c47a45f5a4635615129267ce025193040ec92a1631 Memory:8000 CPUs:2 DiskSize:20000 VMDriver: Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:0 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.16.0 ClusterName:download-only-301760 Namespace:default APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:crio CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI: NodeIP: NodePort:8443 NodeName:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.16.0 ContainerRuntime:crio ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0}
	I0731 10:33:42.695669   12706 cli_runner.go:164] Run: docker system info --format "{{json .}}"
	I0731 10:33:42.742278   12706 info.go:266] docker info: {ID:TS6T:UINC:MIYS:RZPA:KS6T:4JQK:7JHN:D6RA:LDP2:MHAE:G32M:C5NQ Containers:0 ContainersRunning:0 ContainersPaused:0 ContainersStopped:0 Images:1 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local logentries splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:true BridgeNfIP6Tables:true Debug:false NFd:30 OomKillDisable:true NGoroutines:42 SystemTime:2023-07-31 10:33:42.734820008 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1038-gcp OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Architecture:x86_64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:8 MemTotal:33648062464 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ubuntu-20-agent-15 Labels:[] ExperimentalBuild:false ServerVersion:24.0.5 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:3dce8eb055cbb6872793272b4f20ed16117344f8 Expected:3dce8eb055cbb6872793272b4f20ed16117344f8} RuncCommit:{ID:v1.1.7-0-g860f061 Expected:v1.1.7-0-g860f061} InitCommit:{ID:de40ad0 Expected:de40ad0} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.11.2] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.20.2] map[Name:scan Path:/usr/libexec/docker/cli-plugins/docker-scan SchemaVersion:0.1.0 ShortDescription:Docker Scan Vendor:Docker Inc. Version:v0.23.0]] Warnings:<nil>}}
	I0731 10:33:42.742871   12706 cni.go:84] Creating CNI manager for ""
	I0731 10:33:42.742889   12706 cni.go:143] "docker" driver + "crio" runtime found, recommending kindnet
	I0731 10:33:42.742902   12706 start_flags.go:319] config:
	{Name:download-only-301760 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase:v0.0.40@sha256:8cadf23777709e43eca447c47a45f5a4635615129267ce025193040ec92a1631 Memory:8000 CPUs:2 DiskSize:20000 VMDriver: Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:0 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.27.3 ClusterName:download-only-301760 Namespace:default APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:crio CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI: NodeIP: NodePort:8443 NodeName:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.16.0 ContainerRuntime:crio ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0}
	I0731 10:33:42.744790   12706 out.go:97] Starting control plane node download-only-301760 in cluster download-only-301760
	I0731 10:33:42.744808   12706 cache.go:122] Beginning downloading kic base image for docker with crio
	I0731 10:33:42.746290   12706 out.go:97] Pulling base image ...
	I0731 10:33:42.746315   12706 preload.go:132] Checking if preload exists for k8s version v1.27.3 and runtime crio
	I0731 10:33:42.746404   12706 image.go:79] Checking for gcr.io/k8s-minikube/kicbase:v0.0.40@sha256:8cadf23777709e43eca447c47a45f5a4635615129267ce025193040ec92a1631 in local docker daemon
	I0731 10:33:42.760529   12706 cache.go:150] Downloading gcr.io/k8s-minikube/kicbase:v0.0.40@sha256:8cadf23777709e43eca447c47a45f5a4635615129267ce025193040ec92a1631 to local cache
	I0731 10:33:42.760619   12706 image.go:63] Checking for gcr.io/k8s-minikube/kicbase:v0.0.40@sha256:8cadf23777709e43eca447c47a45f5a4635615129267ce025193040ec92a1631 in local cache directory
	I0731 10:33:42.760636   12706 image.go:66] Found gcr.io/k8s-minikube/kicbase:v0.0.40@sha256:8cadf23777709e43eca447c47a45f5a4635615129267ce025193040ec92a1631 in local cache directory, skipping pull
	I0731 10:33:42.760645   12706 image.go:105] gcr.io/k8s-minikube/kicbase:v0.0.40@sha256:8cadf23777709e43eca447c47a45f5a4635615129267ce025193040ec92a1631 exists in cache, skipping pull
	I0731 10:33:42.760655   12706 cache.go:153] successfully saved gcr.io/k8s-minikube/kicbase:v0.0.40@sha256:8cadf23777709e43eca447c47a45f5a4635615129267ce025193040ec92a1631 as a tarball
	I0731 10:33:42.784888   12706 preload.go:119] Found remote preload: https://storage.googleapis.com/minikube-preloaded-volume-tarballs/v18/v1.27.3/preloaded-images-k8s-v18-v1.27.3-cri-o-overlay-amd64.tar.lz4
	I0731 10:33:42.784911   12706 cache.go:57] Caching tarball of preloaded images
	I0731 10:33:42.785017   12706 preload.go:132] Checking if preload exists for k8s version v1.27.3 and runtime crio
	I0731 10:33:42.786740   12706 out.go:97] Downloading Kubernetes v1.27.3 preload ...
	I0731 10:33:42.786757   12706 preload.go:238] getting checksum for preloaded-images-k8s-v18-v1.27.3-cri-o-overlay-amd64.tar.lz4 ...
	I0731 10:33:42.820418   12706 download.go:107] Downloading: https://storage.googleapis.com/minikube-preloaded-volume-tarballs/v18/v1.27.3/preloaded-images-k8s-v18-v1.27.3-cri-o-overlay-amd64.tar.lz4?checksum=md5:36a3ccedce25b36b9ffc5201ce124dec -> /home/jenkins/minikube-integration/16969-5799/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.27.3-cri-o-overlay-amd64.tar.lz4
	I0731 10:33:47.265700   12706 preload.go:249] saving checksum for preloaded-images-k8s-v18-v1.27.3-cri-o-overlay-amd64.tar.lz4 ...
	I0731 10:33:47.265787   12706 preload.go:256] verifying checksum of /home/jenkins/minikube-integration/16969-5799/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.27.3-cri-o-overlay-amd64.tar.lz4 ...
	
	* 
	* The control plane node "" does not exist.
	  To start a cluster, run: "minikube start -p download-only-301760"

-- /stdout --
aaa_download_only_test.go:170: minikube logs failed with error: exit status 85
--- PASS: TestDownloadOnly/v1.27.3/LogsDuration (0.05s)
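Note on the preload flow logged above: download.go:107 appends the expected digest to the URL as ?checksum=md5:..., and preload.go:249/256 then saves and verifies that checksum for the downloaded tarball. A minimal Go sketch of the verification step, assuming a local copy of the tarball; the path and digest are copied from the log, and the helper name is hypothetical, not minikube's actual implementation:

	package main

	import (
		"crypto/md5"
		"encoding/hex"
		"fmt"
		"io"
		"os"
	)

	// verifyMD5 streams the file through an MD5 hash and compares the
	// hex digest with the expected value.
	func verifyMD5(path, want string) error {
		f, err := os.Open(path)
		if err != nil {
			return err
		}
		defer f.Close()
		h := md5.New()
		if _, err := io.Copy(h, f); err != nil {
			return err
		}
		if got := hex.EncodeToString(h.Sum(nil)); got != want {
			return fmt.Errorf("checksum mismatch: got %s, want %s", got, want)
		}
		return nil
	}

	func main() {
		// Path and digest taken from the download.go:107 line above.
		fmt.Println(verifyMD5(
			"preloaded-images-k8s-v18-v1.27.3-cri-o-overlay-amd64.tar.lz4",
			"36a3ccedce25b36b9ffc5201ce124dec"))
	}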

TestDownloadOnly/DeleteAll (0.18s)

=== RUN   TestDownloadOnly/DeleteAll
aaa_download_only_test.go:187: (dbg) Run:  out/minikube-linux-amd64 delete --all
--- PASS: TestDownloadOnly/DeleteAll (0.18s)

TestDownloadOnly/DeleteAlwaysSucceeds (0.11s)

=== RUN   TestDownloadOnly/DeleteAlwaysSucceeds
aaa_download_only_test.go:199: (dbg) Run:  out/minikube-linux-amd64 delete -p download-only-301760
--- PASS: TestDownloadOnly/DeleteAlwaysSucceeds (0.11s)

TestDownloadOnlyKic (1.15s)

=== RUN   TestDownloadOnlyKic
aaa_download_only_test.go:222: (dbg) Run:  out/minikube-linux-amd64 start --download-only -p download-docker-990933 --alsologtostderr --driver=docker  --container-runtime=crio
helpers_test.go:175: Cleaning up "download-docker-990933" profile ...
helpers_test.go:178: (dbg) Run:  out/minikube-linux-amd64 delete -p download-docker-990933
--- PASS: TestDownloadOnlyKic (1.15s)

TestBinaryMirror (0.67s)

=== RUN   TestBinaryMirror
aaa_download_only_test.go:304: (dbg) Run:  out/minikube-linux-amd64 start --download-only -p binary-mirror-398610 --alsologtostderr --binary-mirror http://127.0.0.1:38677 --driver=docker  --container-runtime=crio
helpers_test.go:175: Cleaning up "binary-mirror-398610" profile ...
helpers_test.go:178: (dbg) Run:  out/minikube-linux-amd64 delete -p binary-mirror-398610
--- PASS: TestBinaryMirror (0.67s)

TestOffline (84.84s)

=== RUN   TestOffline
=== PAUSE TestOffline

=== CONT  TestOffline
aab_offline_test.go:55: (dbg) Run:  out/minikube-linux-amd64 start -p offline-crio-930318 --alsologtostderr -v=1 --memory=2048 --wait=true --driver=docker  --container-runtime=crio
aab_offline_test.go:55: (dbg) Done: out/minikube-linux-amd64 start -p offline-crio-930318 --alsologtostderr -v=1 --memory=2048 --wait=true --driver=docker  --container-runtime=crio: (1m21.401631016s)
helpers_test.go:175: Cleaning up "offline-crio-930318" profile ...
helpers_test.go:178: (dbg) Run:  out/minikube-linux-amd64 delete -p offline-crio-930318
helpers_test.go:178: (dbg) Done: out/minikube-linux-amd64 delete -p offline-crio-930318: (3.441186748s)
--- PASS: TestOffline (84.84s)

TestAddons/Setup (125.04s)

=== RUN   TestAddons/Setup
addons_test.go:88: (dbg) Run:  out/minikube-linux-amd64 start -p addons-764200 --wait=true --memory=4000 --alsologtostderr --addons=registry --addons=metrics-server --addons=volumesnapshots --addons=csi-hostpath-driver --addons=gcp-auth --addons=cloud-spanner --addons=inspektor-gadget --driver=docker  --container-runtime=crio --addons=ingress --addons=ingress-dns --addons=helm-tiller
addons_test.go:88: (dbg) Done: out/minikube-linux-amd64 start -p addons-764200 --wait=true --memory=4000 --alsologtostderr --addons=registry --addons=metrics-server --addons=volumesnapshots --addons=csi-hostpath-driver --addons=gcp-auth --addons=cloud-spanner --addons=inspektor-gadget --driver=docker  --container-runtime=crio --addons=ingress --addons=ingress-dns --addons=helm-tiller: (2m5.03678791s)
--- PASS: TestAddons/Setup (125.04s)

                                                
                                    
x
+
TestAddons/parallel/Registry (13.44s)

                                                
                                                
=== RUN   TestAddons/parallel/Registry
=== PAUSE TestAddons/parallel/Registry

=== CONT  TestAddons/parallel/Registry
addons_test.go:306: registry stabilized in 16.468365ms
addons_test.go:308: (dbg) TestAddons/parallel/Registry: waiting 6m0s for pods matching "actual-registry=true" in namespace "kube-system" ...
helpers_test.go:344: "registry-6jvkx" [30f6df32-a1ae-4320-9d21-a4ba4e21c885] Running
addons_test.go:308: (dbg) TestAddons/parallel/Registry: actual-registry=true healthy within 5.0120464s
addons_test.go:311: (dbg) TestAddons/parallel/Registry: waiting 10m0s for pods matching "registry-proxy=true" in namespace "kube-system" ...
helpers_test.go:344: "registry-proxy-x29w4" [babf24fb-dbb5-4870-af51-93a0c43cd64a] Running
addons_test.go:311: (dbg) TestAddons/parallel/Registry: registry-proxy=true healthy within 5.011043417s
addons_test.go:316: (dbg) Run:  kubectl --context addons-764200 delete po -l run=registry-test --now
addons_test.go:321: (dbg) Run:  kubectl --context addons-764200 run --rm registry-test --restart=Never --image=gcr.io/k8s-minikube/busybox -it -- sh -c "wget --spider -S http://registry.kube-system.svc.cluster.local"
addons_test.go:321: (dbg) Done: kubectl --context addons-764200 run --rm registry-test --restart=Never --image=gcr.io/k8s-minikube/busybox -it -- sh -c "wget --spider -S http://registry.kube-system.svc.cluster.local": (2.570479802s)
addons_test.go:335: (dbg) Run:  out/minikube-linux-amd64 -p addons-764200 ip
2023/07/31 10:36:11 [DEBUG] GET http://192.168.49.2:5000
addons_test.go:364: (dbg) Run:  out/minikube-linux-amd64 -p addons-764200 addons disable registry --alsologtostderr -v=1
--- PASS: TestAddons/parallel/Registry (13.44s)
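The busybox step above amounts to an HTTP reachability check against the registry Service's cluster DNS name. A rough Go equivalent of that wget --spider probe, assuming it runs inside the cluster where the name resolves; the 5-second timeout is an arbitrary choice:

	package main

	import (
		"fmt"
		"net/http"
		"time"
	)

	func main() {
		// HEAD is enough here: like wget --spider, we only care whether
		// the registry answers, not about the response body.
		client := &http.Client{Timeout: 5 * time.Second}
		resp, err := client.Head("http://registry.kube-system.svc.cluster.local")
		if err != nil {
			fmt.Println("registry unreachable:", err)
			return
		}
		defer resp.Body.Close()
		fmt.Println("registry responded:", resp.Status)
	}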

TestAddons/parallel/InspektorGadget (10.93s)

=== RUN   TestAddons/parallel/InspektorGadget
=== PAUSE TestAddons/parallel/InspektorGadget

=== CONT  TestAddons/parallel/InspektorGadget
addons_test.go:814: (dbg) TestAddons/parallel/InspektorGadget: waiting 8m0s for pods matching "k8s-app=gadget" in namespace "gadget" ...
helpers_test.go:344: "gadget-7gkdx" [ef8cbd6c-ac63-4796-b14d-c758b9f8f7ad] Running
addons_test.go:814: (dbg) TestAddons/parallel/InspektorGadget: k8s-app=gadget healthy within 5.009478421s
addons_test.go:817: (dbg) Run:  out/minikube-linux-amd64 addons disable inspektor-gadget -p addons-764200
addons_test.go:817: (dbg) Done: out/minikube-linux-amd64 addons disable inspektor-gadget -p addons-764200: (5.916055819s)
--- PASS: TestAddons/parallel/InspektorGadget (10.93s)

TestAddons/parallel/MetricsServer (5.76s)

=== RUN   TestAddons/parallel/MetricsServer
=== PAUSE TestAddons/parallel/MetricsServer

=== CONT  TestAddons/parallel/MetricsServer
addons_test.go:383: metrics-server stabilized in 14.393383ms
addons_test.go:385: (dbg) TestAddons/parallel/MetricsServer: waiting 6m0s for pods matching "k8s-app=metrics-server" in namespace "kube-system" ...
helpers_test.go:344: "metrics-server-7746886d4f-hxf8x" [e382d580-9b99-4052-af8a-b12f7feb77c8] Running
addons_test.go:385: (dbg) TestAddons/parallel/MetricsServer: k8s-app=metrics-server healthy within 5.011876448s
addons_test.go:391: (dbg) Run:  kubectl --context addons-764200 top pods -n kube-system
addons_test.go:408: (dbg) Run:  out/minikube-linux-amd64 -p addons-764200 addons disable metrics-server --alsologtostderr -v=1
--- PASS: TestAddons/parallel/MetricsServer (5.76s)

TestAddons/parallel/HelmTiller (9.72s)

=== RUN   TestAddons/parallel/HelmTiller
=== PAUSE TestAddons/parallel/HelmTiller

=== CONT  TestAddons/parallel/HelmTiller
addons_test.go:432: tiller-deploy stabilized in 3.099168ms
addons_test.go:434: (dbg) TestAddons/parallel/HelmTiller: waiting 6m0s for pods matching "app=helm" in namespace "kube-system" ...
helpers_test.go:344: "tiller-deploy-6847666dc-znc64" [638f1f2a-cd4f-4f07-8b4c-603fd5bafca3] Running
addons_test.go:434: (dbg) TestAddons/parallel/HelmTiller: app=helm healthy within 5.058485239s
addons_test.go:449: (dbg) Run:  kubectl --context addons-764200 run --rm helm-test --restart=Never --image=docker.io/alpine/helm:2.16.3 -it --namespace=kube-system -- version
addons_test.go:449: (dbg) Done: kubectl --context addons-764200 run --rm helm-test --restart=Never --image=docker.io/alpine/helm:2.16.3 -it --namespace=kube-system -- version: (4.190422381s)
addons_test.go:466: (dbg) Run:  out/minikube-linux-amd64 -p addons-764200 addons disable helm-tiller --alsologtostderr -v=1
--- PASS: TestAddons/parallel/HelmTiller (9.72s)

TestAddons/parallel/CSI (70.44s)

=== RUN   TestAddons/parallel/CSI
=== PAUSE TestAddons/parallel/CSI

=== CONT  TestAddons/parallel/CSI
addons_test.go:537: csi-hostpath-driver pods stabilized in 5.411897ms
addons_test.go:540: (dbg) Run:  kubectl --context addons-764200 create -f testdata/csi-hostpath-driver/pvc.yaml
addons_test.go:545: (dbg) TestAddons/parallel/CSI: waiting 6m0s for pvc "hpvc" in namespace "default" ...
helpers_test.go:394: (dbg) Run:  kubectl --context addons-764200 get pvc hpvc -o jsonpath={.status.phase} -n default
helpers_test.go:394: (dbg) Run:  kubectl --context addons-764200 get pvc hpvc -o jsonpath={.status.phase} -n default
helpers_test.go:394: (dbg) Run:  kubectl --context addons-764200 get pvc hpvc -o jsonpath={.status.phase} -n default
helpers_test.go:394: (dbg) Run:  kubectl --context addons-764200 get pvc hpvc -o jsonpath={.status.phase} -n default
helpers_test.go:394: (dbg) Run:  kubectl --context addons-764200 get pvc hpvc -o jsonpath={.status.phase} -n default
helpers_test.go:394: (dbg) Run:  kubectl --context addons-764200 get pvc hpvc -o jsonpath={.status.phase} -n default
helpers_test.go:394: (dbg) Run:  kubectl --context addons-764200 get pvc hpvc -o jsonpath={.status.phase} -n default
helpers_test.go:394: (dbg) Run:  kubectl --context addons-764200 get pvc hpvc -o jsonpath={.status.phase} -n default
helpers_test.go:394: (dbg) Run:  kubectl --context addons-764200 get pvc hpvc -o jsonpath={.status.phase} -n default
helpers_test.go:394: (dbg) Run:  kubectl --context addons-764200 get pvc hpvc -o jsonpath={.status.phase} -n default
helpers_test.go:394: (dbg) Run:  kubectl --context addons-764200 get pvc hpvc -o jsonpath={.status.phase} -n default
helpers_test.go:394: (dbg) Run:  kubectl --context addons-764200 get pvc hpvc -o jsonpath={.status.phase} -n default
helpers_test.go:394: (dbg) Run:  kubectl --context addons-764200 get pvc hpvc -o jsonpath={.status.phase} -n default
helpers_test.go:394: (dbg) Run:  kubectl --context addons-764200 get pvc hpvc -o jsonpath={.status.phase} -n default
helpers_test.go:394: (dbg) Run:  kubectl --context addons-764200 get pvc hpvc -o jsonpath={.status.phase} -n default
helpers_test.go:394: (dbg) Run:  kubectl --context addons-764200 get pvc hpvc -o jsonpath={.status.phase} -n default
helpers_test.go:394: (dbg) Run:  kubectl --context addons-764200 get pvc hpvc -o jsonpath={.status.phase} -n default
helpers_test.go:394: (dbg) Run:  kubectl --context addons-764200 get pvc hpvc -o jsonpath={.status.phase} -n default
helpers_test.go:394: (dbg) Run:  kubectl --context addons-764200 get pvc hpvc -o jsonpath={.status.phase} -n default
helpers_test.go:394: (dbg) Run:  kubectl --context addons-764200 get pvc hpvc -o jsonpath={.status.phase} -n default
helpers_test.go:394: (dbg) Run:  kubectl --context addons-764200 get pvc hpvc -o jsonpath={.status.phase} -n default
helpers_test.go:394: (dbg) Run:  kubectl --context addons-764200 get pvc hpvc -o jsonpath={.status.phase} -n default
helpers_test.go:394: (dbg) Run:  kubectl --context addons-764200 get pvc hpvc -o jsonpath={.status.phase} -n default
helpers_test.go:394: (dbg) Run:  kubectl --context addons-764200 get pvc hpvc -o jsonpath={.status.phase} -n default
helpers_test.go:394: (dbg) Run:  kubectl --context addons-764200 get pvc hpvc -o jsonpath={.status.phase} -n default
helpers_test.go:394: (dbg) Run:  kubectl --context addons-764200 get pvc hpvc -o jsonpath={.status.phase} -n default
addons_test.go:550: (dbg) Run:  kubectl --context addons-764200 create -f testdata/csi-hostpath-driver/pv-pod.yaml
addons_test.go:555: (dbg) TestAddons/parallel/CSI: waiting 6m0s for pods matching "app=task-pv-pod" in namespace "default" ...
helpers_test.go:344: "task-pv-pod" [95e41e20-6b3d-45ac-b9dc-cfa4bae56aa7] Pending
helpers_test.go:344: "task-pv-pod" [95e41e20-6b3d-45ac-b9dc-cfa4bae56aa7] Pending / Ready:ContainersNotReady (containers with unready status: [task-pv-container]) / ContainersReady:ContainersNotReady (containers with unready status: [task-pv-container])
helpers_test.go:344: "task-pv-pod" [95e41e20-6b3d-45ac-b9dc-cfa4bae56aa7] Running
addons_test.go:555: (dbg) TestAddons/parallel/CSI: app=task-pv-pod healthy within 13.06048753s
addons_test.go:560: (dbg) Run:  kubectl --context addons-764200 create -f testdata/csi-hostpath-driver/snapshot.yaml
addons_test.go:565: (dbg) TestAddons/parallel/CSI: waiting 6m0s for volume snapshot "new-snapshot-demo" in namespace "default" ...
helpers_test.go:419: (dbg) Run:  kubectl --context addons-764200 get volumesnapshot new-snapshot-demo -o jsonpath={.status.readyToUse} -n default
helpers_test.go:419: (dbg) Run:  kubectl --context addons-764200 get volumesnapshot new-snapshot-demo -o jsonpath={.status.readyToUse} -n default
helpers_test.go:419: (dbg) Run:  kubectl --context addons-764200 get volumesnapshot new-snapshot-demo -o jsonpath={.status.readyToUse} -n default
addons_test.go:570: (dbg) Run:  kubectl --context addons-764200 delete pod task-pv-pod
addons_test.go:576: (dbg) Run:  kubectl --context addons-764200 delete pvc hpvc
addons_test.go:582: (dbg) Run:  kubectl --context addons-764200 create -f testdata/csi-hostpath-driver/pvc-restore.yaml
addons_test.go:587: (dbg) TestAddons/parallel/CSI: waiting 6m0s for pvc "hpvc-restore" in namespace "default" ...
helpers_test.go:394: (dbg) Run:  kubectl --context addons-764200 get pvc hpvc-restore -o jsonpath={.status.phase} -n default
helpers_test.go:394: (dbg) Run:  kubectl --context addons-764200 get pvc hpvc-restore -o jsonpath={.status.phase} -n default
helpers_test.go:394: (dbg) Run:  kubectl --context addons-764200 get pvc hpvc-restore -o jsonpath={.status.phase} -n default
helpers_test.go:394: (dbg) Run:  kubectl --context addons-764200 get pvc hpvc-restore -o jsonpath={.status.phase} -n default
helpers_test.go:394: (dbg) Run:  kubectl --context addons-764200 get pvc hpvc-restore -o jsonpath={.status.phase} -n default
helpers_test.go:394: (dbg) Run:  kubectl --context addons-764200 get pvc hpvc-restore -o jsonpath={.status.phase} -n default
helpers_test.go:394: (dbg) Run:  kubectl --context addons-764200 get pvc hpvc-restore -o jsonpath={.status.phase} -n default
helpers_test.go:394: (dbg) Run:  kubectl --context addons-764200 get pvc hpvc-restore -o jsonpath={.status.phase} -n default
helpers_test.go:394: (dbg) Run:  kubectl --context addons-764200 get pvc hpvc-restore -o jsonpath={.status.phase} -n default
helpers_test.go:394: (dbg) Run:  kubectl --context addons-764200 get pvc hpvc-restore -o jsonpath={.status.phase} -n default
helpers_test.go:394: (dbg) Run:  kubectl --context addons-764200 get pvc hpvc-restore -o jsonpath={.status.phase} -n default
helpers_test.go:394: (dbg) Run:  kubectl --context addons-764200 get pvc hpvc-restore -o jsonpath={.status.phase} -n default
helpers_test.go:394: (dbg) Run:  kubectl --context addons-764200 get pvc hpvc-restore -o jsonpath={.status.phase} -n default
helpers_test.go:394: (dbg) Run:  kubectl --context addons-764200 get pvc hpvc-restore -o jsonpath={.status.phase} -n default
helpers_test.go:394: (dbg) Run:  kubectl --context addons-764200 get pvc hpvc-restore -o jsonpath={.status.phase} -n default
helpers_test.go:394: (dbg) Run:  kubectl --context addons-764200 get pvc hpvc-restore -o jsonpath={.status.phase} -n default
addons_test.go:592: (dbg) Run:  kubectl --context addons-764200 create -f testdata/csi-hostpath-driver/pv-pod-restore.yaml
addons_test.go:597: (dbg) TestAddons/parallel/CSI: waiting 6m0s for pods matching "app=task-pv-pod-restore" in namespace "default" ...
helpers_test.go:344: "task-pv-pod-restore" [e8be1c7b-4fda-4550-825d-79ed3d4b82c3] Pending
helpers_test.go:344: "task-pv-pod-restore" [e8be1c7b-4fda-4550-825d-79ed3d4b82c3] Pending / Ready:ContainersNotReady (containers with unready status: [task-pv-container]) / ContainersReady:ContainersNotReady (containers with unready status: [task-pv-container])
helpers_test.go:344: "task-pv-pod-restore" [e8be1c7b-4fda-4550-825d-79ed3d4b82c3] Running
addons_test.go:597: (dbg) TestAddons/parallel/CSI: app=task-pv-pod-restore healthy within 7.008341676s
addons_test.go:602: (dbg) Run:  kubectl --context addons-764200 delete pod task-pv-pod-restore
addons_test.go:602: (dbg) Done: kubectl --context addons-764200 delete pod task-pv-pod-restore: (1.314175144s)
addons_test.go:606: (dbg) Run:  kubectl --context addons-764200 delete pvc hpvc-restore
addons_test.go:610: (dbg) Run:  kubectl --context addons-764200 delete volumesnapshot new-snapshot-demo
addons_test.go:614: (dbg) Run:  out/minikube-linux-amd64 -p addons-764200 addons disable csi-hostpath-driver --alsologtostderr -v=1
addons_test.go:614: (dbg) Done: out/minikube-linux-amd64 -p addons-764200 addons disable csi-hostpath-driver --alsologtostderr -v=1: (6.520489267s)
addons_test.go:618: (dbg) Run:  out/minikube-linux-amd64 -p addons-764200 addons disable volumesnapshots --alsologtostderr -v=1
--- PASS: TestAddons/parallel/CSI (70.44s)
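The long runs of helpers_test.go:394 lines above are a poll loop: the helper re-runs kubectl until the PVC reports the phase it is waiting for. A standalone sketch of that loop, using the context name from the log; the poll interval and deadline are arbitrary choices, not the helper's actual values:

	package main

	import (
		"fmt"
		"os/exec"
		"time"
	)

	// waitForPVCPhase polls `kubectl get pvc` until .status.phase matches want.
	func waitForPVCPhase(ctx, name, want string, deadline time.Duration) error {
		stop := time.Now().Add(deadline)
		for time.Now().Before(stop) {
			out, err := exec.Command("kubectl", "--context", ctx, "get", "pvc", name,
				"-o", "jsonpath={.status.phase}", "-n", "default").Output()
			if err == nil && string(out) == want {
				return nil
			}
			time.Sleep(2 * time.Second)
		}
		return fmt.Errorf("pvc %q did not reach phase %q within %v", name, want, deadline)
	}

	func main() {
		fmt.Println(waitForPVCPhase("addons-764200", "hpvc", "Bound", 6*time.Minute))
	}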

TestAddons/parallel/Headlamp (14.99s)

=== RUN   TestAddons/parallel/Headlamp
=== PAUSE TestAddons/parallel/Headlamp

=== CONT  TestAddons/parallel/Headlamp
addons_test.go:800: (dbg) Run:  out/minikube-linux-amd64 addons enable headlamp -p addons-764200 --alsologtostderr -v=1
addons_test.go:805: (dbg) TestAddons/parallel/Headlamp: waiting 8m0s for pods matching "app.kubernetes.io/name=headlamp" in namespace "headlamp" ...
helpers_test.go:344: "headlamp-66f6498c69-6j98t" [f93ddc71-2667-4bef-b045-aff0efdf611a] Pending / Ready:ContainersNotReady (containers with unready status: [headlamp]) / ContainersReady:ContainersNotReady (containers with unready status: [headlamp])
helpers_test.go:344: "headlamp-66f6498c69-6j98t" [f93ddc71-2667-4bef-b045-aff0efdf611a] Running
addons_test.go:805: (dbg) TestAddons/parallel/Headlamp: app.kubernetes.io/name=headlamp healthy within 14.041766512s
--- PASS: TestAddons/parallel/Headlamp (14.99s)

TestAddons/parallel/CloudSpanner (5.54s)

=== RUN   TestAddons/parallel/CloudSpanner
=== PAUSE TestAddons/parallel/CloudSpanner

=== CONT  TestAddons/parallel/CloudSpanner
addons_test.go:833: (dbg) TestAddons/parallel/CloudSpanner: waiting 6m0s for pods matching "app=cloud-spanner-emulator" in namespace "default" ...
helpers_test.go:344: "cloud-spanner-emulator-88647b4cb-mv2xq" [41cc5b62-11af-4e99-9699-adc2d84c5231] Running
addons_test.go:833: (dbg) TestAddons/parallel/CloudSpanner: app=cloud-spanner-emulator healthy within 5.008086086s
addons_test.go:836: (dbg) Run:  out/minikube-linux-amd64 addons disable cloud-spanner -p addons-764200
--- PASS: TestAddons/parallel/CloudSpanner (5.54s)

TestAddons/serial/GCPAuth/Namespaces (0.11s)

=== RUN   TestAddons/serial/GCPAuth/Namespaces
addons_test.go:626: (dbg) Run:  kubectl --context addons-764200 create ns new-namespace
addons_test.go:640: (dbg) Run:  kubectl --context addons-764200 get secret gcp-auth -n new-namespace
--- PASS: TestAddons/serial/GCPAuth/Namespaces (0.11s)

TestAddons/StoppedEnableDisable (12.07s)

=== RUN   TestAddons/StoppedEnableDisable
addons_test.go:148: (dbg) Run:  out/minikube-linux-amd64 stop -p addons-764200
addons_test.go:148: (dbg) Done: out/minikube-linux-amd64 stop -p addons-764200: (11.860923708s)
addons_test.go:152: (dbg) Run:  out/minikube-linux-amd64 addons enable dashboard -p addons-764200
addons_test.go:156: (dbg) Run:  out/minikube-linux-amd64 addons disable dashboard -p addons-764200
addons_test.go:161: (dbg) Run:  out/minikube-linux-amd64 addons disable gvisor -p addons-764200
--- PASS: TestAddons/StoppedEnableDisable (12.07s)

TestCertOptions (26.61s)

=== RUN   TestCertOptions
=== PAUSE TestCertOptions

=== CONT  TestCertOptions
cert_options_test.go:49: (dbg) Run:  out/minikube-linux-amd64 start -p cert-options-852335 --memory=2048 --apiserver-ips=127.0.0.1 --apiserver-ips=192.168.15.15 --apiserver-names=localhost --apiserver-names=www.google.com --apiserver-port=8555 --driver=docker  --container-runtime=crio
cert_options_test.go:49: (dbg) Done: out/minikube-linux-amd64 start -p cert-options-852335 --memory=2048 --apiserver-ips=127.0.0.1 --apiserver-ips=192.168.15.15 --apiserver-names=localhost --apiserver-names=www.google.com --apiserver-port=8555 --driver=docker  --container-runtime=crio: (21.512701186s)
cert_options_test.go:60: (dbg) Run:  out/minikube-linux-amd64 -p cert-options-852335 ssh "openssl x509 -text -noout -in /var/lib/minikube/certs/apiserver.crt"
cert_options_test.go:88: (dbg) Run:  kubectl --context cert-options-852335 config view
cert_options_test.go:100: (dbg) Run:  out/minikube-linux-amd64 ssh -p cert-options-852335 -- "sudo cat /etc/kubernetes/admin.conf"
helpers_test.go:175: Cleaning up "cert-options-852335" profile ...
helpers_test.go:178: (dbg) Run:  out/minikube-linux-amd64 delete -p cert-options-852335
helpers_test.go:178: (dbg) Done: out/minikube-linux-amd64 delete -p cert-options-852335: (4.449801215s)
--- PASS: TestCertOptions (26.61s)

TestCertExpiration (232.03s)

=== RUN   TestCertExpiration
=== PAUSE TestCertExpiration

=== CONT  TestCertExpiration
cert_options_test.go:123: (dbg) Run:  out/minikube-linux-amd64 start -p cert-expiration-666955 --memory=2048 --cert-expiration=3m --driver=docker  --container-runtime=crio
cert_options_test.go:123: (dbg) Done: out/minikube-linux-amd64 start -p cert-expiration-666955 --memory=2048 --cert-expiration=3m --driver=docker  --container-runtime=crio: (24.572407993s)
cert_options_test.go:131: (dbg) Run:  out/minikube-linux-amd64 start -p cert-expiration-666955 --memory=2048 --cert-expiration=8760h --driver=docker  --container-runtime=crio
cert_options_test.go:131: (dbg) Done: out/minikube-linux-amd64 start -p cert-expiration-666955 --memory=2048 --cert-expiration=8760h --driver=docker  --container-runtime=crio: (25.550596721s)
helpers_test.go:175: Cleaning up "cert-expiration-666955" profile ...
helpers_test.go:178: (dbg) Run:  out/minikube-linux-amd64 delete -p cert-expiration-666955
helpers_test.go:178: (dbg) Done: out/minikube-linux-amd64 delete -p cert-expiration-666955: (1.908917061s)
--- PASS: TestCertExpiration (232.03s)

TestForceSystemdFlag (30.73s)

=== RUN   TestForceSystemdFlag
=== PAUSE TestForceSystemdFlag

=== CONT  TestForceSystemdFlag
docker_test.go:91: (dbg) Run:  out/minikube-linux-amd64 start -p force-systemd-flag-674288 --memory=2048 --force-systemd --alsologtostderr -v=5 --driver=docker  --container-runtime=crio
docker_test.go:91: (dbg) Done: out/minikube-linux-amd64 start -p force-systemd-flag-674288 --memory=2048 --force-systemd --alsologtostderr -v=5 --driver=docker  --container-runtime=crio: (26.232545682s)
docker_test.go:132: (dbg) Run:  out/minikube-linux-amd64 -p force-systemd-flag-674288 ssh "cat /etc/crio/crio.conf.d/02-crio.conf"
helpers_test.go:175: Cleaning up "force-systemd-flag-674288" profile ...
helpers_test.go:178: (dbg) Run:  out/minikube-linux-amd64 delete -p force-systemd-flag-674288
helpers_test.go:178: (dbg) Done: out/minikube-linux-amd64 delete -p force-systemd-flag-674288: (4.209144543s)
--- PASS: TestForceSystemdFlag (30.73s)

TestForceSystemdEnv (37.97s)

=== RUN   TestForceSystemdEnv
=== PAUSE TestForceSystemdEnv

=== CONT  TestForceSystemdEnv
docker_test.go:155: (dbg) Run:  out/minikube-linux-amd64 start -p force-systemd-env-929753 --memory=2048 --alsologtostderr -v=5 --driver=docker  --container-runtime=crio
docker_test.go:155: (dbg) Done: out/minikube-linux-amd64 start -p force-systemd-env-929753 --memory=2048 --alsologtostderr -v=5 --driver=docker  --container-runtime=crio: (35.565710963s)
helpers_test.go:175: Cleaning up "force-systemd-env-929753" profile ...
helpers_test.go:178: (dbg) Run:  out/minikube-linux-amd64 delete -p force-systemd-env-929753
helpers_test.go:178: (dbg) Done: out/minikube-linux-amd64 delete -p force-systemd-env-929753: (2.405883283s)
--- PASS: TestForceSystemdEnv (37.97s)

TestKVMDriverInstallOrUpdate (3.05s)

=== RUN   TestKVMDriverInstallOrUpdate
=== PAUSE TestKVMDriverInstallOrUpdate

=== CONT  TestKVMDriverInstallOrUpdate
--- PASS: TestKVMDriverInstallOrUpdate (3.05s)

TestErrorSpam/setup (23.22s)

=== RUN   TestErrorSpam/setup
error_spam_test.go:81: (dbg) Run:  out/minikube-linux-amd64 start -p nospam-313036 -n=1 --memory=2250 --wait=false --log_dir=/tmp/nospam-313036 --driver=docker  --container-runtime=crio
error_spam_test.go:81: (dbg) Done: out/minikube-linux-amd64 start -p nospam-313036 -n=1 --memory=2250 --wait=false --log_dir=/tmp/nospam-313036 --driver=docker  --container-runtime=crio: (23.216163692s)
--- PASS: TestErrorSpam/setup (23.22s)

TestErrorSpam/start (0.55s)

=== RUN   TestErrorSpam/start
error_spam_test.go:216: Cleaning up 1 logfile(s) ...
error_spam_test.go:159: (dbg) Run:  out/minikube-linux-amd64 -p nospam-313036 --log_dir /tmp/nospam-313036 start --dry-run
error_spam_test.go:159: (dbg) Run:  out/minikube-linux-amd64 -p nospam-313036 --log_dir /tmp/nospam-313036 start --dry-run
error_spam_test.go:182: (dbg) Run:  out/minikube-linux-amd64 -p nospam-313036 --log_dir /tmp/nospam-313036 start --dry-run
--- PASS: TestErrorSpam/start (0.55s)

TestErrorSpam/status (0.82s)

=== RUN   TestErrorSpam/status
error_spam_test.go:216: Cleaning up 0 logfile(s) ...
error_spam_test.go:159: (dbg) Run:  out/minikube-linux-amd64 -p nospam-313036 --log_dir /tmp/nospam-313036 status
error_spam_test.go:159: (dbg) Run:  out/minikube-linux-amd64 -p nospam-313036 --log_dir /tmp/nospam-313036 status
error_spam_test.go:182: (dbg) Run:  out/minikube-linux-amd64 -p nospam-313036 --log_dir /tmp/nospam-313036 status
--- PASS: TestErrorSpam/status (0.82s)

TestErrorSpam/pause (1.43s)

=== RUN   TestErrorSpam/pause
error_spam_test.go:216: Cleaning up 0 logfile(s) ...
error_spam_test.go:159: (dbg) Run:  out/minikube-linux-amd64 -p nospam-313036 --log_dir /tmp/nospam-313036 pause
error_spam_test.go:159: (dbg) Run:  out/minikube-linux-amd64 -p nospam-313036 --log_dir /tmp/nospam-313036 pause
error_spam_test.go:182: (dbg) Run:  out/minikube-linux-amd64 -p nospam-313036 --log_dir /tmp/nospam-313036 pause
--- PASS: TestErrorSpam/pause (1.43s)

TestErrorSpam/unpause (1.4s)

=== RUN   TestErrorSpam/unpause
error_spam_test.go:216: Cleaning up 0 logfile(s) ...
error_spam_test.go:159: (dbg) Run:  out/minikube-linux-amd64 -p nospam-313036 --log_dir /tmp/nospam-313036 unpause
error_spam_test.go:159: (dbg) Run:  out/minikube-linux-amd64 -p nospam-313036 --log_dir /tmp/nospam-313036 unpause
error_spam_test.go:182: (dbg) Run:  out/minikube-linux-amd64 -p nospam-313036 --log_dir /tmp/nospam-313036 unpause
--- PASS: TestErrorSpam/unpause (1.40s)

TestErrorSpam/stop (1.34s)

=== RUN   TestErrorSpam/stop
error_spam_test.go:216: Cleaning up 0 logfile(s) ...
error_spam_test.go:159: (dbg) Run:  out/minikube-linux-amd64 -p nospam-313036 --log_dir /tmp/nospam-313036 stop
error_spam_test.go:159: (dbg) Done: out/minikube-linux-amd64 -p nospam-313036 --log_dir /tmp/nospam-313036 stop: (1.181112464s)
error_spam_test.go:159: (dbg) Run:  out/minikube-linux-amd64 -p nospam-313036 --log_dir /tmp/nospam-313036 stop
error_spam_test.go:182: (dbg) Run:  out/minikube-linux-amd64 -p nospam-313036 --log_dir /tmp/nospam-313036 stop
--- PASS: TestErrorSpam/stop (1.34s)

TestFunctional/serial/CopySyncFile (0s)

=== RUN   TestFunctional/serial/CopySyncFile
functional_test.go:1851: local sync path: /home/jenkins/minikube-integration/16969-5799/.minikube/files/etc/test/nested/copy/12537/hosts
--- PASS: TestFunctional/serial/CopySyncFile (0.00s)

TestFunctional/serial/StartWithProxy (68.39s)

=== RUN   TestFunctional/serial/StartWithProxy
functional_test.go:2230: (dbg) Run:  out/minikube-linux-amd64 start -p functional-683521 --memory=4000 --apiserver-port=8441 --wait=all --driver=docker  --container-runtime=crio
E0731 10:40:58.882029   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/addons-764200/client.crt: no such file or directory
E0731 10:40:58.887792   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/addons-764200/client.crt: no such file or directory
E0731 10:40:58.898041   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/addons-764200/client.crt: no such file or directory
E0731 10:40:58.918254   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/addons-764200/client.crt: no such file or directory
E0731 10:40:58.958496   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/addons-764200/client.crt: no such file or directory
E0731 10:40:59.038774   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/addons-764200/client.crt: no such file or directory
E0731 10:40:59.199182   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/addons-764200/client.crt: no such file or directory
E0731 10:40:59.519562   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/addons-764200/client.crt: no such file or directory
E0731 10:41:00.160450   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/addons-764200/client.crt: no such file or directory
E0731 10:41:01.440902   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/addons-764200/client.crt: no such file or directory
functional_test.go:2230: (dbg) Done: out/minikube-linux-amd64 start -p functional-683521 --memory=4000 --apiserver-port=8441 --wait=all --driver=docker  --container-runtime=crio: (1m8.393417884s)
--- PASS: TestFunctional/serial/StartWithProxy (68.39s)

TestFunctional/serial/AuditLog (0s)

=== RUN   TestFunctional/serial/AuditLog
--- PASS: TestFunctional/serial/AuditLog (0.00s)

TestFunctional/serial/SoftStart (25.13s)

=== RUN   TestFunctional/serial/SoftStart
functional_test.go:655: (dbg) Run:  out/minikube-linux-amd64 start -p functional-683521 --alsologtostderr -v=8
E0731 10:41:04.001390   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/addons-764200/client.crt: no such file or directory
E0731 10:41:09.122453   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/addons-764200/client.crt: no such file or directory
E0731 10:41:19.363405   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/addons-764200/client.crt: no such file or directory
functional_test.go:655: (dbg) Done: out/minikube-linux-amd64 start -p functional-683521 --alsologtostderr -v=8: (25.129889993s)
functional_test.go:659: soft start took 25.130555458s for "functional-683521" cluster.
--- PASS: TestFunctional/serial/SoftStart (25.13s)

TestFunctional/serial/KubeContext (0.04s)

=== RUN   TestFunctional/serial/KubeContext
functional_test.go:677: (dbg) Run:  kubectl config current-context
--- PASS: TestFunctional/serial/KubeContext (0.04s)

TestFunctional/serial/KubectlGetPods (0.07s)

=== RUN   TestFunctional/serial/KubectlGetPods
functional_test.go:692: (dbg) Run:  kubectl --context functional-683521 get po -A
--- PASS: TestFunctional/serial/KubectlGetPods (0.07s)

TestFunctional/serial/CacheCmd/cache/add_remote (2.76s)

=== RUN   TestFunctional/serial/CacheCmd/cache/add_remote
functional_test.go:1045: (dbg) Run:  out/minikube-linux-amd64 -p functional-683521 cache add registry.k8s.io/pause:3.1
functional_test.go:1045: (dbg) Run:  out/minikube-linux-amd64 -p functional-683521 cache add registry.k8s.io/pause:3.3
functional_test.go:1045: (dbg) Run:  out/minikube-linux-amd64 -p functional-683521 cache add registry.k8s.io/pause:latest
--- PASS: TestFunctional/serial/CacheCmd/cache/add_remote (2.76s)

TestFunctional/serial/CacheCmd/cache/add_local (1.09s)

=== RUN   TestFunctional/serial/CacheCmd/cache/add_local
functional_test.go:1073: (dbg) Run:  docker build -t minikube-local-cache-test:functional-683521 /tmp/TestFunctionalserialCacheCmdcacheadd_local2955978906/001
functional_test.go:1085: (dbg) Run:  out/minikube-linux-amd64 -p functional-683521 cache add minikube-local-cache-test:functional-683521
functional_test.go:1090: (dbg) Run:  out/minikube-linux-amd64 -p functional-683521 cache delete minikube-local-cache-test:functional-683521
functional_test.go:1079: (dbg) Run:  docker rmi minikube-local-cache-test:functional-683521
--- PASS: TestFunctional/serial/CacheCmd/cache/add_local (1.09s)

TestFunctional/serial/CacheCmd/cache/CacheDelete (0.04s)

=== RUN   TestFunctional/serial/CacheCmd/cache/CacheDelete
functional_test.go:1098: (dbg) Run:  out/minikube-linux-amd64 cache delete registry.k8s.io/pause:3.3
--- PASS: TestFunctional/serial/CacheCmd/cache/CacheDelete (0.04s)

TestFunctional/serial/CacheCmd/cache/list (0.04s)

=== RUN   TestFunctional/serial/CacheCmd/cache/list
functional_test.go:1106: (dbg) Run:  out/minikube-linux-amd64 cache list
--- PASS: TestFunctional/serial/CacheCmd/cache/list (0.04s)

TestFunctional/serial/CacheCmd/cache/verify_cache_inside_node (0.25s)

=== RUN   TestFunctional/serial/CacheCmd/cache/verify_cache_inside_node
functional_test.go:1120: (dbg) Run:  out/minikube-linux-amd64 -p functional-683521 ssh sudo crictl images
--- PASS: TestFunctional/serial/CacheCmd/cache/verify_cache_inside_node (0.25s)

TestFunctional/serial/CacheCmd/cache/cache_reload (1.54s)

=== RUN   TestFunctional/serial/CacheCmd/cache/cache_reload
functional_test.go:1143: (dbg) Run:  out/minikube-linux-amd64 -p functional-683521 ssh sudo crictl rmi registry.k8s.io/pause:latest
functional_test.go:1149: (dbg) Run:  out/minikube-linux-amd64 -p functional-683521 ssh sudo crictl inspecti registry.k8s.io/pause:latest
functional_test.go:1149: (dbg) Non-zero exit: out/minikube-linux-amd64 -p functional-683521 ssh sudo crictl inspecti registry.k8s.io/pause:latest: exit status 1 (253.030763ms)

-- stdout --
	FATA[0000] no such image "registry.k8s.io/pause:latest" present 

-- /stdout --
** stderr ** 
	ssh: Process exited with status 1

** /stderr **
functional_test.go:1154: (dbg) Run:  out/minikube-linux-amd64 -p functional-683521 cache reload
functional_test.go:1159: (dbg) Run:  out/minikube-linux-amd64 -p functional-683521 ssh sudo crictl inspecti registry.k8s.io/pause:latest
--- PASS: TestFunctional/serial/CacheCmd/cache/cache_reload (1.54s)
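The cache_reload sequence above is a four-step round trip: remove the cached image on the node, confirm crictl inspecti now fails, run cache reload, then confirm inspecti succeeds again. A hypothetical Go sketch of the same sequence driven through os/exec (not the test's actual code); the binary path and profile name are taken from the log:

	package main

	import (
		"fmt"
		"os/exec"
	)

	// run invokes the minikube binary with the given arguments and echoes the output.
	func run(args ...string) error {
		out, err := exec.Command("out/minikube-linux-amd64", args...).CombinedOutput()
		fmt.Printf("$ minikube %v\n%s", args, out)
		return err
	}

	func main() {
		p := "functional-683521"
		img := "registry.k8s.io/pause:latest"
		_ = run("-p", p, "ssh", "sudo crictl rmi "+img)
		// Expected to fail while the image is absent from the node.
		if run("-p", p, "ssh", "sudo crictl inspecti "+img) == nil {
			fmt.Println("expected inspecti to fail before reload")
		}
		_ = run("-p", p, "cache", "reload")
		if err := run("-p", p, "ssh", "sudo crictl inspecti "+img); err != nil {
			fmt.Println("image still missing after reload:", err)
		}
	}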

TestFunctional/serial/CacheCmd/cache/delete (0.08s)

=== RUN   TestFunctional/serial/CacheCmd/cache/delete
functional_test.go:1168: (dbg) Run:  out/minikube-linux-amd64 cache delete registry.k8s.io/pause:3.1
functional_test.go:1168: (dbg) Run:  out/minikube-linux-amd64 cache delete registry.k8s.io/pause:latest
--- PASS: TestFunctional/serial/CacheCmd/cache/delete (0.08s)

TestFunctional/serial/MinikubeKubectlCmd (0.1s)

=== RUN   TestFunctional/serial/MinikubeKubectlCmd
functional_test.go:712: (dbg) Run:  out/minikube-linux-amd64 -p functional-683521 kubectl -- --context functional-683521 get pods
--- PASS: TestFunctional/serial/MinikubeKubectlCmd (0.10s)

TestFunctional/serial/MinikubeKubectlCmdDirectly (0.1s)

=== RUN   TestFunctional/serial/MinikubeKubectlCmdDirectly
functional_test.go:737: (dbg) Run:  out/kubectl --context functional-683521 get pods
--- PASS: TestFunctional/serial/MinikubeKubectlCmdDirectly (0.10s)

TestFunctional/serial/ExtraConfig (32.03s)

=== RUN   TestFunctional/serial/ExtraConfig
functional_test.go:753: (dbg) Run:  out/minikube-linux-amd64 start -p functional-683521 --extra-config=apiserver.enable-admission-plugins=NamespaceAutoProvision --wait=all
E0731 10:41:39.843911   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/addons-764200/client.crt: no such file or directory
functional_test.go:753: (dbg) Done: out/minikube-linux-amd64 start -p functional-683521 --extra-config=apiserver.enable-admission-plugins=NamespaceAutoProvision --wait=all: (32.032883874s)
functional_test.go:757: restart took 32.033003565s for "functional-683521" cluster.
--- PASS: TestFunctional/serial/ExtraConfig (32.03s)

TestFunctional/serial/ComponentHealth (0.07s)

=== RUN   TestFunctional/serial/ComponentHealth
functional_test.go:806: (dbg) Run:  kubectl --context functional-683521 get po -l tier=control-plane -n kube-system -o=json
functional_test.go:821: etcd phase: Running
functional_test.go:831: etcd status: Ready
functional_test.go:821: kube-apiserver phase: Running
functional_test.go:831: kube-apiserver status: Ready
functional_test.go:821: kube-controller-manager phase: Running
functional_test.go:831: kube-controller-manager status: Ready
functional_test.go:821: kube-scheduler phase: Running
functional_test.go:831: kube-scheduler status: Ready
--- PASS: TestFunctional/serial/ComponentHealth (0.07s)
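The phase/status pairs printed by functional_test.go:821/831 above come from listing the tier=control-plane pods as JSON and reading .status.phase plus the Ready condition. A self-contained Go sketch of that check; the struct models only the fields this check reads, and the context name is taken from the log:

	package main

	import (
		"encoding/json"
		"fmt"
		"os/exec"
	)

	// podList models just enough of the `kubectl get po -o json` output.
	type podList struct {
		Items []struct {
			Metadata struct {
				Name string
			}
			Status struct {
				Phase      string
				Conditions []struct {
					Type   string
					Status string
				}
			}
		}
	}

	func main() {
		out, err := exec.Command("kubectl", "--context", "functional-683521", "get", "po",
			"-l", "tier=control-plane", "-n", "kube-system", "-o=json").Output()
		if err != nil {
			panic(err)
		}
		var pods podList
		if err := json.Unmarshal(out, &pods); err != nil {
			panic(err)
		}
		for _, p := range pods.Items {
			ready := "Unknown"
			for _, c := range p.Status.Conditions {
				if c.Type == "Ready" {
					ready = c.Status
				}
			}
			fmt.Printf("%s phase: %s, ready: %s\n", p.Metadata.Name, p.Status.Phase, ready)
		}
	}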

TestFunctional/serial/LogsCmd (1.26s)

=== RUN   TestFunctional/serial/LogsCmd
functional_test.go:1232: (dbg) Run:  out/minikube-linux-amd64 -p functional-683521 logs
functional_test.go:1232: (dbg) Done: out/minikube-linux-amd64 -p functional-683521 logs: (1.25812135s)
--- PASS: TestFunctional/serial/LogsCmd (1.26s)

                                                
                                    
TestFunctional/serial/LogsFileCmd (1.29s)

                                                
                                                
=== RUN   TestFunctional/serial/LogsFileCmd
functional_test.go:1246: (dbg) Run:  out/minikube-linux-amd64 -p functional-683521 logs --file /tmp/TestFunctionalserialLogsFileCmd3297394332/001/logs.txt
functional_test.go:1246: (dbg) Done: out/minikube-linux-amd64 -p functional-683521 logs --file /tmp/TestFunctionalserialLogsFileCmd3297394332/001/logs.txt: (1.290874356s)
--- PASS: TestFunctional/serial/LogsFileCmd (1.29s)

                                                
                                    
TestFunctional/serial/InvalidService (4.13s)

                                                
                                                
=== RUN   TestFunctional/serial/InvalidService
functional_test.go:2317: (dbg) Run:  kubectl --context functional-683521 apply -f testdata/invalidsvc.yaml
functional_test.go:2331: (dbg) Run:  out/minikube-linux-amd64 service invalid-svc -p functional-683521
functional_test.go:2331: (dbg) Non-zero exit: out/minikube-linux-amd64 service invalid-svc -p functional-683521: exit status 115 (305.856058ms)

                                                
                                                
-- stdout --
	|-----------|-------------|-------------|---------------------------|
	| NAMESPACE |    NAME     | TARGET PORT |            URL            |
	|-----------|-------------|-------------|---------------------------|
	| default   | invalid-svc |          80 | http://192.168.49.2:30852 |
	|-----------|-------------|-------------|---------------------------|
	
	

                                                
                                                
-- /stdout --
** stderr ** 
	X Exiting due to SVC_UNREACHABLE: service not available: no running pod for service invalid-svc found
	* 
	╭─────────────────────────────────────────────────────────────────────────────────────────────╮
	│                                                                                             │
	│    * If the above advice does not help, please let us know:                                 │
	│      https://github.com/kubernetes/minikube/issues/new/choose                               │
	│                                                                                             │
	│    * Please run `minikube logs --file=logs.txt` and attach logs.txt to the GitHub issue.    │
	│    * Please also attach the following file to the GitHub issue:                             │
	│    * - /tmp/minikube_service_96b204199e3191fa1740d4430b018a3c8028d52d_0.log                 │
	│                                                                                             │
	╰─────────────────────────────────────────────────────────────────────────────────────────────╯

                                                
                                                
** /stderr **
functional_test.go:2323: (dbg) Run:  kubectl --context functional-683521 delete -f testdata/invalidsvc.yaml
--- PASS: TestFunctional/serial/InvalidService (4.13s)
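
The expected failure mode is exit code 115 (SVC_UNREACHABLE): the service exists but no running pod backs it. A manual reproduction (sketch):

	kubectl --context functional-683521 apply -f testdata/invalidsvc.yaml
	out/minikube-linux-amd64 service invalid-svc -p functional-683521
	echo $?   # expect 115
	kubectl --context functional-683521 delete -f testdata/invalidsvc.yaml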

                                                
                                    
TestFunctional/parallel/ConfigCmd (0.33s)

                                                
                                                
=== RUN   TestFunctional/parallel/ConfigCmd
=== PAUSE TestFunctional/parallel/ConfigCmd

                                                
                                                

                                                
                                                
=== CONT  TestFunctional/parallel/ConfigCmd
functional_test.go:1195: (dbg) Run:  out/minikube-linux-amd64 -p functional-683521 config unset cpus
functional_test.go:1195: (dbg) Run:  out/minikube-linux-amd64 -p functional-683521 config get cpus
functional_test.go:1195: (dbg) Non-zero exit: out/minikube-linux-amd64 -p functional-683521 config get cpus: exit status 14 (52.034161ms)

                                                
                                                
** stderr ** 
	Error: specified key could not be found in config

                                                
                                                
** /stderr **
functional_test.go:1195: (dbg) Run:  out/minikube-linux-amd64 -p functional-683521 config set cpus 2
functional_test.go:1195: (dbg) Run:  out/minikube-linux-amd64 -p functional-683521 config get cpus
functional_test.go:1195: (dbg) Run:  out/minikube-linux-amd64 -p functional-683521 config unset cpus
functional_test.go:1195: (dbg) Run:  out/minikube-linux-amd64 -p functional-683521 config get cpus
functional_test.go:1195: (dbg) Non-zero exit: out/minikube-linux-amd64 -p functional-683521 config get cpus: exit status 14 (59.288924ms)

                                                
                                                
** stderr ** 
	Error: specified key could not be found in config

                                                
                                                
** /stderr **
--- PASS: TestFunctional/parallel/ConfigCmd (0.33s)
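
Note the round-trip being exercised: `config get` on an unset key exits 14, and set/unset bracket it. By hand (sketch):

	out/minikube-linux-amd64 -p functional-683521 config set cpus 2
	out/minikube-linux-amd64 -p functional-683521 config get cpus     # prints 2
	out/minikube-linux-amd64 -p functional-683521 config unset cpus
	out/minikube-linux-amd64 -p functional-683521 config get cpus     # exit 14: key not found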

                                                
                                    
TestFunctional/parallel/DashboardCmd (9.21s)

                                                
                                                
=== RUN   TestFunctional/parallel/DashboardCmd
=== PAUSE TestFunctional/parallel/DashboardCmd

                                                
                                                

                                                
                                                
=== CONT  TestFunctional/parallel/DashboardCmd
functional_test.go:901: (dbg) daemon: [out/minikube-linux-amd64 dashboard --url --port 36195 -p functional-683521 --alsologtostderr -v=1]
functional_test.go:906: (dbg) stopping [out/minikube-linux-amd64 dashboard --url --port 36195 -p functional-683521 --alsologtostderr -v=1] ...
helpers_test.go:508: unable to kill pid 46270: os: process already finished
--- PASS: TestFunctional/parallel/DashboardCmd (9.21s)

                                                
                                    
TestFunctional/parallel/DryRun (0.95s)

                                                
                                                
=== RUN   TestFunctional/parallel/DryRun
=== PAUSE TestFunctional/parallel/DryRun

                                                
                                                

                                                
                                                
=== CONT  TestFunctional/parallel/DryRun
functional_test.go:970: (dbg) Run:  out/minikube-linux-amd64 start -p functional-683521 --dry-run --memory 250MB --alsologtostderr --driver=docker  --container-runtime=crio
functional_test.go:970: (dbg) Non-zero exit: out/minikube-linux-amd64 start -p functional-683521 --dry-run --memory 250MB --alsologtostderr --driver=docker  --container-runtime=crio: exit status 23 (483.748401ms)

                                                
                                                
-- stdout --
	* [functional-683521] minikube v1.31.1 on Ubuntu 20.04 (kvm/amd64)
	  - MINIKUBE_LOCATION=16969
	  - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
	  - KUBECONFIG=/home/jenkins/minikube-integration/16969-5799/kubeconfig
	  - MINIKUBE_HOME=/home/jenkins/minikube-integration/16969-5799/.minikube
	  - MINIKUBE_BIN=out/minikube-linux-amd64
	  - MINIKUBE_FORCE_SYSTEMD=
	* Using the docker driver based on existing profile
	
	

                                                
                                                
-- /stdout --
** stderr ** 
	I0731 10:42:26.226223   45552 out.go:296] Setting OutFile to fd 1 ...
	I0731 10:42:26.226377   45552 out.go:343] TERM=,COLORTERM=, which probably does not support color
	I0731 10:42:26.226406   45552 out.go:309] Setting ErrFile to fd 2...
	I0731 10:42:26.226416   45552 out.go:343] TERM=,COLORTERM=, which probably does not support color
	I0731 10:42:26.226644   45552 root.go:338] Updating PATH: /home/jenkins/minikube-integration/16969-5799/.minikube/bin
	I0731 10:42:26.227362   45552 out.go:303] Setting JSON to false
	I0731 10:42:26.228953   45552 start.go:128] hostinfo: {"hostname":"ubuntu-20-agent-15","uptime":1498,"bootTime":1690798648,"procs":530,"os":"linux","platform":"ubuntu","platformFamily":"debian","platformVersion":"20.04","kernelVersion":"5.15.0-1038-gcp","kernelArch":"x86_64","virtualizationSystem":"kvm","virtualizationRole":"guest","hostId":"591c9f12-2938-3743-e2bf-c56a050d43d1"}
	I0731 10:42:26.229044   45552 start.go:138] virtualization: kvm guest
	I0731 10:42:26.279064   45552 out.go:177] * [functional-683521] minikube v1.31.1 on Ubuntu 20.04 (kvm/amd64)
	I0731 10:42:26.296431   45552 notify.go:220] Checking for updates...
	I0731 10:42:26.296438   45552 out.go:177]   - MINIKUBE_LOCATION=16969
	I0731 10:42:26.312206   45552 out.go:177]   - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
	I0731 10:42:26.322552   45552 out.go:177]   - KUBECONFIG=/home/jenkins/minikube-integration/16969-5799/kubeconfig
	I0731 10:42:26.326144   45552 out.go:177]   - MINIKUBE_HOME=/home/jenkins/minikube-integration/16969-5799/.minikube
	I0731 10:42:26.328923   45552 out.go:177]   - MINIKUBE_BIN=out/minikube-linux-amd64
	I0731 10:42:26.335493   45552 out.go:177]   - MINIKUBE_FORCE_SYSTEMD=
	I0731 10:42:26.352782   45552 config.go:182] Loaded profile config "functional-683521": Driver=docker, ContainerRuntime=crio, KubernetesVersion=v1.27.3
	I0731 10:42:26.353202   45552 driver.go:373] Setting default libvirt URI to qemu:///system
	I0731 10:42:26.377677   45552 docker.go:121] docker version: linux-24.0.5:Docker Engine - Community
	I0731 10:42:26.377812   45552 cli_runner.go:164] Run: docker system info --format "{{json .}}"
	I0731 10:42:26.431921   45552 info.go:266] docker info: {ID:TS6T:UINC:MIYS:RZPA:KS6T:4JQK:7JHN:D6RA:LDP2:MHAE:G32M:C5NQ Containers:1 ContainersRunning:1 ContainersPaused:0 ContainersStopped:0 Images:3 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local logentries splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:true BridgeNfIP6Tables:true Debug:false NFd:35 OomKillDisable:true NGoroutines:50 SystemTime:2023-07-31 10:42:26.420760518 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1038-gcp OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Architecture:x86_64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:8 MemTotal:33648062464 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ubuntu-20-agent-15 Labels:[] ExperimentalBuild:false ServerVersion:24.0.5 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:3dce8eb055cbb6872793272b4f20ed16117344f8 Expected:3dce8eb055cbb6872793272b4f20ed16117344f8} RuncCommit:{ID:v1.1.7-0-g860f061 Expected:v1.1.7-0-g860f061} InitCommit:{ID:de40ad0 Expected:de40ad0} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.11.2] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.20.2] map[Name:scan Path:/usr/libexec/docker/cli-plugins/docker-scan SchemaVersion:0.1.0 ShortDescription:Docker Scan Vendor:Docker Inc. Version:v0.23.0]] Warnings:<nil>}}
	I0731 10:42:26.432046   45552 docker.go:294] overlay module found
	I0731 10:42:26.513988   45552 out.go:177] * Using the docker driver based on existing profile
	I0731 10:42:26.533911   45552 start.go:298] selected driver: docker
	I0731 10:42:26.533949   45552 start.go:898] validating driver "docker" against &{Name:functional-683521 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase:v0.0.40@sha256:8cadf23777709e43eca447c47a45f5a4635615129267ce025193040ec92a1631 Memory:4000 CPUs:2 DiskSize:20000 VMDriver: Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:0 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.27.3 ClusterName:functional-683521 Namespace:default APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:crio CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[{Component:apiserver Key:enable-admission-plugins Value:NamespaceAutoProvision}] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI: NodeIP: NodePort:8441 NodeName:} Nodes:[{Name: IP:192.168.49.2 Port:8441 KubernetesVersion:v1.27.3 ContainerRuntime:crio ControlPlane:true Worker:true}] Addons:map[default-storageclass:true storage-provisioner:true] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0}
	I0731 10:42:26.534048   45552 start.go:909] status for docker: {Installed:true Healthy:true Running:false NeedsImprovement:false Error:<nil> Reason: Fix: Doc: Version:}
	I0731 10:42:26.550579   45552 out.go:177] 
	W0731 10:42:26.557235   45552 out.go:239] X Exiting due to RSRC_INSUFFICIENT_REQ_MEMORY: Requested memory allocation 250MiB is less than the usable minimum of 1800MB
	X Exiting due to RSRC_INSUFFICIENT_REQ_MEMORY: Requested memory allocation 250MiB is less than the usable minimum of 1800MB
	I0731 10:42:26.583465   45552 out.go:177] 

                                                
                                                
** /stderr **
functional_test.go:987: (dbg) Run:  out/minikube-linux-amd64 start -p functional-683521 --dry-run --alsologtostderr -v=1 --driver=docker  --container-runtime=crio
--- PASS: TestFunctional/parallel/DryRun (0.95s)
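
--dry-run validates the configuration without mutating system state, so the undersized memory request fails fast with exit code 23 (RSRC_INSUFFICIENT_REQ_MEMORY). Manually (sketch):

	out/minikube-linux-amd64 start -p functional-683521 --dry-run --memory 250MB \
	  --driver=docker --container-runtime=crio; echo $?   # expect 23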

                                                
                                    
TestFunctional/parallel/InternationalLanguage (0.15s)

                                                
                                                
=== RUN   TestFunctional/parallel/InternationalLanguage
=== PAUSE TestFunctional/parallel/InternationalLanguage

                                                
                                                

                                                
                                                
=== CONT  TestFunctional/parallel/InternationalLanguage
functional_test.go:1016: (dbg) Run:  out/minikube-linux-amd64 start -p functional-683521 --dry-run --memory 250MB --alsologtostderr --driver=docker  --container-runtime=crio
functional_test.go:1016: (dbg) Non-zero exit: out/minikube-linux-amd64 start -p functional-683521 --dry-run --memory 250MB --alsologtostderr --driver=docker  --container-runtime=crio: exit status 23 (147.301773ms)

                                                
                                                
-- stdout --
	* [functional-683521] minikube v1.31.1 sur Ubuntu 20.04 (kvm/amd64)
	  - MINIKUBE_LOCATION=16969
	  - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
	  - KUBECONFIG=/home/jenkins/minikube-integration/16969-5799/kubeconfig
	  - MINIKUBE_HOME=/home/jenkins/minikube-integration/16969-5799/.minikube
	  - MINIKUBE_BIN=out/minikube-linux-amd64
	  - MINIKUBE_FORCE_SYSTEMD=
	* Utilisation du pilote docker basé sur le profil existant
	
	

                                                
                                                
-- /stdout --
** stderr ** 
	I0731 10:42:27.153089   45916 out.go:296] Setting OutFile to fd 1 ...
	I0731 10:42:27.153237   45916 out.go:343] TERM=,COLORTERM=, which probably does not support color
	I0731 10:42:27.153245   45916 out.go:309] Setting ErrFile to fd 2...
	I0731 10:42:27.153249   45916 out.go:343] TERM=,COLORTERM=, which probably does not support color
	I0731 10:42:27.153535   45916 root.go:338] Updating PATH: /home/jenkins/minikube-integration/16969-5799/.minikube/bin
	I0731 10:42:27.154096   45916 out.go:303] Setting JSON to false
	I0731 10:42:27.155266   45916 start.go:128] hostinfo: {"hostname":"ubuntu-20-agent-15","uptime":1499,"bootTime":1690798648,"procs":536,"os":"linux","platform":"ubuntu","platformFamily":"debian","platformVersion":"20.04","kernelVersion":"5.15.0-1038-gcp","kernelArch":"x86_64","virtualizationSystem":"kvm","virtualizationRole":"guest","hostId":"591c9f12-2938-3743-e2bf-c56a050d43d1"}
	I0731 10:42:27.155338   45916 start.go:138] virtualization: kvm guest
	I0731 10:42:27.158942   45916 out.go:177] * [functional-683521] minikube v1.31.1 sur Ubuntu 20.04 (kvm/amd64)
	I0731 10:42:27.160365   45916 out.go:177]   - MINIKUBE_LOCATION=16969
	I0731 10:42:27.160397   45916 notify.go:220] Checking for updates...
	I0731 10:42:27.161710   45916 out.go:177]   - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
	I0731 10:42:27.163122   45916 out.go:177]   - KUBECONFIG=/home/jenkins/minikube-integration/16969-5799/kubeconfig
	I0731 10:42:27.164350   45916 out.go:177]   - MINIKUBE_HOME=/home/jenkins/minikube-integration/16969-5799/.minikube
	I0731 10:42:27.165668   45916 out.go:177]   - MINIKUBE_BIN=out/minikube-linux-amd64
	I0731 10:42:27.166980   45916 out.go:177]   - MINIKUBE_FORCE_SYSTEMD=
	I0731 10:42:27.168623   45916 config.go:182] Loaded profile config "functional-683521": Driver=docker, ContainerRuntime=crio, KubernetesVersion=v1.27.3
	I0731 10:42:27.169081   45916 driver.go:373] Setting default libvirt URI to qemu:///system
	I0731 10:42:27.192424   45916 docker.go:121] docker version: linux-24.0.5:Docker Engine - Community
	I0731 10:42:27.192533   45916 cli_runner.go:164] Run: docker system info --format "{{json .}}"
	I0731 10:42:27.245207   45916 info.go:266] docker info: {ID:TS6T:UINC:MIYS:RZPA:KS6T:4JQK:7JHN:D6RA:LDP2:MHAE:G32M:C5NQ Containers:1 ContainersRunning:1 ContainersPaused:0 ContainersStopped:0 Images:3 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local logentries splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:true BridgeNfIP6Tables:true Debug:false NFd:33 OomKillDisable:true NGoroutines:48 SystemTime:2023-07-31 10:42:27.236881257 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1038-gcp OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Architecture:x86_64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:8 MemTotal:33648062464 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ubuntu-20-agent-15 Labels:[] ExperimentalBuild:false ServerVersion:24.0.5 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:3dce8eb055cbb6872793272b4f20ed16117344f8 Expected:3dce8eb055cbb6872793272b4f20ed16117344f8} RuncCommit:{ID:v1.1.7-0-g860f061 Expected:v1.1.7-0-g860f061} InitCommit:{ID:de40ad0 Expected:de40ad0} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.11.2] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.20.2] map[Name:scan Path:/usr/libexec/docker/cli-plugins/docker-scan SchemaVersion:0.1.0 ShortDescription:Docker Scan Vendor:Docker Inc. Version:v0.23.0]] Warnings:<nil>}}
	I0731 10:42:27.245301   45916 docker.go:294] overlay module found
	I0731 10:42:27.246911   45916 out.go:177] * Utilisation du pilote docker basé sur le profil existant
	I0731 10:42:27.248080   45916 start.go:298] selected driver: docker
	I0731 10:42:27.248090   45916 start.go:898] validating driver "docker" against &{Name:functional-683521 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase:v0.0.40@sha256:8cadf23777709e43eca447c47a45f5a4635615129267ce025193040ec92a1631 Memory:4000 CPUs:2 DiskSize:20000 VMDriver: Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:0 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.27.3 ClusterName:functional-683521 Namespace:default APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:crio CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[{Component:apiserver Key:enable-admission-plugins Value:NamespaceAutoProvision}] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI: NodeIP: NodePort:8441 NodeName:} Nodes:[{Name: IP:192.168.49.2 Port:8441 KubernetesVersion:v1.27.3 ContainerRuntime:crio ControlPlane:true Worker:true}] Addons:map[default-storageclass:true storage-provisioner:true] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0}
	I0731 10:42:27.248183   45916 start.go:909] status for docker: {Installed:true Healthy:true Running:false NeedsImprovement:false Error:<nil> Reason: Fix: Doc: Version:}
	I0731 10:42:27.250036   45916 out.go:177] 
	W0731 10:42:27.251241   45916 out.go:239] X Fermeture en raison de RSRC_INSUFFICIENT_REQ_MEMORY : L'allocation de mémoire demandée 250 Mio est inférieure au minimum utilisable de 1800 Mo
	X Fermeture en raison de RSRC_INSUFFICIENT_REQ_MEMORY : L'allocation de mémoire demandée 250 Mio est inférieure au minimum utilisable de 1800 Mo
	I0731 10:42:27.252704   45916 out.go:177] 

                                                
                                                
** /stderr **
--- PASS: TestFunctional/parallel/InternationalLanguage (0.15s)
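
The French output above is locale-driven; assuming the standard locale variables are what select the translation, the manual equivalent would be (sketch):

	LC_ALL=fr_FR.UTF-8 out/minikube-linux-amd64 start -p functional-683521 \
	  --dry-run --memory 250MB --driver=docker --container-runtime=crio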

                                                
                                    
TestFunctional/parallel/StatusCmd (0.96s)

                                                
                                                
=== RUN   TestFunctional/parallel/StatusCmd
=== PAUSE TestFunctional/parallel/StatusCmd

                                                
                                                

                                                
                                                
=== CONT  TestFunctional/parallel/StatusCmd
functional_test.go:850: (dbg) Run:  out/minikube-linux-amd64 -p functional-683521 status
functional_test.go:856: (dbg) Run:  out/minikube-linux-amd64 -p functional-683521 status -f host:{{.Host}},kublet:{{.Kubelet}},apiserver:{{.APIServer}},kubeconfig:{{.Kubeconfig}}
functional_test.go:868: (dbg) Run:  out/minikube-linux-amd64 -p functional-683521 status -o json
--- PASS: TestFunctional/parallel/StatusCmd (0.96s)

                                                
                                    
TestFunctional/parallel/ServiceCmdConnect (11.62s)

                                                
                                                
=== RUN   TestFunctional/parallel/ServiceCmdConnect
=== PAUSE TestFunctional/parallel/ServiceCmdConnect

                                                
                                                

                                                
                                                
=== CONT  TestFunctional/parallel/ServiceCmdConnect
functional_test.go:1628: (dbg) Run:  kubectl --context functional-683521 create deployment hello-node-connect --image=registry.k8s.io/echoserver:1.8
functional_test.go:1634: (dbg) Run:  kubectl --context functional-683521 expose deployment hello-node-connect --type=NodePort --port=8080
functional_test.go:1639: (dbg) TestFunctional/parallel/ServiceCmdConnect: waiting 10m0s for pods matching "app=hello-node-connect" in namespace "default" ...
helpers_test.go:344: "hello-node-connect-6fb669fc84-m6brw" [145b17c3-bd2e-46f5-ae70-c3fa0edc9fbd] Pending / Ready:ContainersNotReady (containers with unready status: [echoserver]) / ContainersReady:ContainersNotReady (containers with unready status: [echoserver])
helpers_test.go:344: "hello-node-connect-6fb669fc84-m6brw" [145b17c3-bd2e-46f5-ae70-c3fa0edc9fbd] Running
E0731 10:42:20.804993   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/addons-764200/client.crt: no such file or directory
functional_test.go:1639: (dbg) TestFunctional/parallel/ServiceCmdConnect: app=hello-node-connect healthy within 11.012605527s
functional_test.go:1648: (dbg) Run:  out/minikube-linux-amd64 -p functional-683521 service hello-node-connect --url
functional_test.go:1654: found endpoint for hello-node-connect: http://192.168.49.2:30467
functional_test.go:1674: http://192.168.49.2:30467: success! body:

                                                
                                                

                                                
                                                
Hostname: hello-node-connect-6fb669fc84-m6brw

                                                
                                                
Pod Information:
	-no pod information available-

                                                
                                                
Server values:
	server_version=nginx: 1.13.3 - lua: 10008

                                                
                                                
Request Information:
	client_address=10.244.0.1
	method=GET
	real path=/
	query=
	request_version=1.1
	request_uri=http://192.168.49.2:8080/

                                                
                                                
Request Headers:
	accept-encoding=gzip
	host=192.168.49.2:30467
	user-agent=Go-http-client/1.1

                                                
                                                
Request Body:
	-no body in request-

                                                
                                                
--- PASS: TestFunctional/parallel/ServiceCmdConnect (11.62s)
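
End to end this is a deploy/expose/probe loop; condensed (sketch; the NodePort is assigned dynamically, so capture the URL):

	kubectl --context functional-683521 create deployment hello-node-connect \
	  --image=registry.k8s.io/echoserver:1.8
	kubectl --context functional-683521 expose deployment hello-node-connect \
	  --type=NodePort --port=8080
	URL=$(out/minikube-linux-amd64 -p functional-683521 service hello-node-connect --url)
	curl -s "$URL"   # echoserver reflects the request back, as in the body above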

                                                
                                    
TestFunctional/parallel/AddonsCmd (0.16s)

                                                
                                                
=== RUN   TestFunctional/parallel/AddonsCmd
=== PAUSE TestFunctional/parallel/AddonsCmd

                                                
                                                

                                                
                                                
=== CONT  TestFunctional/parallel/AddonsCmd
functional_test.go:1689: (dbg) Run:  out/minikube-linux-amd64 -p functional-683521 addons list
functional_test.go:1701: (dbg) Run:  out/minikube-linux-amd64 -p functional-683521 addons list -o json
--- PASS: TestFunctional/parallel/AddonsCmd (0.16s)

                                                
                                    
TestFunctional/parallel/PersistentVolumeClaim (30.95s)

                                                
                                                
=== RUN   TestFunctional/parallel/PersistentVolumeClaim
=== PAUSE TestFunctional/parallel/PersistentVolumeClaim

                                                
                                                

                                                
                                                
=== CONT  TestFunctional/parallel/PersistentVolumeClaim
functional_test_pvc_test.go:44: (dbg) TestFunctional/parallel/PersistentVolumeClaim: waiting 4m0s for pods matching "integration-test=storage-provisioner" in namespace "kube-system" ...
helpers_test.go:344: "storage-provisioner" [dbd00efd-029d-4d7a-b42e-5c8554e33f07] Running
functional_test_pvc_test.go:44: (dbg) TestFunctional/parallel/PersistentVolumeClaim: integration-test=storage-provisioner healthy within 5.013261572s
functional_test_pvc_test.go:49: (dbg) Run:  kubectl --context functional-683521 get storageclass -o=json
functional_test_pvc_test.go:69: (dbg) Run:  kubectl --context functional-683521 apply -f testdata/storage-provisioner/pvc.yaml
functional_test_pvc_test.go:76: (dbg) Run:  kubectl --context functional-683521 get pvc myclaim -o=json
functional_test_pvc_test.go:125: (dbg) Run:  kubectl --context functional-683521 apply -f testdata/storage-provisioner/pod.yaml
functional_test_pvc_test.go:130: (dbg) TestFunctional/parallel/PersistentVolumeClaim: waiting 3m0s for pods matching "test=storage-provisioner" in namespace "default" ...
helpers_test.go:344: "sp-pod" [25517a34-ac5e-4ae7-870b-3a6effcfe826] Pending
helpers_test.go:344: "sp-pod" [25517a34-ac5e-4ae7-870b-3a6effcfe826] Pending / Ready:ContainersNotReady (containers with unready status: [myfrontend]) / ContainersReady:ContainersNotReady (containers with unready status: [myfrontend])
helpers_test.go:344: "sp-pod" [25517a34-ac5e-4ae7-870b-3a6effcfe826] Running
functional_test_pvc_test.go:130: (dbg) TestFunctional/parallel/PersistentVolumeClaim: test=storage-provisioner healthy within 11.014824356s
functional_test_pvc_test.go:100: (dbg) Run:  kubectl --context functional-683521 exec sp-pod -- touch /tmp/mount/foo
functional_test_pvc_test.go:106: (dbg) Run:  kubectl --context functional-683521 delete -f testdata/storage-provisioner/pod.yaml
functional_test_pvc_test.go:125: (dbg) Run:  kubectl --context functional-683521 apply -f testdata/storage-provisioner/pod.yaml
functional_test_pvc_test.go:130: (dbg) TestFunctional/parallel/PersistentVolumeClaim: waiting 3m0s for pods matching "test=storage-provisioner" in namespace "default" ...
helpers_test.go:344: "sp-pod" [0057bb7f-b603-4980-862a-7fc55b2598f5] Pending / Ready:ContainersNotReady (containers with unready status: [myfrontend]) / ContainersReady:ContainersNotReady (containers with unready status: [myfrontend])
helpers_test.go:344: "sp-pod" [0057bb7f-b603-4980-862a-7fc55b2598f5] Running
functional_test_pvc_test.go:130: (dbg) TestFunctional/parallel/PersistentVolumeClaim: test=storage-provisioner healthy within 13.011979249s
functional_test_pvc_test.go:114: (dbg) Run:  kubectl --context functional-683521 exec sp-pod -- ls /tmp/mount
--- PASS: TestFunctional/parallel/PersistentVolumeClaim (30.95s)
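
The second pod is the point of the test: data written to the claim before the delete must still be there after the pod is recreated. Condensed (sketch):

	kubectl --context functional-683521 exec sp-pod -- touch /tmp/mount/foo
	kubectl --context functional-683521 delete -f testdata/storage-provisioner/pod.yaml
	kubectl --context functional-683521 apply -f testdata/storage-provisioner/pod.yaml
	kubectl --context functional-683521 exec sp-pod -- ls /tmp/mount   # foo survives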

                                                
                                    
TestFunctional/parallel/SSHCmd (0.57s)

                                                
                                                
=== RUN   TestFunctional/parallel/SSHCmd
=== PAUSE TestFunctional/parallel/SSHCmd

                                                
                                                

                                                
                                                
=== CONT  TestFunctional/parallel/SSHCmd
functional_test.go:1724: (dbg) Run:  out/minikube-linux-amd64 -p functional-683521 ssh "echo hello"
functional_test.go:1741: (dbg) Run:  out/minikube-linux-amd64 -p functional-683521 ssh "cat /etc/hostname"
--- PASS: TestFunctional/parallel/SSHCmd (0.57s)

                                                
                                    
TestFunctional/parallel/CpCmd (1.14s)

                                                
                                                
=== RUN   TestFunctional/parallel/CpCmd
=== PAUSE TestFunctional/parallel/CpCmd

                                                
                                                

                                                
                                                
=== CONT  TestFunctional/parallel/CpCmd
helpers_test.go:556: (dbg) Run:  out/minikube-linux-amd64 -p functional-683521 cp testdata/cp-test.txt /home/docker/cp-test.txt
helpers_test.go:534: (dbg) Run:  out/minikube-linux-amd64 -p functional-683521 ssh -n functional-683521 "sudo cat /home/docker/cp-test.txt"
helpers_test.go:556: (dbg) Run:  out/minikube-linux-amd64 -p functional-683521 cp functional-683521:/home/docker/cp-test.txt /tmp/TestFunctionalparallelCpCmd3523435247/001/cp-test.txt
helpers_test.go:534: (dbg) Run:  out/minikube-linux-amd64 -p functional-683521 ssh -n functional-683521 "sudo cat /home/docker/cp-test.txt"
--- PASS: TestFunctional/parallel/CpCmd (1.14s)

                                                
                                    
TestFunctional/parallel/MySQL (21.83s)

                                                
                                                
=== RUN   TestFunctional/parallel/MySQL
=== PAUSE TestFunctional/parallel/MySQL

                                                
                                                

                                                
                                                
=== CONT  TestFunctional/parallel/MySQL
functional_test.go:1789: (dbg) Run:  kubectl --context functional-683521 replace --force -f testdata/mysql.yaml
functional_test.go:1795: (dbg) TestFunctional/parallel/MySQL: waiting 10m0s for pods matching "app=mysql" in namespace "default" ...
helpers_test.go:344: "mysql-7db894d786-74s57" [03d0cc61-f2a7-4d46-9641-77099d6c7b73] Pending
helpers_test.go:344: "mysql-7db894d786-74s57" [03d0cc61-f2a7-4d46-9641-77099d6c7b73] Pending / Ready:ContainersNotReady (containers with unready status: [mysql]) / ContainersReady:ContainersNotReady (containers with unready status: [mysql])
helpers_test.go:344: "mysql-7db894d786-74s57" [03d0cc61-f2a7-4d46-9641-77099d6c7b73] Running
functional_test.go:1795: (dbg) TestFunctional/parallel/MySQL: app=mysql healthy within 20.013015279s
functional_test.go:1803: (dbg) Run:  kubectl --context functional-683521 exec mysql-7db894d786-74s57 -- mysql -ppassword -e "show databases;"
functional_test.go:1803: (dbg) Non-zero exit: kubectl --context functional-683521 exec mysql-7db894d786-74s57 -- mysql -ppassword -e "show databases;": exit status 1 (171.558683ms)

                                                
                                                
** stderr ** 
	mysql: [Warning] Using a password on the command line interface can be insecure.
	ERROR 2002 (HY000): Can't connect to local MySQL server through socket '/var/run/mysqld/mysqld.sock' (2)
	command terminated with exit code 1

                                                
                                                
** /stderr **
functional_test.go:1803: (dbg) Run:  kubectl --context functional-683521 exec mysql-7db894d786-74s57 -- mysql -ppassword -e "show databases;"
--- PASS: TestFunctional/parallel/MySQL (21.83s)
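
The first exec fails because mysqld is still initializing its socket even though the pod already reports Running; the test simply retries. A hand-rolled equivalent (sketch; pod name from this run):

	until kubectl --context functional-683521 exec mysql-7db894d786-74s57 -- \
	  mysql -ppassword -e "show databases;"; do sleep 2; done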

                                                
                                    
TestFunctional/parallel/FileSync (0.26s)

                                                
                                                
=== RUN   TestFunctional/parallel/FileSync
=== PAUSE TestFunctional/parallel/FileSync

                                                
                                                

                                                
                                                
=== CONT  TestFunctional/parallel/FileSync
functional_test.go:1925: Checking for existence of /etc/test/nested/copy/12537/hosts within VM
functional_test.go:1927: (dbg) Run:  out/minikube-linux-amd64 -p functional-683521 ssh "sudo cat /etc/test/nested/copy/12537/hosts"
functional_test.go:1932: file sync test content: Test file for checking file sync process
--- PASS: TestFunctional/parallel/FileSync (0.26s)

                                                
                                    
TestFunctional/parallel/CertSync (1.53s)

                                                
                                                
=== RUN   TestFunctional/parallel/CertSync
=== PAUSE TestFunctional/parallel/CertSync

                                                
                                                

                                                
                                                
=== CONT  TestFunctional/parallel/CertSync
functional_test.go:1968: Checking for existence of /etc/ssl/certs/12537.pem within VM
functional_test.go:1969: (dbg) Run:  out/minikube-linux-amd64 -p functional-683521 ssh "sudo cat /etc/ssl/certs/12537.pem"
functional_test.go:1968: Checking for existence of /usr/share/ca-certificates/12537.pem within VM
functional_test.go:1969: (dbg) Run:  out/minikube-linux-amd64 -p functional-683521 ssh "sudo cat /usr/share/ca-certificates/12537.pem"
functional_test.go:1968: Checking for existence of /etc/ssl/certs/51391683.0 within VM
functional_test.go:1969: (dbg) Run:  out/minikube-linux-amd64 -p functional-683521 ssh "sudo cat /etc/ssl/certs/51391683.0"
functional_test.go:1995: Checking for existence of /etc/ssl/certs/125372.pem within VM
functional_test.go:1996: (dbg) Run:  out/minikube-linux-amd64 -p functional-683521 ssh "sudo cat /etc/ssl/certs/125372.pem"
functional_test.go:1995: Checking for existence of /usr/share/ca-certificates/125372.pem within VM
functional_test.go:1996: (dbg) Run:  out/minikube-linux-amd64 -p functional-683521 ssh "sudo cat /usr/share/ca-certificates/125372.pem"
functional_test.go:1995: Checking for existence of /etc/ssl/certs/3ec20f2e.0 within VM
functional_test.go:1996: (dbg) Run:  out/minikube-linux-amd64 -p functional-683521 ssh "sudo cat /etc/ssl/certs/3ec20f2e.0"
--- PASS: TestFunctional/parallel/CertSync (1.53s)
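
Both the PID-derived cert names and their hashed symlinks are checked inside the node; spot-checking one pair by hand (sketch; filenames come from this run):

	out/minikube-linux-amd64 -p functional-683521 ssh "sudo cat /etc/ssl/certs/12537.pem"
	out/minikube-linux-amd64 -p functional-683521 ssh "sudo cat /etc/ssl/certs/51391683.0"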

                                                
                                    
TestFunctional/parallel/NodeLabels (0.08s)

                                                
                                                
=== RUN   TestFunctional/parallel/NodeLabels
=== PAUSE TestFunctional/parallel/NodeLabels

                                                
                                                

                                                
                                                
=== CONT  TestFunctional/parallel/NodeLabels
functional_test.go:218: (dbg) Run:  kubectl --context functional-683521 get nodes --output=go-template "--template='{{range $k, $v := (index .items 0).metadata.labels}}{{$k}} {{end}}'"
--- PASS: TestFunctional/parallel/NodeLabels (0.08s)

                                                
                                    
TestFunctional/parallel/NonActiveRuntimeDisabled (0.57s)

                                                
                                                
=== RUN   TestFunctional/parallel/NonActiveRuntimeDisabled
=== PAUSE TestFunctional/parallel/NonActiveRuntimeDisabled

                                                
                                                

                                                
                                                
=== CONT  TestFunctional/parallel/NonActiveRuntimeDisabled
functional_test.go:2023: (dbg) Run:  out/minikube-linux-amd64 -p functional-683521 ssh "sudo systemctl is-active docker"
functional_test.go:2023: (dbg) Non-zero exit: out/minikube-linux-amd64 -p functional-683521 ssh "sudo systemctl is-active docker": exit status 1 (283.268827ms)

                                                
                                                
-- stdout --
	inactive

                                                
                                                
-- /stdout --
** stderr ** 
	ssh: Process exited with status 3

                                                
                                                
** /stderr **
functional_test.go:2023: (dbg) Run:  out/minikube-linux-amd64 -p functional-683521 ssh "sudo systemctl is-active containerd"
functional_test.go:2023: (dbg) Non-zero exit: out/minikube-linux-amd64 -p functional-683521 ssh "sudo systemctl is-active containerd": exit status 1 (288.656341ms)

                                                
                                                
-- stdout --
	inactive

                                                
                                                
-- /stdout --
** stderr ** 
	ssh: Process exited with status 3

                                                
                                                
** /stderr **
--- PASS: TestFunctional/parallel/NonActiveRuntimeDisabled (0.57s)
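
With crio selected as the runtime, docker and containerd must both be inactive; `systemctl is-active` exits 3 for an inactive unit, which is the status 3 that ssh reports above. Manually (sketch):

	out/minikube-linux-amd64 -p functional-683521 ssh "sudo systemctl is-active docker"       # inactive
	out/minikube-linux-amd64 -p functional-683521 ssh "sudo systemctl is-active containerd"   # inactive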

                                                
                                    
TestFunctional/parallel/License (0.17s)

                                                
                                                
=== RUN   TestFunctional/parallel/License
=== PAUSE TestFunctional/parallel/License

                                                
                                                

                                                
                                                
=== CONT  TestFunctional/parallel/License
functional_test.go:2284: (dbg) Run:  out/minikube-linux-amd64 license
--- PASS: TestFunctional/parallel/License (0.17s)

                                                
                                    
TestFunctional/parallel/Version/short (0.04s)

                                                
                                                
=== RUN   TestFunctional/parallel/Version/short
=== PAUSE TestFunctional/parallel/Version/short

                                                
                                                

                                                
                                                
=== CONT  TestFunctional/parallel/Version/short
functional_test.go:2252: (dbg) Run:  out/minikube-linux-amd64 -p functional-683521 version --short
--- PASS: TestFunctional/parallel/Version/short (0.04s)

                                                
                                    
TestFunctional/parallel/Version/components (1.16s)

                                                
                                                
=== RUN   TestFunctional/parallel/Version/components
=== PAUSE TestFunctional/parallel/Version/components

                                                
                                                

                                                
                                                
=== CONT  TestFunctional/parallel/Version/components
functional_test.go:2266: (dbg) Run:  out/minikube-linux-amd64 -p functional-683521 version -o=json --components
functional_test.go:2266: (dbg) Done: out/minikube-linux-amd64 -p functional-683521 version -o=json --components: (1.1570367s)
--- PASS: TestFunctional/parallel/Version/components (1.16s)

                                                
                                    
TestFunctional/parallel/ServiceCmd/DeployApp (9.17s)

                                                
                                                
=== RUN   TestFunctional/parallel/ServiceCmd/DeployApp
functional_test.go:1438: (dbg) Run:  kubectl --context functional-683521 create deployment hello-node --image=registry.k8s.io/echoserver:1.8
functional_test.go:1444: (dbg) Run:  kubectl --context functional-683521 expose deployment hello-node --type=NodePort --port=8080
functional_test.go:1449: (dbg) TestFunctional/parallel/ServiceCmd/DeployApp: waiting 10m0s for pods matching "app=hello-node" in namespace "default" ...
helpers_test.go:344: "hello-node-775766b4cc-8sk4q" [f426e370-3da5-4336-9b3d-8b49bcc5f26e] Pending / Ready:ContainersNotReady (containers with unready status: [echoserver]) / ContainersReady:ContainersNotReady (containers with unready status: [echoserver])
helpers_test.go:344: "hello-node-775766b4cc-8sk4q" [f426e370-3da5-4336-9b3d-8b49bcc5f26e] Running
functional_test.go:1449: (dbg) TestFunctional/parallel/ServiceCmd/DeployApp: app=hello-node healthy within 9.01643258s
--- PASS: TestFunctional/parallel/ServiceCmd/DeployApp (9.17s)

                                                
                                    
TestFunctional/parallel/TunnelCmd/serial/RunSecondTunnel (0.44s)

                                                
                                                
=== RUN   TestFunctional/parallel/TunnelCmd/serial/RunSecondTunnel
functional_test_tunnel_test.go:154: (dbg) daemon: [out/minikube-linux-amd64 -p functional-683521 tunnel --alsologtostderr]
functional_test_tunnel_test.go:154: (dbg) daemon: [out/minikube-linux-amd64 -p functional-683521 tunnel --alsologtostderr]
functional_test_tunnel_test.go:194: (dbg) stopping [out/minikube-linux-amd64 -p functional-683521 tunnel --alsologtostderr] ...
helpers_test.go:490: unable to find parent, assuming dead: process does not exist
functional_test_tunnel_test.go:194: (dbg) stopping [out/minikube-linux-amd64 -p functional-683521 tunnel --alsologtostderr] ...
helpers_test.go:508: unable to kill pid 42853: os: process already finished
helpers_test.go:502: unable to terminate pid 42528: os: process already finished
--- PASS: TestFunctional/parallel/TunnelCmd/serial/RunSecondTunnel (0.44s)

                                                
                                    
TestFunctional/parallel/TunnelCmd/serial/StartTunnel (0s)

                                                
                                                
=== RUN   TestFunctional/parallel/TunnelCmd/serial/StartTunnel
functional_test_tunnel_test.go:129: (dbg) daemon: [out/minikube-linux-amd64 -p functional-683521 tunnel --alsologtostderr]
--- PASS: TestFunctional/parallel/TunnelCmd/serial/StartTunnel (0.00s)

                                                
                                    
TestFunctional/parallel/TunnelCmd/serial/WaitService/Setup (11.42s)

                                                
                                                
=== RUN   TestFunctional/parallel/TunnelCmd/serial/WaitService/Setup
functional_test_tunnel_test.go:212: (dbg) Run:  kubectl --context functional-683521 apply -f testdata/testsvc.yaml
functional_test_tunnel_test.go:216: (dbg) TestFunctional/parallel/TunnelCmd/serial/WaitService/Setup: waiting 4m0s for pods matching "run=nginx-svc" in namespace "default" ...
helpers_test.go:344: "nginx-svc" [9d4544e1-411d-4031-83de-3111c011e36b] Pending / Ready:ContainersNotReady (containers with unready status: [nginx]) / ContainersReady:ContainersNotReady (containers with unready status: [nginx])
helpers_test.go:344: "nginx-svc" [9d4544e1-411d-4031-83de-3111c011e36b] Running
functional_test_tunnel_test.go:216: (dbg) TestFunctional/parallel/TunnelCmd/serial/WaitService/Setup: run=nginx-svc healthy within 11.012150121s
--- PASS: TestFunctional/parallel/TunnelCmd/serial/WaitService/Setup (11.42s)

                                                
                                    
TestFunctional/parallel/ServiceCmd/List (0.63s)

                                                
                                                
=== RUN   TestFunctional/parallel/ServiceCmd/List
functional_test.go:1458: (dbg) Run:  out/minikube-linux-amd64 -p functional-683521 service list
--- PASS: TestFunctional/parallel/ServiceCmd/List (0.63s)

                                                
                                    
TestFunctional/parallel/ServiceCmd/JSONOutput (0.5s)

                                                
                                                
=== RUN   TestFunctional/parallel/ServiceCmd/JSONOutput
functional_test.go:1488: (dbg) Run:  out/minikube-linux-amd64 -p functional-683521 service list -o json
functional_test.go:1493: Took "503.399139ms" to run "out/minikube-linux-amd64 -p functional-683521 service list -o json"
--- PASS: TestFunctional/parallel/ServiceCmd/JSONOutput (0.50s)

                                                
                                    
TestFunctional/parallel/ServiceCmd/HTTPS (0.33s)

                                                
                                                
=== RUN   TestFunctional/parallel/ServiceCmd/HTTPS
functional_test.go:1508: (dbg) Run:  out/minikube-linux-amd64 -p functional-683521 service --namespace=default --https --url hello-node
functional_test.go:1521: found endpoint: https://192.168.49.2:32017
--- PASS: TestFunctional/parallel/ServiceCmd/HTTPS (0.33s)

                                                
                                    
TestFunctional/parallel/ServiceCmd/Format (0.37s)

                                                
                                                
=== RUN   TestFunctional/parallel/ServiceCmd/Format
functional_test.go:1539: (dbg) Run:  out/minikube-linux-amd64 -p functional-683521 service hello-node --url --format={{.IP}}
--- PASS: TestFunctional/parallel/ServiceCmd/Format (0.37s)

                                                
                                    
TestFunctional/parallel/ServiceCmd/URL (0.33s)

                                                
                                                
=== RUN   TestFunctional/parallel/ServiceCmd/URL
functional_test.go:1558: (dbg) Run:  out/minikube-linux-amd64 -p functional-683521 service hello-node --url
functional_test.go:1564: found endpoint for hello-node: http://192.168.49.2:32017
--- PASS: TestFunctional/parallel/ServiceCmd/URL (0.33s)

                                                
                                    
TestFunctional/parallel/TunnelCmd/serial/WaitService/IngressIP (0.06s)

                                                
                                                
=== RUN   TestFunctional/parallel/TunnelCmd/serial/WaitService/IngressIP
functional_test_tunnel_test.go:234: (dbg) Run:  kubectl --context functional-683521 get svc nginx-svc -o jsonpath={.status.loadBalancer.ingress[0].ip}
--- PASS: TestFunctional/parallel/TunnelCmd/serial/WaitService/IngressIP (0.06s)

                                                
                                    
TestFunctional/parallel/TunnelCmd/serial/AccessDirect (0s)

                                                
                                                
=== RUN   TestFunctional/parallel/TunnelCmd/serial/AccessDirect
functional_test_tunnel_test.go:299: tunnel at http://10.97.146.148 is working!
--- PASS: TestFunctional/parallel/TunnelCmd/serial/AccessDirect (0.00s)
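
Taken together, the tunnel serial steps are: start the tunnel, wait for the LoadBalancer service to be assigned an ingress IP, then hit that IP directly. Condensed (sketch; the 10.97.146.148 address comes from this run):

	out/minikube-linux-amd64 -p functional-683521 tunnel &
	kubectl --context functional-683521 get svc nginx-svc \
	  -o jsonpath='{.status.loadBalancer.ingress[0].ip}'
	curl -s http://10.97.146.148/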

                                                
                                    
TestFunctional/parallel/TunnelCmd/serial/DeleteTunnel (0.11s)

                                                
                                                
=== RUN   TestFunctional/parallel/TunnelCmd/serial/DeleteTunnel
functional_test_tunnel_test.go:434: (dbg) stopping [out/minikube-linux-amd64 -p functional-683521 tunnel --alsologtostderr] ...
--- PASS: TestFunctional/parallel/TunnelCmd/serial/DeleteTunnel (0.11s)

                                                
                                    
TestFunctional/parallel/UpdateContextCmd/no_changes (0.12s)

                                                
                                                
=== RUN   TestFunctional/parallel/UpdateContextCmd/no_changes
=== PAUSE TestFunctional/parallel/UpdateContextCmd/no_changes

                                                
                                                

                                                
                                                
=== CONT  TestFunctional/parallel/UpdateContextCmd/no_changes
functional_test.go:2115: (dbg) Run:  out/minikube-linux-amd64 -p functional-683521 update-context --alsologtostderr -v=2
--- PASS: TestFunctional/parallel/UpdateContextCmd/no_changes (0.12s)

                                                
                                    
TestFunctional/parallel/UpdateContextCmd/no_minikube_cluster (0.13s)

                                                
                                                
=== RUN   TestFunctional/parallel/UpdateContextCmd/no_minikube_cluster
=== PAUSE TestFunctional/parallel/UpdateContextCmd/no_minikube_cluster

                                                
                                                

                                                
                                                
=== CONT  TestFunctional/parallel/UpdateContextCmd/no_minikube_cluster
functional_test.go:2115: (dbg) Run:  out/minikube-linux-amd64 -p functional-683521 update-context --alsologtostderr -v=2
--- PASS: TestFunctional/parallel/UpdateContextCmd/no_minikube_cluster (0.13s)

                                                
                                    
TestFunctional/parallel/UpdateContextCmd/no_clusters (0.13s)

                                                
                                                
=== RUN   TestFunctional/parallel/UpdateContextCmd/no_clusters
=== PAUSE TestFunctional/parallel/UpdateContextCmd/no_clusters

                                                
                                                

                                                
                                                
=== CONT  TestFunctional/parallel/UpdateContextCmd/no_clusters
functional_test.go:2115: (dbg) Run:  out/minikube-linux-amd64 -p functional-683521 update-context --alsologtostderr -v=2
--- PASS: TestFunctional/parallel/UpdateContextCmd/no_clusters (0.13s)
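All three UpdateContextCmd variants drive the same command; they differ only in what kubeconfig state exists beforehand. A sketch of checking the result by hand (the jsonpath query is an assumption, not something the test runs):

minikube -p functional-683521 update-context --alsologtostderr -v=2
# Confirm the kubeconfig entry now points at the current apiserver endpoint:
kubectl config view --minify -o jsonpath='{.clusters[0].cluster.server}'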

                                                
                                    
TestFunctional/parallel/ImageCommands/ImageListShort (0.22s)

                                                
                                                
=== RUN   TestFunctional/parallel/ImageCommands/ImageListShort
=== PAUSE TestFunctional/parallel/ImageCommands/ImageListShort

                                                
                                                

                                                
                                                
=== CONT  TestFunctional/parallel/ImageCommands/ImageListShort
functional_test.go:260: (dbg) Run:  out/minikube-linux-amd64 -p functional-683521 image ls --format short --alsologtostderr
functional_test.go:265: (dbg) Stdout: out/minikube-linux-amd64 -p functional-683521 image ls --format short --alsologtostderr:
registry.k8s.io/pause:latest
registry.k8s.io/pause:3.9
registry.k8s.io/pause:3.3
registry.k8s.io/pause:3.1
registry.k8s.io/kube-scheduler:v1.27.3
registry.k8s.io/kube-proxy:v1.27.3
registry.k8s.io/kube-controller-manager:v1.27.3
registry.k8s.io/kube-apiserver:v1.27.3
registry.k8s.io/etcd:3.5.7-0
registry.k8s.io/echoserver:1.8
registry.k8s.io/coredns/coredns:v1.10.1
gcr.io/k8s-minikube/storage-provisioner:v5
gcr.io/k8s-minikube/busybox:1.28.4-glibc
gcr.io/google-containers/addon-resizer:functional-683521
docker.io/library/nginx:latest
docker.io/library/nginx:alpine
docker.io/library/mysql:5.7
docker.io/kindest/kindnetd:v20230511-dc714da8
functional_test.go:268: (dbg) Stderr: out/minikube-linux-amd64 -p functional-683521 image ls --format short --alsologtostderr:
I0731 10:42:51.413323   50131 out.go:296] Setting OutFile to fd 1 ...
I0731 10:42:51.413486   50131 out.go:343] TERM=,COLORTERM=, which probably does not support color
I0731 10:42:51.413498   50131 out.go:309] Setting ErrFile to fd 2...
I0731 10:42:51.413503   50131 out.go:343] TERM=,COLORTERM=, which probably does not support color
I0731 10:42:51.413774   50131 root.go:338] Updating PATH: /home/jenkins/minikube-integration/16969-5799/.minikube/bin
I0731 10:42:51.414300   50131 config.go:182] Loaded profile config "functional-683521": Driver=docker, ContainerRuntime=crio, KubernetesVersion=v1.27.3
I0731 10:42:51.414396   50131 config.go:182] Loaded profile config "functional-683521": Driver=docker, ContainerRuntime=crio, KubernetesVersion=v1.27.3
I0731 10:42:51.414764   50131 cli_runner.go:164] Run: docker container inspect functional-683521 --format={{.State.Status}}
I0731 10:42:51.434108   50131 ssh_runner.go:195] Run: systemctl --version
I0731 10:42:51.434163   50131 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" functional-683521
I0731 10:42:51.450943   50131 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32782 SSHKeyPath:/home/jenkins/minikube-integration/16969-5799/.minikube/machines/functional-683521/id_rsa Username:docker}
I0731 10:42:51.546889   50131 ssh_runner.go:195] Run: sudo crictl images --output json
E0731 10:42:51.584942   50131 logFile.go:53] failed to close the audit log: invalid argument
W0731 10:42:51.584956   50131 root.go:91] failed to log command end to audit: failed to convert logs to rows: failed to unmarshal "{\"specversion\":\"1.0\",\"id\":\"59b6a9e7-0272-48db-9266-5e53e8190685\",\"source\":\"https://minikube.sigs.k8s.io/\",\"type\":\"io.k8s.sigs.minikube.audit\",\"datacontenttype\":\"application/json\",": unexpected end of JSON input
--- PASS: TestFunctional/parallel/ImageCommands/ImageListShort (0.22s)
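As the stderr above shows, `image ls` SSHes into the node and reads the CRI image store via crictl. Roughly equivalent by hand (a sketch, assuming the same docker-driver profile):

# What the command does under the hood, per the log above:
minikube -p functional-683521 ssh -- sudo crictl images --output json
# The user-facing equivalent:
minikube -p functional-683521 image ls --format short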

                                                
                                    
TestFunctional/parallel/ImageCommands/ImageListTable (0.22s)

                                                
                                                
=== RUN   TestFunctional/parallel/ImageCommands/ImageListTable
=== PAUSE TestFunctional/parallel/ImageCommands/ImageListTable

                                                
                                                

                                                
                                                
=== CONT  TestFunctional/parallel/ImageCommands/ImageListTable
functional_test.go:260: (dbg) Run:  out/minikube-linux-amd64 -p functional-683521 image ls --format table --alsologtostderr
functional_test.go:265: (dbg) Stdout: out/minikube-linux-amd64 -p functional-683521 image ls --format table --alsologtostderr:
|-----------------------------------------|--------------------|---------------|--------|
|                  Image                  |        Tag         |   Image ID    |  Size  |
|-----------------------------------------|--------------------|---------------|--------|
| gcr.io/google-containers/addon-resizer  | functional-683521  | ffd4cfbbe753e | 34.1MB |
| gcr.io/k8s-minikube/busybox             | 1.28.4-glibc       | 56cc512116c8f | 4.63MB |
| registry.k8s.io/kube-scheduler          | v1.27.3            | 41697ceeb70b3 | 59.8MB |
| registry.k8s.io/pause                   | 3.1                | da86e6ba6ca19 | 747kB  |
| registry.k8s.io/pause                   | 3.3                | 0184c1613d929 | 686kB  |
| docker.io/library/mysql                 | 5.7                | d7b085374dbc1 | 601MB  |
| docker.io/library/nginx                 | latest             | 89da1fb6dcb96 | 191MB  |
| registry.k8s.io/kube-apiserver          | v1.27.3            | 08a0c939e61b7 | 122MB  |
| registry.k8s.io/kube-proxy              | v1.27.3            | 5780543258cf0 | 72.7MB |
| registry.k8s.io/pause                   | latest             | 350b164e7ae1d | 247kB  |
| gcr.io/k8s-minikube/storage-provisioner | v5                 | 6e38f40d628db | 31.5MB |
| registry.k8s.io/coredns/coredns         | v1.10.1            | ead0a4a53df89 | 53.6MB |
| registry.k8s.io/etcd                    | 3.5.7-0            | 86b6af7dd652c | 297MB  |
| registry.k8s.io/kube-controller-manager | v1.27.3            | 7cffc01dba0e1 | 114MB  |
| registry.k8s.io/pause                   | 3.9                | e6f1816883972 | 750kB  |
| docker.io/kindest/kindnetd              | v20230511-dc714da8 | b0b1fa0f58c6e | 65.2MB |
| docker.io/library/nginx                 | alpine             | 4937520ae206c | 43.2MB |
| registry.k8s.io/echoserver              | 1.8                | 82e4c8a736a4f | 97.8MB |
|-----------------------------------------|--------------------|---------------|--------|
functional_test.go:268: (dbg) Stderr: out/minikube-linux-amd64 -p functional-683521 image ls --format table --alsologtostderr:
I0731 10:42:51.629563   50290 out.go:296] Setting OutFile to fd 1 ...
I0731 10:42:51.629650   50290 out.go:343] TERM=,COLORTERM=, which probably does not support color
I0731 10:42:51.629660   50290 out.go:309] Setting ErrFile to fd 2...
I0731 10:42:51.629664   50290 out.go:343] TERM=,COLORTERM=, which probably does not support color
I0731 10:42:51.629862   50290 root.go:338] Updating PATH: /home/jenkins/minikube-integration/16969-5799/.minikube/bin
I0731 10:42:51.630468   50290 config.go:182] Loaded profile config "functional-683521": Driver=docker, ContainerRuntime=crio, KubernetesVersion=v1.27.3
I0731 10:42:51.630601   50290 config.go:182] Loaded profile config "functional-683521": Driver=docker, ContainerRuntime=crio, KubernetesVersion=v1.27.3
I0731 10:42:51.630960   50290 cli_runner.go:164] Run: docker container inspect functional-683521 --format={{.State.Status}}
I0731 10:42:51.649332   50290 ssh_runner.go:195] Run: systemctl --version
I0731 10:42:51.649384   50290 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" functional-683521
I0731 10:42:51.667686   50290 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32782 SSHKeyPath:/home/jenkins/minikube-integration/16969-5799/.minikube/machines/functional-683521/id_rsa Username:docker}
I0731 10:42:51.758541   50290 ssh_runner.go:195] Run: sudo crictl images --output json
--- PASS: TestFunctional/parallel/ImageCommands/ImageListTable (0.22s)

                                                
                                    
TestFunctional/parallel/ImageCommands/ImageListJson (0.22s)

                                                
                                                
=== RUN   TestFunctional/parallel/ImageCommands/ImageListJson
=== PAUSE TestFunctional/parallel/ImageCommands/ImageListJson

                                                
                                                

                                                
                                                
=== CONT  TestFunctional/parallel/ImageCommands/ImageListJson
functional_test.go:260: (dbg) Run:  out/minikube-linux-amd64 -p functional-683521 image ls --format json --alsologtostderr
functional_test.go:265: (dbg) Stdout: out/minikube-linux-amd64 -p functional-683521 image ls --format json --alsologtostderr:
[{"id":"b0b1fa0f58c6e932b7f20bf208b2841317a1e8c88cc51b18358310bbd8ec95da","repoDigests":["docker.io/kindest/kindnetd@sha256:6c00e28db008c2afa67d9ee085c86184ec9ae5281d5ae1bd15006746fb9a1974","docker.io/kindest/kindnetd@sha256:7c15172bd152f05b102cea9c8f82ef5abeb56797ec85630923fb98d20fd519e9"],"repoTags":["docker.io/kindest/kindnetd:v20230511-dc714da8"],"size":"65249302"},{"id":"d7b085374dbc1ca6ee83a18b488b9da0425749c87051e8bd8287dc2a2c775ecb","repoDigests":["docker.io/library/mysql@sha256:2eabad08824e3120dbec9096c276e3956e1922636c06fbb383ae9ea9c499bf43","docker.io/library/mysql@sha256:8e044d43c8d38550dc1c935a0797f76adfa55024dd075f30161602395f99f0ca"],"repoTags":["docker.io/library/mysql:5.7"],"size":"601272484"},{"id":"e6f1816883972d4be47bd48879a08919b96afcd344132622e4d444987919323c","repoDigests":["registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097","registry.k8s.io/pause@sha256:8d4106c88ec0bd28001e34c975d65175d994072d65341f62a8ab0754b0fafe10"],"repoTags":["registry.k8s.io/pause:3.9"],"size":"750414"},{"id":"350b164e7ae1dcddeffadd65c76226c9b6dc5553f5179153fb0e36b78f2a5e06","repoDigests":["registry.k8s.io/pause@sha256:5bcb06ed43da4a16c6e6e33898eb0506e940bd66822659ecf0a898bbb0da7cb9"],"repoTags":["registry.k8s.io/pause:latest"],"size":"247077"},{"id":"07655ddf2eebe5d250f7a72c25f638b27126805d61779741b4e62e69ba080558","repoDigests":["docker.io/kubernetesui/dashboard@sha256:2e500d29e9d5f4a086b908eb8dfe7ecac57d2ab09d65b24f588b1d449841ef93","docker.io/kubernetesui/dashboard@sha256:ca93706ef4e400542202d620b8094a7e4e568ca9b1869c71b053cdf8b5dc3029"],"repoTags":[],"size":"249229937"},{"id":"115053965e86b2df4d78af78d7951b8644839d20a03820c6df59a261103315f7","repoDigests":["docker.io/kubernetesui/metrics-scraper@sha256:43227e8286fd379ee0415a5e2156a9439c4056807e3caa38e1dd413b0644807a","docker.io/kubernetesui/metrics-scraper@sha256:76049887f07a0476dc93efc2d3569b9529bf982b22d29f356092ce206e98765c"],"repoTags":[],"size":"43824855"},{"id":"ead0a4a53df89fd173874b46093b6e62d8c72967bbf606d672c9e8c9b601a4fc","repoDigests":["registry.k8s.io/coredns/coredns@sha256:a0ead06651cf580044aeb0a0feba63591858fb2e43ade8c9dea45a6a89ae7e5e","registry.k8s.io/coredns/coredns@sha256:be7652ce0b43b1339f3d14d9b14af9f588578011092c1f7893bd55432d83a378"],"repoTags":["registry.k8s.io/coredns/coredns:v1.10.1"],"size":"53621675"},{"id":"82e4c8a736a4fcf22b5ef9f6a4ff6207064c7187d7694bf97bd561605a538410","repoDigests":["registry.k8s.io/echoserver@sha256:cb3386f863f6a4b05f33c191361723f9d5927ac287463b1bea633bf859475969"],"repoTags":["registry.k8s.io/echoserver:1.8"],"size":"97846543"},{"id":"da86e6ba6ca197bf6bc5e9d900febd906b133eaa4750e6bed647b0fbe50ed43e","repoDigests":["registry.k8s.io/pause@sha256:84805ddcaaae94434d8eacb7e843f549ec1da0cd277787b97ad9d9ac2cea929e"],"repoTags":["registry.k8s.io/pause:3.1"],"size":"746911"},{"id":"4937520ae206c8969734d9a659fc1e6594d9b22b9340bf0796defbea0c92dd02","repoDigests":["docker.io/library/nginx@sha256:2d194184b067db3598771b4cf326cfe6ad5051937ba1132b8b7d4b0184e0d0a6","docker.io/library/nginx@sha256:2d4efe74ef541248b0a70838c557de04509d1115dec6bfc21ad0d66e41574a8a"],"repoTags":["docker.io/library/nginx:alpine"],"size":"43220780"},{"id":"ffd4cfbbe753e62419e129ee2ac618beb94e51baa7471df5038b0b516b59cf91","repoDigests":["gcr.io/google-containers/addon-resizer@sha256:0ce7cf4876524f069adf654e4dd3c95fe4bfc889c8bbc03cd6ecd061d9392126"],"repoTags":["gcr.io/google-containers/addon-resizer:functional-683521"],"size":"34114467"},{"id":"86b6af7dd652c1b38118be1c338e9354b33469e69a218f7e290a0ca5304ad681","repoDigests":["registry.k8s.io/etcd@sha256:51eae8381dcb1078289fa7b4f3df2630cdc18d09fb56f8e56b41c40e191d6c83","registry.k8s.io/etcd@sha256:8ae03c7bbd43d5c301eea33a39ac5eda2964f826050cb2ccf3486f18917590c9"],"repoTags":["registry.k8s.io/etcd:3.5.7-0"],"size":"297083935"},{"id":"08a0c939e61b7340db53ebf07b4d0e908a35ad8d94e2cb7d0f958210e567079a","repoDigests":["registry.k8s.io/kube-apiserver@sha256:e4d78564d3ce7ab34940eacc61c90d035cb8a6335552c9380eaff474e791ccbb","registry.k8s.io/kube-apiserver@sha256:fd03335dd2e7163e5e36e933a0c735d7fec6f42b33ddafad0bc54f333e4a23c0"],"repoTags":["registry.k8s.io/kube-apiserver:v1.27.3"],"size":"122065872"},{"id":"7cffc01dba0e151e525544f87958d12c0fa62a9f173bbc930200ce815f2aaf3f","repoDigests":["registry.k8s.io/kube-controller-manager@sha256:1ad8df2b525e7270cbad6fd613c4f668e336edb4436f440e49b34c4cec4fac9e","registry.k8s.io/kube-controller-manager@sha256:d3bdc20876edfaa4894cf8464dc98592385a43cbc033b37846dccc2460c7bc06"],"repoTags":["registry.k8s.io/kube-controller-manager:v1.27.3"],"size":"113919286"},{"id":"89da1fb6dcb964dd35c3f41b7b93ffc35eaf20bc61f2e1335fea710a18424287","repoDigests":["docker.io/library/nginx@sha256:67f9a4f10d147a6e04629340e6493c9703300ca23a2f7f3aa56fe615d75d31ca","docker.io/library/nginx@sha256:73e957703f1266530db0aeac1fd6a3f87c1e59943f4c13eb340bb8521c6041d7"],"repoTags":["docker.io/library/nginx:latest"],"size":"191049983"},{"id":"56cc512116c8f894f11ce1995460aef1ee0972d48bc2a3bdb1faaac7c020289c","repoDigests":["gcr.io/k8s-minikube/busybox@sha256:2d03e6ceeb99250061dd110530b0ece7998cd84121f952adef120ea7c5a6f00e","gcr.io/k8s-minikube/busybox@sha256:a85c92d5aa82aa6db0f92e5af591c2670a60a762da6bdfec52d960d55295f998"],"repoTags":["gcr.io/k8s-minikube/busybox:1.28.4-glibc"],"size":"4631262"},{"id":"6e38f40d628db3002f5617342c8872c935de530d867d0f709a2fbda1a302a562","repoDigests":["gcr.io/k8s-minikube/storage-provisioner@sha256:18eb69d1418e854ad5a19e399310e52808a8321e4c441c1dddad8977a0d7a944","gcr.io/k8s-minikube/storage-provisioner@sha256:c4c05d6ad6c0f24d87b39e596d4dddf64bec3e0d84f5b36e4511d4ebf583f38f"],"repoTags":["gcr.io/k8s-minikube/storage-provisioner:v5"],"size":"31470524"},{"id":"5780543258cf06f98595c003c0c6d22768d1fc8e9852e2839018a4bb3bfe163c","repoDigests":["registry.k8s.io/kube-proxy@sha256:091c9fe8428334e2451a0e5d214d40c415f2e0d0861794ee941f48003726570f","registry.k8s.io/kube-proxy@sha256:fb2bd59aae959e9649cb34101b66bb3c65f61eee9f3f81e40ed1e2325c92e699"],"repoTags":["registry.k8s.io/kube-proxy:v1.27.3"],"size":"72713623"},{"id":"41697ceeb70b3f49e54ed46f2cf27ac5b3a201a7d9668ca327588b23fafdf36a","repoDigests":["registry.k8s.io/kube-scheduler@sha256:2b43d8f86e9fdc96a38743ab2b6efffd8b63d189f2c41e5de0f8deb8a8d0e082","registry.k8s.io/kube-scheduler@sha256:77b8db7564e395328905beb74a0b9a5db3218a4b16ec19af174957e518df40c8"],"repoTags":["registry.k8s.io/kube-scheduler:v1.27.3"],"size":"59811126"},{"id":"0184c1613d92931126feb4c548e5da11015513b9e4c104e7305ee8b53b50a9da","repoDigests":["registry.k8s.io/pause@sha256:1000de19145c53d83aab989956fa8fca08dcbcc5b0208bdc193517905e6ccd04"],"repoTags":["registry.k8s.io/pause:3.3"],"size":"686139"}]
functional_test.go:268: (dbg) Stderr: out/minikube-linux-amd64 -p functional-683521 image ls --format json --alsologtostderr:
I0731 10:42:51.628366   50289 out.go:296] Setting OutFile to fd 1 ...
I0731 10:42:51.628542   50289 out.go:343] TERM=,COLORTERM=, which probably does not support color
I0731 10:42:51.628553   50289 out.go:309] Setting ErrFile to fd 2...
I0731 10:42:51.628559   50289 out.go:343] TERM=,COLORTERM=, which probably does not support color
I0731 10:42:51.628866   50289 root.go:338] Updating PATH: /home/jenkins/minikube-integration/16969-5799/.minikube/bin
I0731 10:42:51.629525   50289 config.go:182] Loaded profile config "functional-683521": Driver=docker, ContainerRuntime=crio, KubernetesVersion=v1.27.3
I0731 10:42:51.629626   50289 config.go:182] Loaded profile config "functional-683521": Driver=docker, ContainerRuntime=crio, KubernetesVersion=v1.27.3
I0731 10:42:51.629990   50289 cli_runner.go:164] Run: docker container inspect functional-683521 --format={{.State.Status}}
I0731 10:42:51.648235   50289 ssh_runner.go:195] Run: systemctl --version
I0731 10:42:51.648277   50289 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" functional-683521
I0731 10:42:51.668983   50289 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32782 SSHKeyPath:/home/jenkins/minikube-integration/16969-5799/.minikube/machines/functional-683521/id_rsa Username:docker}
I0731 10:42:51.758410   50289 ssh_runner.go:195] Run: sudo crictl images --output json
--- PASS: TestFunctional/parallel/ImageCommands/ImageListJson (0.22s)
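The JSON format is the machine-friendly variant of the same listing. A sketch of filtering it with jq (jq is not used by the test itself; the filter matches the array-of-objects shape shown above):

# Print only tagged images, one tag per line:
minikube -p functional-683521 image ls --format json \
  | jq -r '.[] | select(.repoTags | length > 0) | .repoTags[]'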

                                                
                                    
TestFunctional/parallel/ImageCommands/ImageListYaml (0.22s)

                                                
                                                
=== RUN   TestFunctional/parallel/ImageCommands/ImageListYaml
=== PAUSE TestFunctional/parallel/ImageCommands/ImageListYaml

                                                
                                                

                                                
                                                
=== CONT  TestFunctional/parallel/ImageCommands/ImageListYaml
functional_test.go:260: (dbg) Run:  out/minikube-linux-amd64 -p functional-683521 image ls --format yaml --alsologtostderr
functional_test.go:265: (dbg) Stdout: out/minikube-linux-amd64 -p functional-683521 image ls --format yaml --alsologtostderr:
- id: b0b1fa0f58c6e932b7f20bf208b2841317a1e8c88cc51b18358310bbd8ec95da
repoDigests:
- docker.io/kindest/kindnetd@sha256:6c00e28db008c2afa67d9ee085c86184ec9ae5281d5ae1bd15006746fb9a1974
- docker.io/kindest/kindnetd@sha256:7c15172bd152f05b102cea9c8f82ef5abeb56797ec85630923fb98d20fd519e9
repoTags:
- docker.io/kindest/kindnetd:v20230511-dc714da8
size: "65249302"
- id: 82e4c8a736a4fcf22b5ef9f6a4ff6207064c7187d7694bf97bd561605a538410
repoDigests:
- registry.k8s.io/echoserver@sha256:cb3386f863f6a4b05f33c191361723f9d5927ac287463b1bea633bf859475969
repoTags:
- registry.k8s.io/echoserver:1.8
size: "97846543"
- id: 5780543258cf06f98595c003c0c6d22768d1fc8e9852e2839018a4bb3bfe163c
repoDigests:
- registry.k8s.io/kube-proxy@sha256:091c9fe8428334e2451a0e5d214d40c415f2e0d0861794ee941f48003726570f
- registry.k8s.io/kube-proxy@sha256:fb2bd59aae959e9649cb34101b66bb3c65f61eee9f3f81e40ed1e2325c92e699
repoTags:
- registry.k8s.io/kube-proxy:v1.27.3
size: "72713623"
- id: 41697ceeb70b3f49e54ed46f2cf27ac5b3a201a7d9668ca327588b23fafdf36a
repoDigests:
- registry.k8s.io/kube-scheduler@sha256:2b43d8f86e9fdc96a38743ab2b6efffd8b63d189f2c41e5de0f8deb8a8d0e082
- registry.k8s.io/kube-scheduler@sha256:77b8db7564e395328905beb74a0b9a5db3218a4b16ec19af174957e518df40c8
repoTags:
- registry.k8s.io/kube-scheduler:v1.27.3
size: "59811126"
- id: 115053965e86b2df4d78af78d7951b8644839d20a03820c6df59a261103315f7
repoDigests:
- docker.io/kubernetesui/metrics-scraper@sha256:43227e8286fd379ee0415a5e2156a9439c4056807e3caa38e1dd413b0644807a
- docker.io/kubernetesui/metrics-scraper@sha256:76049887f07a0476dc93efc2d3569b9529bf982b22d29f356092ce206e98765c
repoTags: []
size: "43824855"
- id: 86b6af7dd652c1b38118be1c338e9354b33469e69a218f7e290a0ca5304ad681
repoDigests:
- registry.k8s.io/etcd@sha256:51eae8381dcb1078289fa7b4f3df2630cdc18d09fb56f8e56b41c40e191d6c83
- registry.k8s.io/etcd@sha256:8ae03c7bbd43d5c301eea33a39ac5eda2964f826050cb2ccf3486f18917590c9
repoTags:
- registry.k8s.io/etcd:3.5.7-0
size: "297083935"
- id: 08a0c939e61b7340db53ebf07b4d0e908a35ad8d94e2cb7d0f958210e567079a
repoDigests:
- registry.k8s.io/kube-apiserver@sha256:e4d78564d3ce7ab34940eacc61c90d035cb8a6335552c9380eaff474e791ccbb
- registry.k8s.io/kube-apiserver@sha256:fd03335dd2e7163e5e36e933a0c735d7fec6f42b33ddafad0bc54f333e4a23c0
repoTags:
- registry.k8s.io/kube-apiserver:v1.27.3
size: "122065872"
- id: 7cffc01dba0e151e525544f87958d12c0fa62a9f173bbc930200ce815f2aaf3f
repoDigests:
- registry.k8s.io/kube-controller-manager@sha256:1ad8df2b525e7270cbad6fd613c4f668e336edb4436f440e49b34c4cec4fac9e
- registry.k8s.io/kube-controller-manager@sha256:d3bdc20876edfaa4894cf8464dc98592385a43cbc033b37846dccc2460c7bc06
repoTags:
- registry.k8s.io/kube-controller-manager:v1.27.3
size: "113919286"
- id: da86e6ba6ca197bf6bc5e9d900febd906b133eaa4750e6bed647b0fbe50ed43e
repoDigests:
- registry.k8s.io/pause@sha256:84805ddcaaae94434d8eacb7e843f549ec1da0cd277787b97ad9d9ac2cea929e
repoTags:
- registry.k8s.io/pause:3.1
size: "746911"
- id: 89da1fb6dcb964dd35c3f41b7b93ffc35eaf20bc61f2e1335fea710a18424287
repoDigests:
- docker.io/library/nginx@sha256:67f9a4f10d147a6e04629340e6493c9703300ca23a2f7f3aa56fe615d75d31ca
- docker.io/library/nginx@sha256:73e957703f1266530db0aeac1fd6a3f87c1e59943f4c13eb340bb8521c6041d7
repoTags:
- docker.io/library/nginx:latest
size: "191049983"
- id: 56cc512116c8f894f11ce1995460aef1ee0972d48bc2a3bdb1faaac7c020289c
repoDigests:
- gcr.io/k8s-minikube/busybox@sha256:2d03e6ceeb99250061dd110530b0ece7998cd84121f952adef120ea7c5a6f00e
- gcr.io/k8s-minikube/busybox@sha256:a85c92d5aa82aa6db0f92e5af591c2670a60a762da6bdfec52d960d55295f998
repoTags:
- gcr.io/k8s-minikube/busybox:1.28.4-glibc
size: "4631262"
- id: 350b164e7ae1dcddeffadd65c76226c9b6dc5553f5179153fb0e36b78f2a5e06
repoDigests:
- registry.k8s.io/pause@sha256:5bcb06ed43da4a16c6e6e33898eb0506e940bd66822659ecf0a898bbb0da7cb9
repoTags:
- registry.k8s.io/pause:latest
size: "247077"
- id: 07655ddf2eebe5d250f7a72c25f638b27126805d61779741b4e62e69ba080558
repoDigests:
- docker.io/kubernetesui/dashboard@sha256:2e500d29e9d5f4a086b908eb8dfe7ecac57d2ab09d65b24f588b1d449841ef93
- docker.io/kubernetesui/dashboard@sha256:ca93706ef4e400542202d620b8094a7e4e568ca9b1869c71b053cdf8b5dc3029
repoTags: []
size: "249229937"
- id: d7b085374dbc1ca6ee83a18b488b9da0425749c87051e8bd8287dc2a2c775ecb
repoDigests:
- docker.io/library/mysql@sha256:2eabad08824e3120dbec9096c276e3956e1922636c06fbb383ae9ea9c499bf43
- docker.io/library/mysql@sha256:8e044d43c8d38550dc1c935a0797f76adfa55024dd075f30161602395f99f0ca
repoTags:
- docker.io/library/mysql:5.7
size: "601272484"
- id: 4937520ae206c8969734d9a659fc1e6594d9b22b9340bf0796defbea0c92dd02
repoDigests:
- docker.io/library/nginx@sha256:2d194184b067db3598771b4cf326cfe6ad5051937ba1132b8b7d4b0184e0d0a6
- docker.io/library/nginx@sha256:2d4efe74ef541248b0a70838c557de04509d1115dec6bfc21ad0d66e41574a8a
repoTags:
- docker.io/library/nginx:alpine
size: "43220780"
- id: ffd4cfbbe753e62419e129ee2ac618beb94e51baa7471df5038b0b516b59cf91
repoDigests:
- gcr.io/google-containers/addon-resizer@sha256:0ce7cf4876524f069adf654e4dd3c95fe4bfc889c8bbc03cd6ecd061d9392126
repoTags:
- gcr.io/google-containers/addon-resizer:functional-683521
size: "34114467"
- id: 6e38f40d628db3002f5617342c8872c935de530d867d0f709a2fbda1a302a562
repoDigests:
- gcr.io/k8s-minikube/storage-provisioner@sha256:18eb69d1418e854ad5a19e399310e52808a8321e4c441c1dddad8977a0d7a944
- gcr.io/k8s-minikube/storage-provisioner@sha256:c4c05d6ad6c0f24d87b39e596d4dddf64bec3e0d84f5b36e4511d4ebf583f38f
repoTags:
- gcr.io/k8s-minikube/storage-provisioner:v5
size: "31470524"
- id: ead0a4a53df89fd173874b46093b6e62d8c72967bbf606d672c9e8c9b601a4fc
repoDigests:
- registry.k8s.io/coredns/coredns@sha256:a0ead06651cf580044aeb0a0feba63591858fb2e43ade8c9dea45a6a89ae7e5e
- registry.k8s.io/coredns/coredns@sha256:be7652ce0b43b1339f3d14d9b14af9f588578011092c1f7893bd55432d83a378
repoTags:
- registry.k8s.io/coredns/coredns:v1.10.1
size: "53621675"
- id: 0184c1613d92931126feb4c548e5da11015513b9e4c104e7305ee8b53b50a9da
repoDigests:
- registry.k8s.io/pause@sha256:1000de19145c53d83aab989956fa8fca08dcbcc5b0208bdc193517905e6ccd04
repoTags:
- registry.k8s.io/pause:3.3
size: "686139"
- id: e6f1816883972d4be47bd48879a08919b96afcd344132622e4d444987919323c
repoDigests:
- registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097
- registry.k8s.io/pause@sha256:8d4106c88ec0bd28001e34c975d65175d994072d65341f62a8ab0754b0fafe10
repoTags:
- registry.k8s.io/pause:3.9
size: "750414"

                                                
                                                
functional_test.go:268: (dbg) Stderr: out/minikube-linux-amd64 -p functional-683521 image ls --format yaml --alsologtostderr:
I0731 10:42:51.411199   50130 out.go:296] Setting OutFile to fd 1 ...
I0731 10:42:51.411322   50130 out.go:343] TERM=,COLORTERM=, which probably does not support color
I0731 10:42:51.411333   50130 out.go:309] Setting ErrFile to fd 2...
I0731 10:42:51.411339   50130 out.go:343] TERM=,COLORTERM=, which probably does not support color
I0731 10:42:51.411666   50130 root.go:338] Updating PATH: /home/jenkins/minikube-integration/16969-5799/.minikube/bin
I0731 10:42:51.412459   50130 config.go:182] Loaded profile config "functional-683521": Driver=docker, ContainerRuntime=crio, KubernetesVersion=v1.27.3
I0731 10:42:51.412603   50130 config.go:182] Loaded profile config "functional-683521": Driver=docker, ContainerRuntime=crio, KubernetesVersion=v1.27.3
I0731 10:42:51.413193   50130 cli_runner.go:164] Run: docker container inspect functional-683521 --format={{.State.Status}}
I0731 10:42:51.429649   50130 ssh_runner.go:195] Run: systemctl --version
I0731 10:42:51.429685   50130 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" functional-683521
I0731 10:42:51.449442   50130 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32782 SSHKeyPath:/home/jenkins/minikube-integration/16969-5799/.minikube/machines/functional-683521/id_rsa Username:docker}
I0731 10:42:51.546339   50130 ssh_runner.go:195] Run: sudo crictl images --output json
--- PASS: TestFunctional/parallel/ImageCommands/ImageListYaml (0.22s)

                                                
                                    
TestFunctional/parallel/ImageCommands/ImageBuild (1.74s)

                                                
                                                
=== RUN   TestFunctional/parallel/ImageCommands/ImageBuild
=== PAUSE TestFunctional/parallel/ImageCommands/ImageBuild

                                                
                                                

                                                
                                                
=== CONT  TestFunctional/parallel/ImageCommands/ImageBuild
functional_test.go:307: (dbg) Run:  out/minikube-linux-amd64 -p functional-683521 ssh pgrep buildkitd
functional_test.go:307: (dbg) Non-zero exit: out/minikube-linux-amd64 -p functional-683521 ssh pgrep buildkitd: exit status 1 (255.494334ms)

                                                
                                                
** stderr ** 
	ssh: Process exited with status 1

                                                
                                                
** /stderr **
functional_test.go:314: (dbg) Run:  out/minikube-linux-amd64 -p functional-683521 image build -t localhost/my-image:functional-683521 testdata/build --alsologtostderr
functional_test.go:314: (dbg) Done: out/minikube-linux-amd64 -p functional-683521 image build -t localhost/my-image:functional-683521 testdata/build --alsologtostderr: (1.288240817s)
functional_test.go:319: (dbg) Stdout: out/minikube-linux-amd64 -p functional-683521 image build -t localhost/my-image:functional-683521 testdata/build --alsologtostderr:
STEP 1/3: FROM gcr.io/k8s-minikube/busybox
STEP 2/3: RUN true
--> 4fd9b702482
STEP 3/3: ADD content.txt /
COMMIT localhost/my-image:functional-683521
--> 61a666a9911
Successfully tagged localhost/my-image:functional-683521
61a666a9911984a959ccf0689ddb2eed7fe89caffc516a99a5f5f0f62606a265
functional_test.go:322: (dbg) Stderr: out/minikube-linux-amd64 -p functional-683521 image build -t localhost/my-image:functional-683521 testdata/build --alsologtostderr:
I0731 10:42:51.671894   50316 out.go:296] Setting OutFile to fd 1 ...
I0731 10:42:51.672032   50316 out.go:343] TERM=,COLORTERM=, which probably does not support color
I0731 10:42:51.672042   50316 out.go:309] Setting ErrFile to fd 2...
I0731 10:42:51.672047   50316 out.go:343] TERM=,COLORTERM=, which probably does not support color
I0731 10:42:51.672234   50316 root.go:338] Updating PATH: /home/jenkins/minikube-integration/16969-5799/.minikube/bin
I0731 10:42:51.672753   50316 config.go:182] Loaded profile config "functional-683521": Driver=docker, ContainerRuntime=crio, KubernetesVersion=v1.27.3
I0731 10:42:51.673244   50316 config.go:182] Loaded profile config "functional-683521": Driver=docker, ContainerRuntime=crio, KubernetesVersion=v1.27.3
I0731 10:42:51.673600   50316 cli_runner.go:164] Run: docker container inspect functional-683521 --format={{.State.Status}}
I0731 10:42:51.691281   50316 ssh_runner.go:195] Run: systemctl --version
I0731 10:42:51.691333   50316 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" functional-683521
I0731 10:42:51.708176   50316 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32782 SSHKeyPath:/home/jenkins/minikube-integration/16969-5799/.minikube/machines/functional-683521/id_rsa Username:docker}
I0731 10:42:51.798876   50316 build_images.go:151] Building image from path: /tmp/build.1767197143.tar
I0731 10:42:51.798934   50316 ssh_runner.go:195] Run: sudo mkdir -p /var/lib/minikube/build
I0731 10:42:51.806831   50316 ssh_runner.go:195] Run: stat -c "%s %y" /var/lib/minikube/build/build.1767197143.tar
I0731 10:42:51.810442   50316 ssh_runner.go:352] existence check for /var/lib/minikube/build/build.1767197143.tar: stat -c "%s %y" /var/lib/minikube/build/build.1767197143.tar: Process exited with status 1
stdout:

                                                
                                                
stderr:
stat: cannot statx '/var/lib/minikube/build/build.1767197143.tar': No such file or directory
I0731 10:42:51.810473   50316 ssh_runner.go:362] scp /tmp/build.1767197143.tar --> /var/lib/minikube/build/build.1767197143.tar (3072 bytes)
I0731 10:42:51.831036   50316 ssh_runner.go:195] Run: sudo mkdir -p /var/lib/minikube/build/build.1767197143
I0731 10:42:51.838022   50316 ssh_runner.go:195] Run: sudo tar -C /var/lib/minikube/build/build.1767197143 -xf /var/lib/minikube/build/build.1767197143.tar
I0731 10:42:51.845633   50316 crio.go:297] Building image: /var/lib/minikube/build/build.1767197143
I0731 10:42:51.845704   50316 ssh_runner.go:195] Run: sudo podman build -t localhost/my-image:functional-683521 /var/lib/minikube/build/build.1767197143 --cgroup-manager=cgroupfs
Trying to pull gcr.io/k8s-minikube/busybox:latest...
Getting image source signatures
Copying blob sha256:5cc84ad355aaa64f46ea9c7bbcc319a9d808ab15088a27209c9e70ef86e5a2aa
Copying blob sha256:5cc84ad355aaa64f46ea9c7bbcc319a9d808ab15088a27209c9e70ef86e5a2aa
Copying config sha256:beae173ccac6ad749f76713cf4440fe3d21d1043fe616dfbe30775815d1d0f6a
Writing manifest to image destination
Storing signatures
I0731 10:42:52.887572   50316 ssh_runner.go:235] Completed: sudo podman build -t localhost/my-image:functional-683521 /var/lib/minikube/build/build.1767197143 --cgroup-manager=cgroupfs: (1.041839377s)
I0731 10:42:52.887619   50316 ssh_runner.go:195] Run: sudo rm -rf /var/lib/minikube/build/build.1767197143
I0731 10:42:52.895398   50316 ssh_runner.go:195] Run: sudo rm -f /var/lib/minikube/build/build.1767197143.tar
I0731 10:42:52.902770   50316 build_images.go:207] Built localhost/my-image:functional-683521 from /tmp/build.1767197143.tar
I0731 10:42:52.902792   50316 build_images.go:123] succeeded building to: functional-683521
I0731 10:42:52.902795   50316 build_images.go:124] failed building to: 
functional_test.go:447: (dbg) Run:  out/minikube-linux-amd64 -p functional-683521 image ls
--- PASS: TestFunctional/parallel/ImageCommands/ImageBuild (1.74s)
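The STEP output above implies a three-line Dockerfile in testdata/build. A sketch that reconstructs an equivalent build context (the actual contents of content.txt are an assumption):

mkdir -p build
echo 'hello' > build/content.txt          # assumed file contents
cat > build/Dockerfile <<'EOF'
FROM gcr.io/k8s-minikube/busybox
RUN true
ADD content.txt /
EOF
# Build on the node (podman under crio, per the stderr) and verify:
minikube -p functional-683521 image build -t localhost/my-image:functional-683521 ./build
minikube -p functional-683521 image ls | grep my-image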

                                                
                                    
TestFunctional/parallel/ImageCommands/Setup (0.9s)

                                                
                                                
=== RUN   TestFunctional/parallel/ImageCommands/Setup
functional_test.go:341: (dbg) Run:  docker pull gcr.io/google-containers/addon-resizer:1.8.8
functional_test.go:346: (dbg) Run:  docker tag gcr.io/google-containers/addon-resizer:1.8.8 gcr.io/google-containers/addon-resizer:functional-683521
--- PASS: TestFunctional/parallel/ImageCommands/Setup (0.90s)

                                                
                                    
TestFunctional/parallel/ProfileCmd/profile_not_create (0.38s)

                                                
                                                
=== RUN   TestFunctional/parallel/ProfileCmd/profile_not_create
functional_test.go:1269: (dbg) Run:  out/minikube-linux-amd64 profile lis
functional_test.go:1274: (dbg) Run:  out/minikube-linux-amd64 profile list --output json
--- PASS: TestFunctional/parallel/ProfileCmd/profile_not_create (0.38s)

                                                
                                    
TestFunctional/parallel/ProfileCmd/profile_list (0.34s)

                                                
                                                
=== RUN   TestFunctional/parallel/ProfileCmd/profile_list
functional_test.go:1309: (dbg) Run:  out/minikube-linux-amd64 profile list
functional_test.go:1314: Took "287.876457ms" to run "out/minikube-linux-amd64 profile list"
functional_test.go:1323: (dbg) Run:  out/minikube-linux-amd64 profile list -l
functional_test.go:1328: Took "54.963619ms" to run "out/minikube-linux-amd64 profile list -l"
--- PASS: TestFunctional/parallel/ProfileCmd/profile_list (0.34s)

                                                
                                    
TestFunctional/parallel/ImageCommands/ImageLoadDaemon (5.03s)

                                                
                                                
=== RUN   TestFunctional/parallel/ImageCommands/ImageLoadDaemon
functional_test.go:354: (dbg) Run:  out/minikube-linux-amd64 -p functional-683521 image load --daemon gcr.io/google-containers/addon-resizer:functional-683521 --alsologtostderr
functional_test.go:354: (dbg) Done: out/minikube-linux-amd64 -p functional-683521 image load --daemon gcr.io/google-containers/addon-resizer:functional-683521 --alsologtostderr: (4.815852135s)
functional_test.go:447: (dbg) Run:  out/minikube-linux-amd64 -p functional-683521 image ls
--- PASS: TestFunctional/parallel/ImageCommands/ImageLoadDaemon (5.03s)
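`image load --daemon` copies an image from the host's Docker daemon into the cluster's container runtime. A sketch using the tag created in the Setup step below:

docker tag gcr.io/google-containers/addon-resizer:1.8.8 \
  gcr.io/google-containers/addon-resizer:functional-683521
minikube -p functional-683521 image load --daemon \
  gcr.io/google-containers/addon-resizer:functional-683521
minikube -p functional-683521 image ls | grep addon-resizer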

                                                
                                    
TestFunctional/parallel/ProfileCmd/profile_json_output (0.42s)

                                                
                                                
=== RUN   TestFunctional/parallel/ProfileCmd/profile_json_output
functional_test.go:1360: (dbg) Run:  out/minikube-linux-amd64 profile list -o json
functional_test.go:1365: Took "345.510306ms" to run "out/minikube-linux-amd64 profile list -o json"
functional_test.go:1373: (dbg) Run:  out/minikube-linux-amd64 profile list -o json --light
functional_test.go:1378: Took "73.251561ms" to run "out/minikube-linux-amd64 profile list -o json --light"
--- PASS: TestFunctional/parallel/ProfileCmd/profile_json_output (0.42s)
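A sketch of consuming the JSON profile listing; the `.valid[].Name` path is an assumption about the output schema, not something the test asserts:

minikube profile list -o json | jq -r '.valid[].Name'
# --light skips cluster status checks, hence the faster timing above:
minikube profile list -o json --light | jq .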

                                                
                                    
TestFunctional/parallel/MountCmd/any-port (6.89s)

                                                
                                                
=== RUN   TestFunctional/parallel/MountCmd/any-port
functional_test_mount_test.go:73: (dbg) daemon: [out/minikube-linux-amd64 mount -p functional-683521 /tmp/TestFunctionalparallelMountCmdany-port280728022/001:/mount-9p --alsologtostderr -v=1]
functional_test_mount_test.go:107: wrote "test-1690800145964174961" to /tmp/TestFunctionalparallelMountCmdany-port280728022/001/created-by-test
functional_test_mount_test.go:107: wrote "test-1690800145964174961" to /tmp/TestFunctionalparallelMountCmdany-port280728022/001/created-by-test-removed-by-pod
functional_test_mount_test.go:107: wrote "test-1690800145964174961" to /tmp/TestFunctionalparallelMountCmdany-port280728022/001/test-1690800145964174961
functional_test_mount_test.go:115: (dbg) Run:  out/minikube-linux-amd64 -p functional-683521 ssh "findmnt -T /mount-9p | grep 9p"
functional_test_mount_test.go:115: (dbg) Non-zero exit: out/minikube-linux-amd64 -p functional-683521 ssh "findmnt -T /mount-9p | grep 9p": exit status 1 (257.990607ms)

                                                
                                                
** stderr ** 
	ssh: Process exited with status 1

                                                
                                                
** /stderr **
functional_test_mount_test.go:115: (dbg) Run:  out/minikube-linux-amd64 -p functional-683521 ssh "findmnt -T /mount-9p | grep 9p"
functional_test_mount_test.go:129: (dbg) Run:  out/minikube-linux-amd64 -p functional-683521 ssh -- ls -la /mount-9p
functional_test_mount_test.go:133: guest mount directory contents
total 2
-rw-r--r-- 1 docker docker 24 Jul 31 10:42 created-by-test
-rw-r--r-- 1 docker docker 24 Jul 31 10:42 created-by-test-removed-by-pod
-rw-r--r-- 1 docker docker 24 Jul 31 10:42 test-1690800145964174961
functional_test_mount_test.go:137: (dbg) Run:  out/minikube-linux-amd64 -p functional-683521 ssh cat /mount-9p/test-1690800145964174961
functional_test_mount_test.go:148: (dbg) Run:  kubectl --context functional-683521 replace --force -f testdata/busybox-mount-test.yaml
functional_test_mount_test.go:153: (dbg) TestFunctional/parallel/MountCmd/any-port: waiting 4m0s for pods matching "integration-test=busybox-mount" in namespace "default" ...
helpers_test.go:344: "busybox-mount" [2dbb317a-51f9-4a82-af57-7d7d6f320f5b] Pending
helpers_test.go:344: "busybox-mount" [2dbb317a-51f9-4a82-af57-7d7d6f320f5b] Pending / Ready:ContainersNotReady (containers with unready status: [mount-munger]) / ContainersReady:ContainersNotReady (containers with unready status: [mount-munger])
helpers_test.go:344: "busybox-mount" [2dbb317a-51f9-4a82-af57-7d7d6f320f5b] Running
helpers_test.go:344: "busybox-mount" [2dbb317a-51f9-4a82-af57-7d7d6f320f5b] Running: Initialized:PodCompleted / Ready:PodCompleted / ContainersReady:PodCompleted
helpers_test.go:344: "busybox-mount" [2dbb317a-51f9-4a82-af57-7d7d6f320f5b] Succeeded: Initialized:PodCompleted / Ready:PodCompleted / ContainersReady:PodCompleted
functional_test_mount_test.go:153: (dbg) TestFunctional/parallel/MountCmd/any-port: integration-test=busybox-mount healthy within 4.010189509s
functional_test_mount_test.go:169: (dbg) Run:  kubectl --context functional-683521 logs busybox-mount
functional_test_mount_test.go:181: (dbg) Run:  out/minikube-linux-amd64 -p functional-683521 ssh stat /mount-9p/created-by-test
functional_test_mount_test.go:181: (dbg) Run:  out/minikube-linux-amd64 -p functional-683521 ssh stat /mount-9p/created-by-pod
functional_test_mount_test.go:90: (dbg) Run:  out/minikube-linux-amd64 -p functional-683521 ssh "sudo umount -f /mount-9p"
functional_test_mount_test.go:94: (dbg) stopping [out/minikube-linux-amd64 mount -p functional-683521 /tmp/TestFunctionalparallelMountCmdany-port280728022/001:/mount-9p --alsologtostderr -v=1] ...
--- PASS: TestFunctional/parallel/MountCmd/any-port (6.89s)
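The mount flow above can be reproduced as follows. A sketch; as in the log, the first findmnt may fail until the 9p server is up, so retry it:

SRC=$(mktemp -d)
echo test > "$SRC/created-by-test"
minikube mount -p functional-683521 "$SRC:/mount-9p" --alsologtostderr -v=1 &
MOUNT_PID=$!
minikube -p functional-683521 ssh "findmnt -T /mount-9p | grep 9p"   # retry until mounted
minikube -p functional-683521 ssh -- ls -la /mount-9p
kill "$MOUNT_PID"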

                                                
                                    
TestFunctional/parallel/ImageCommands/ImageReloadDaemon (2.94s)

                                                
                                                
=== RUN   TestFunctional/parallel/ImageCommands/ImageReloadDaemon
functional_test.go:364: (dbg) Run:  out/minikube-linux-amd64 -p functional-683521 image load --daemon gcr.io/google-containers/addon-resizer:functional-683521 --alsologtostderr
functional_test.go:364: (dbg) Done: out/minikube-linux-amd64 -p functional-683521 image load --daemon gcr.io/google-containers/addon-resizer:functional-683521 --alsologtostderr: (2.734894488s)
functional_test.go:447: (dbg) Run:  out/minikube-linux-amd64 -p functional-683521 image ls
--- PASS: TestFunctional/parallel/ImageCommands/ImageReloadDaemon (2.94s)

                                                
                                    
TestFunctional/parallel/ImageCommands/ImageTagAndLoadDaemon (4.92s)

                                                
                                                
=== RUN   TestFunctional/parallel/ImageCommands/ImageTagAndLoadDaemon
functional_test.go:234: (dbg) Run:  docker pull gcr.io/google-containers/addon-resizer:1.8.9
functional_test.go:239: (dbg) Run:  docker tag gcr.io/google-containers/addon-resizer:1.8.9 gcr.io/google-containers/addon-resizer:functional-683521
functional_test.go:244: (dbg) Run:  out/minikube-linux-amd64 -p functional-683521 image load --daemon gcr.io/google-containers/addon-resizer:functional-683521 --alsologtostderr
functional_test.go:244: (dbg) Done: out/minikube-linux-amd64 -p functional-683521 image load --daemon gcr.io/google-containers/addon-resizer:functional-683521 --alsologtostderr: (3.905848805s)
functional_test.go:447: (dbg) Run:  out/minikube-linux-amd64 -p functional-683521 image ls
--- PASS: TestFunctional/parallel/ImageCommands/ImageTagAndLoadDaemon (4.92s)

                                                
                                    
TestFunctional/parallel/ImageCommands/ImageSaveToFile (1.23s)

                                                
                                                
=== RUN   TestFunctional/parallel/ImageCommands/ImageSaveToFile
functional_test.go:379: (dbg) Run:  out/minikube-linux-amd64 -p functional-683521 image save gcr.io/google-containers/addon-resizer:functional-683521 /home/jenkins/workspace/Docker_Linux_crio_integration/addon-resizer-save.tar --alsologtostderr
functional_test.go:379: (dbg) Done: out/minikube-linux-amd64 -p functional-683521 image save gcr.io/google-containers/addon-resizer:functional-683521 /home/jenkins/workspace/Docker_Linux_crio_integration/addon-resizer-save.tar --alsologtostderr: (1.230515706s)
--- PASS: TestFunctional/parallel/ImageCommands/ImageSaveToFile (1.23s)
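A sketch of the save/restore round trip; loading the tarball back is an assumption for illustration, not part of this test:

minikube -p functional-683521 image save \
  gcr.io/google-containers/addon-resizer:functional-683521 ./addon-resizer-save.tar
minikube -p functional-683521 image load ./addon-resizer-save.tar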

                                                
                                    
TestFunctional/parallel/ImageCommands/ImageRemove (0.91s)

                                                
                                                
=== RUN   TestFunctional/parallel/ImageCommands/ImageRemove
functional_test.go:391: (dbg) Run:  out/minikube-linux-amd64 -p functional-683521 image rm gcr.io/google-containers/addon-resizer:functional-683521 --alsologtostderr
functional_test.go:447: (dbg) Run:  out/minikube-linux-amd64 -p functional-683521 image ls
--- PASS: TestFunctional/parallel/ImageCommands/ImageRemove (0.91s)

                                                
                                    
TestFunctional/parallel/MountCmd/VerifyCleanup (1.91s)

                                                
                                                
=== RUN   TestFunctional/parallel/MountCmd/VerifyCleanup
functional_test_mount_test.go:298: (dbg) daemon: [out/minikube-linux-amd64 mount -p functional-683521 /tmp/TestFunctionalparallelMountCmdVerifyCleanup2460904278/001:/mount1 --alsologtostderr -v=1]
functional_test_mount_test.go:298: (dbg) daemon: [out/minikube-linux-amd64 mount -p functional-683521 /tmp/TestFunctionalparallelMountCmdVerifyCleanup2460904278/001:/mount2 --alsologtostderr -v=1]
functional_test_mount_test.go:298: (dbg) daemon: [out/minikube-linux-amd64 mount -p functional-683521 /tmp/TestFunctionalparallelMountCmdVerifyCleanup2460904278/001:/mount3 --alsologtostderr -v=1]
functional_test_mount_test.go:325: (dbg) Run:  out/minikube-linux-amd64 -p functional-683521 ssh "findmnt -T" /mount1
functional_test_mount_test.go:325: (dbg) Non-zero exit: out/minikube-linux-amd64 -p functional-683521 ssh "findmnt -T" /mount1: exit status 1 (383.743838ms)

                                                
                                                
** stderr ** 
	ssh: Process exited with status 1

                                                
                                                
** /stderr **
functional_test_mount_test.go:325: (dbg) Run:  out/minikube-linux-amd64 -p functional-683521 ssh "findmnt -T" /mount1
functional_test_mount_test.go:325: (dbg) Run:  out/minikube-linux-amd64 -p functional-683521 ssh "findmnt -T" /mount2
functional_test_mount_test.go:325: (dbg) Run:  out/minikube-linux-amd64 -p functional-683521 ssh "findmnt -T" /mount3
functional_test_mount_test.go:370: (dbg) Run:  out/minikube-linux-amd64 mount -p functional-683521 --kill=true
functional_test_mount_test.go:313: (dbg) stopping [out/minikube-linux-amd64 mount -p functional-683521 /tmp/TestFunctionalparallelMountCmdVerifyCleanup2460904278/001:/mount1 --alsologtostderr -v=1] ...
helpers_test.go:490: unable to find parent, assuming dead: process does not exist
functional_test_mount_test.go:313: (dbg) stopping [out/minikube-linux-amd64 mount -p functional-683521 /tmp/TestFunctionalparallelMountCmdVerifyCleanup2460904278/001:/mount2 --alsologtostderr -v=1] ...
helpers_test.go:490: unable to find parent, assuming dead: process does not exist
functional_test_mount_test.go:313: (dbg) stopping [out/minikube-linux-amd64 mount -p functional-683521 /tmp/TestFunctionalparallelMountCmdVerifyCleanup2460904278/001:/mount3 --alsologtostderr -v=1] ...
helpers_test.go:490: unable to find parent, assuming dead: process does not exist
--- PASS: TestFunctional/parallel/MountCmd/VerifyCleanup (1.91s)
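`--kill=true` terminates every mount process for the profile, which is why all three background mounts are reported dead afterwards. A sketch:

minikube mount -p functional-683521 /tmp/src:/mount1 --alsologtostderr -v=1 &
minikube mount -p functional-683521 /tmp/src:/mount2 --alsologtostderr -v=1 &
minikube mount -p functional-683521 /tmp/src:/mount3 --alsologtostderr -v=1 &
minikube mount -p functional-683521 --kill=true    # kills all three at once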

                                                
                                    
TestFunctional/parallel/ImageCommands/ImageSaveDaemon (3.53s)

                                                
                                                
=== RUN   TestFunctional/parallel/ImageCommands/ImageSaveDaemon
functional_test.go:418: (dbg) Run:  docker rmi gcr.io/google-containers/addon-resizer:functional-683521
functional_test.go:423: (dbg) Run:  out/minikube-linux-amd64 -p functional-683521 image save --daemon gcr.io/google-containers/addon-resizer:functional-683521 --alsologtostderr
functional_test.go:423: (dbg) Done: out/minikube-linux-amd64 -p functional-683521 image save --daemon gcr.io/google-containers/addon-resizer:functional-683521 --alsologtostderr: (3.43780287s)
functional_test.go:428: (dbg) Run:  docker image inspect gcr.io/google-containers/addon-resizer:functional-683521
--- PASS: TestFunctional/parallel/ImageCommands/ImageSaveDaemon (3.53s)
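`image save --daemon` is the inverse of `image load --daemon`: it pushes the image from the cluster runtime back into the host's Docker daemon. A sketch:

docker rmi gcr.io/google-containers/addon-resizer:functional-683521
minikube -p functional-683521 image save --daemon \
  gcr.io/google-containers/addon-resizer:functional-683521
docker image inspect gcr.io/google-containers/addon-resizer:functional-683521 \
  --format '{{.Id}}'   # succeeds only if the image is back in the daemon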

                                                
                                    
TestFunctional/delete_addon-resizer_images (0.07s)

                                                
                                                
=== RUN   TestFunctional/delete_addon-resizer_images
functional_test.go:189: (dbg) Run:  docker rmi -f gcr.io/google-containers/addon-resizer:1.8.8
functional_test.go:189: (dbg) Run:  docker rmi -f gcr.io/google-containers/addon-resizer:functional-683521
--- PASS: TestFunctional/delete_addon-resizer_images (0.07s)

                                                
                                    
TestFunctional/delete_my-image_image (0.02s)

                                                
                                                
=== RUN   TestFunctional/delete_my-image_image
functional_test.go:197: (dbg) Run:  docker rmi -f localhost/my-image:functional-683521
--- PASS: TestFunctional/delete_my-image_image (0.02s)

                                                
                                    
TestFunctional/delete_minikube_cached_images (0.01s)

                                                
                                                
=== RUN   TestFunctional/delete_minikube_cached_images
functional_test.go:205: (dbg) Run:  docker rmi -f minikube-local-cache-test:functional-683521
--- PASS: TestFunctional/delete_minikube_cached_images (0.01s)

                                                
                                    
TestIngressAddonLegacy/StartLegacyK8sCluster (78.32s)

                                                
                                                
=== RUN   TestIngressAddonLegacy/StartLegacyK8sCluster
ingress_addon_legacy_test.go:39: (dbg) Run:  out/minikube-linux-amd64 start -p ingress-addon-legacy-538476 --kubernetes-version=v1.18.20 --memory=4096 --wait=true --alsologtostderr -v=5 --driver=docker  --container-runtime=crio
E0731 10:43:42.725230   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/addons-764200/client.crt: no such file or directory
ingress_addon_legacy_test.go:39: (dbg) Done: out/minikube-linux-amd64 start -p ingress-addon-legacy-538476 --kubernetes-version=v1.18.20 --memory=4096 --wait=true --alsologtostderr -v=5 --driver=docker  --container-runtime=crio: (1m18.321148594s)
--- PASS: TestIngressAddonLegacy/StartLegacyK8sCluster (78.32s)

                                                
                                    
TestIngressAddonLegacy/serial/ValidateIngressAddonActivation (10.72s)

                                                
                                                
=== RUN   TestIngressAddonLegacy/serial/ValidateIngressAddonActivation
ingress_addon_legacy_test.go:70: (dbg) Run:  out/minikube-linux-amd64 -p ingress-addon-legacy-538476 addons enable ingress --alsologtostderr -v=5
ingress_addon_legacy_test.go:70: (dbg) Done: out/minikube-linux-amd64 -p ingress-addon-legacy-538476 addons enable ingress --alsologtostderr -v=5: (10.721227421s)
--- PASS: TestIngressAddonLegacy/serial/ValidateIngressAddonActivation (10.72s)

                                                
                                    
TestIngressAddonLegacy/serial/ValidateIngressDNSAddonActivation (0.51s)

                                                
                                                
=== RUN   TestIngressAddonLegacy/serial/ValidateIngressDNSAddonActivation
ingress_addon_legacy_test.go:79: (dbg) Run:  out/minikube-linux-amd64 -p ingress-addon-legacy-538476 addons enable ingress-dns --alsologtostderr -v=5
--- PASS: TestIngressAddonLegacy/serial/ValidateIngressDNSAddonActivation (0.51s)

                                                
                                    
TestJSONOutput/start/Command (66.81s)

                                                
                                                
=== RUN   TestJSONOutput/start/Command
json_output_test.go:63: (dbg) Run:  out/minikube-linux-amd64 start -p json-output-108054 --output=json --user=testUser --memory=2200 --wait=true --driver=docker  --container-runtime=crio
E0731 10:47:32.803331   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/functional-683521/client.crt: no such file or directory
E0731 10:47:53.284048   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/functional-683521/client.crt: no such file or directory
E0731 10:48:34.244727   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/functional-683521/client.crt: no such file or directory
json_output_test.go:63: (dbg) Done: out/minikube-linux-amd64 start -p json-output-108054 --output=json --user=testUser --memory=2200 --wait=true --driver=docker  --container-runtime=crio: (1m6.808047519s)
--- PASS: TestJSONOutput/start/Command (66.81s)
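With --output=json every progress line is a CloudEvent (compare the io.k8s.sigs.minikube.audit event earlier in this report). A sketch of filtering step events with jq; the step type name and the data fields are assumptions about the event schema:

minikube start -p json-output-108054 --output=json --user=testUser \
    --memory=2200 --wait=true --driver=docker --container-runtime=crio \
  | jq -r 'select(.type == "io.k8s.sigs.minikube.step")
           | "\(.data.currentstep): \(.data.message)"'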

                                                
                                    
TestJSONOutput/start/Audit (0s)

                                                
                                                
=== RUN   TestJSONOutput/start/Audit
--- PASS: TestJSONOutput/start/Audit (0.00s)

                                                
                                    
TestJSONOutput/start/parallel/DistinctCurrentSteps (0s)

                                                
                                                
=== RUN   TestJSONOutput/start/parallel/DistinctCurrentSteps
=== PAUSE TestJSONOutput/start/parallel/DistinctCurrentSteps

                                                
                                                

                                                
                                                
=== CONT  TestJSONOutput/start/parallel/DistinctCurrentSteps
--- PASS: TestJSONOutput/start/parallel/DistinctCurrentSteps (0.00s)

                                                
                                    
TestJSONOutput/start/parallel/IncreasingCurrentSteps (0s)

                                                
                                                
=== RUN   TestJSONOutput/start/parallel/IncreasingCurrentSteps
=== PAUSE TestJSONOutput/start/parallel/IncreasingCurrentSteps

                                                
                                                

                                                
                                                
=== CONT  TestJSONOutput/start/parallel/IncreasingCurrentSteps
--- PASS: TestJSONOutput/start/parallel/IncreasingCurrentSteps (0.00s)

                                                
                                    
TestJSONOutput/pause/Command (0.62s)

                                                
                                                
=== RUN   TestJSONOutput/pause/Command
json_output_test.go:63: (dbg) Run:  out/minikube-linux-amd64 pause -p json-output-108054 --output=json --user=testUser
--- PASS: TestJSONOutput/pause/Command (0.62s)

                                                
                                    
TestJSONOutput/pause/Audit (0s)

                                                
                                                
=== RUN   TestJSONOutput/pause/Audit
--- PASS: TestJSONOutput/pause/Audit (0.00s)

                                                
                                    
TestJSONOutput/pause/parallel/DistinctCurrentSteps (0s)

                                                
                                                
=== RUN   TestJSONOutput/pause/parallel/DistinctCurrentSteps
=== PAUSE TestJSONOutput/pause/parallel/DistinctCurrentSteps

                                                
                                                

                                                
                                                
=== CONT  TestJSONOutput/pause/parallel/DistinctCurrentSteps
--- PASS: TestJSONOutput/pause/parallel/DistinctCurrentSteps (0.00s)

                                                
                                    
TestJSONOutput/pause/parallel/IncreasingCurrentSteps (0s)

                                                
                                                
=== RUN   TestJSONOutput/pause/parallel/IncreasingCurrentSteps
=== PAUSE TestJSONOutput/pause/parallel/IncreasingCurrentSteps

                                                
                                                

                                                
                                                
=== CONT  TestJSONOutput/pause/parallel/IncreasingCurrentSteps
--- PASS: TestJSONOutput/pause/parallel/IncreasingCurrentSteps (0.00s)

                                                
                                    
TestJSONOutput/unpause/Command (0.57s)

                                                
                                                
=== RUN   TestJSONOutput/unpause/Command
json_output_test.go:63: (dbg) Run:  out/minikube-linux-amd64 unpause -p json-output-108054 --output=json --user=testUser
--- PASS: TestJSONOutput/unpause/Command (0.57s)

TestJSONOutput/unpause/Audit (0s)

=== RUN   TestJSONOutput/unpause/Audit
--- PASS: TestJSONOutput/unpause/Audit (0.00s)

TestJSONOutput/unpause/parallel/DistinctCurrentSteps (0s)

=== RUN   TestJSONOutput/unpause/parallel/DistinctCurrentSteps
=== PAUSE TestJSONOutput/unpause/parallel/DistinctCurrentSteps

=== CONT  TestJSONOutput/unpause/parallel/DistinctCurrentSteps
--- PASS: TestJSONOutput/unpause/parallel/DistinctCurrentSteps (0.00s)

TestJSONOutput/unpause/parallel/IncreasingCurrentSteps (0s)

=== RUN   TestJSONOutput/unpause/parallel/IncreasingCurrentSteps
=== PAUSE TestJSONOutput/unpause/parallel/IncreasingCurrentSteps

=== CONT  TestJSONOutput/unpause/parallel/IncreasingCurrentSteps
--- PASS: TestJSONOutput/unpause/parallel/IncreasingCurrentSteps (0.00s)

TestJSONOutput/stop/Command (5.64s)

=== RUN   TestJSONOutput/stop/Command
json_output_test.go:63: (dbg) Run:  out/minikube-linux-amd64 stop -p json-output-108054 --output=json --user=testUser
json_output_test.go:63: (dbg) Done: out/minikube-linux-amd64 stop -p json-output-108054 --output=json --user=testUser: (5.64063599s)
--- PASS: TestJSONOutput/stop/Command (5.64s)

TestJSONOutput/stop/Audit (0s)

=== RUN   TestJSONOutput/stop/Audit
--- PASS: TestJSONOutput/stop/Audit (0.00s)

TestJSONOutput/stop/parallel/DistinctCurrentSteps (0s)

=== RUN   TestJSONOutput/stop/parallel/DistinctCurrentSteps
=== PAUSE TestJSONOutput/stop/parallel/DistinctCurrentSteps

=== CONT  TestJSONOutput/stop/parallel/DistinctCurrentSteps
--- PASS: TestJSONOutput/stop/parallel/DistinctCurrentSteps (0.00s)

TestJSONOutput/stop/parallel/IncreasingCurrentSteps (0s)

=== RUN   TestJSONOutput/stop/parallel/IncreasingCurrentSteps
=== PAUSE TestJSONOutput/stop/parallel/IncreasingCurrentSteps

=== CONT  TestJSONOutput/stop/parallel/IncreasingCurrentSteps
--- PASS: TestJSONOutput/stop/parallel/IncreasingCurrentSteps (0.00s)

TestErrorJSONOutput (0.18s)

=== RUN   TestErrorJSONOutput
json_output_test.go:160: (dbg) Run:  out/minikube-linux-amd64 start -p json-output-error-640468 --memory=2200 --output=json --wait=true --driver=fail
json_output_test.go:160: (dbg) Non-zero exit: out/minikube-linux-amd64 start -p json-output-error-640468 --memory=2200 --output=json --wait=true --driver=fail: exit status 56 (60.446635ms)
-- stdout --
	{"specversion":"1.0","id":"c7ebda68-17f3-4942-be22-0b4c4208d67d","source":"https://minikube.sigs.k8s.io/","type":"io.k8s.sigs.minikube.step","datacontenttype":"application/json","data":{"currentstep":"0","message":"[json-output-error-640468] minikube v1.31.1 on Ubuntu 20.04 (kvm/amd64)","name":"Initial Minikube Setup","totalsteps":"19"}}
	{"specversion":"1.0","id":"261e8ebb-e5c8-400d-96f1-9571be814854","source":"https://minikube.sigs.k8s.io/","type":"io.k8s.sigs.minikube.info","datacontenttype":"application/json","data":{"message":"MINIKUBE_LOCATION=16969"}}
	{"specversion":"1.0","id":"2a92c5b2-7481-42f9-b896-ed9cf31ce4fc","source":"https://minikube.sigs.k8s.io/","type":"io.k8s.sigs.minikube.info","datacontenttype":"application/json","data":{"message":"MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true"}}
	{"specversion":"1.0","id":"f7220ccf-db7b-4d22-ac83-55e56197f50f","source":"https://minikube.sigs.k8s.io/","type":"io.k8s.sigs.minikube.info","datacontenttype":"application/json","data":{"message":"KUBECONFIG=/home/jenkins/minikube-integration/16969-5799/kubeconfig"}}
	{"specversion":"1.0","id":"4aad25a6-4cb9-4757-8622-08bbad634dc9","source":"https://minikube.sigs.k8s.io/","type":"io.k8s.sigs.minikube.info","datacontenttype":"application/json","data":{"message":"MINIKUBE_HOME=/home/jenkins/minikube-integration/16969-5799/.minikube"}}
	{"specversion":"1.0","id":"affa3e85-a4aa-45f2-b49c-4deff2e2e27d","source":"https://minikube.sigs.k8s.io/","type":"io.k8s.sigs.minikube.info","datacontenttype":"application/json","data":{"message":"MINIKUBE_BIN=out/minikube-linux-amd64"}}
	{"specversion":"1.0","id":"43aa0ff6-30fb-49d1-9a6f-2669ed53c582","source":"https://minikube.sigs.k8s.io/","type":"io.k8s.sigs.minikube.info","datacontenttype":"application/json","data":{"message":"MINIKUBE_FORCE_SYSTEMD="}}
	{"specversion":"1.0","id":"04344818-0a38-4faa-a14c-25e28558404f","source":"https://minikube.sigs.k8s.io/","type":"io.k8s.sigs.minikube.error","datacontenttype":"application/json","data":{"advice":"","exitcode":"56","issues":"","message":"The driver 'fail' is not supported on linux/amd64","name":"DRV_UNSUPPORTED_OS","url":""}}
-- /stdout --
helpers_test.go:175: Cleaning up "json-output-error-640468" profile ...
helpers_test.go:178: (dbg) Run:  out/minikube-linux-amd64 delete -p json-output-error-640468
--- PASS: TestErrorJSONOutput (0.18s)
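
The CloudEvents stream above is plain line-delimited JSON, so the envelope decodes with a small struct. Below is a minimal Go sketch (illustrative, not the harness's code; the main() wrapper and stdin plumbing are assumptions) of parsing such a stream and checking the property the DistinctCurrentSteps/IncreasingCurrentSteps subtests assert by name: step events carry a strictly increasing currentstep.

// cloudevent_check.go — hypothetical consumer of minikube's --output=json stream.
package main

import (
	"bufio"
	"encoding/json"
	"fmt"
	"os"
	"strconv"
)

// cloudEvent mirrors the envelope printed above; data is a flat string map.
type cloudEvent struct {
	SpecVersion string            `json:"specversion"`
	ID          string            `json:"id"`
	Source      string            `json:"source"`
	Type        string            `json:"type"`
	Data        map[string]string `json:"data"`
}

func main() {
	last := -1
	sc := bufio.NewScanner(os.Stdin) // pipe `minikube start --output=json` here
	for sc.Scan() {
		var ev cloudEvent
		if err := json.Unmarshal(sc.Bytes(), &ev); err != nil {
			fmt.Fprintf(os.Stderr, "not a cloudevent: %v\n", err)
			continue
		}
		// Only step events carry a currentstep; info/error events do not.
		if ev.Type != "io.k8s.sigs.minikube.step" {
			continue
		}
		cur, err := strconv.Atoi(ev.Data["currentstep"])
		if err != nil {
			fmt.Fprintf(os.Stderr, "bad currentstep: %v\n", err)
			continue
		}
		if cur <= last {
			fmt.Fprintf(os.Stderr, "steps not increasing: %d after %d\n", cur, last)
		}
		last = cur
	}
}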

TestKicCustomNetwork/create_custom_network (31.24s)

=== RUN   TestKicCustomNetwork/create_custom_network
kic_custom_network_test.go:57: (dbg) Run:  out/minikube-linux-amd64 start -p docker-network-443496 --network=
kic_custom_network_test.go:57: (dbg) Done: out/minikube-linux-amd64 start -p docker-network-443496 --network=: (29.174384875s)
kic_custom_network_test.go:150: (dbg) Run:  docker network ls --format {{.Name}}
helpers_test.go:175: Cleaning up "docker-network-443496" profile ...
helpers_test.go:178: (dbg) Run:  out/minikube-linux-amd64 delete -p docker-network-443496
helpers_test.go:178: (dbg) Done: out/minikube-linux-amd64 delete -p docker-network-443496: (2.05015125s)
--- PASS: TestKicCustomNetwork/create_custom_network (31.24s)
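
The verification step above boils down to one docker CLI call. A minimal sketch of the same check, assuming docker is on PATH (the helper name networkExists is illustrative):

package main

import (
	"fmt"
	"os/exec"
	"strings"
)

// networkExists reports whether a docker network with the given name is listed.
func networkExists(name string) (bool, error) {
	out, err := exec.Command("docker", "network", "ls", "--format", "{{.Name}}").Output()
	if err != nil {
		return false, err
	}
	for _, n := range strings.Split(strings.TrimSpace(string(out)), "\n") {
		if n == name {
			return true, nil
		}
	}
	return false, nil
}

func main() {
	ok, err := networkExists("docker-network-443496") // profile name from this run
	fmt.Println(ok, err)
}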

TestKicCustomNetwork/use_default_bridge_network (23.22s)

=== RUN   TestKicCustomNetwork/use_default_bridge_network
kic_custom_network_test.go:57: (dbg) Run:  out/minikube-linux-amd64 start -p docker-network-517092 --network=bridge
E0731 10:49:32.970352   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/ingress-addon-legacy-538476/client.crt: no such file or directory
E0731 10:49:32.975602   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/ingress-addon-legacy-538476/client.crt: no such file or directory
E0731 10:49:32.985841   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/ingress-addon-legacy-538476/client.crt: no such file or directory
E0731 10:49:33.006075   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/ingress-addon-legacy-538476/client.crt: no such file or directory
E0731 10:49:33.046333   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/ingress-addon-legacy-538476/client.crt: no such file or directory
E0731 10:49:33.126643   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/ingress-addon-legacy-538476/client.crt: no such file or directory
E0731 10:49:33.287097   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/ingress-addon-legacy-538476/client.crt: no such file or directory
E0731 10:49:33.607434   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/ingress-addon-legacy-538476/client.crt: no such file or directory
E0731 10:49:34.248496   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/ingress-addon-legacy-538476/client.crt: no such file or directory
E0731 10:49:35.529336   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/ingress-addon-legacy-538476/client.crt: no such file or directory
E0731 10:49:38.090287   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/ingress-addon-legacy-538476/client.crt: no such file or directory
kic_custom_network_test.go:57: (dbg) Done: out/minikube-linux-amd64 start -p docker-network-517092 --network=bridge: (21.309577461s)
kic_custom_network_test.go:150: (dbg) Run:  docker network ls --format {{.Name}}
helpers_test.go:175: Cleaning up "docker-network-517092" profile ...
helpers_test.go:178: (dbg) Run:  out/minikube-linux-amd64 delete -p docker-network-517092
helpers_test.go:178: (dbg) Done: out/minikube-linux-amd64 delete -p docker-network-517092: (1.891785436s)
--- PASS: TestKicCustomNetwork/use_default_bridge_network (23.22s)

TestKicExistingNetwork (27.13s)

=== RUN   TestKicExistingNetwork
kic_custom_network_test.go:150: (dbg) Run:  docker network ls --format {{.Name}}
kic_custom_network_test.go:93: (dbg) Run:  out/minikube-linux-amd64 start -p existing-network-308539 --network=existing-network
E0731 10:49:43.211368   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/ingress-addon-legacy-538476/client.crt: no such file or directory
E0731 10:49:53.451811   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/ingress-addon-legacy-538476/client.crt: no such file or directory
E0731 10:49:56.165471   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/functional-683521/client.crt: no such file or directory
kic_custom_network_test.go:93: (dbg) Done: out/minikube-linux-amd64 start -p existing-network-308539 --network=existing-network: (25.10970714s)
helpers_test.go:175: Cleaning up "existing-network-308539" profile ...
helpers_test.go:178: (dbg) Run:  out/minikube-linux-amd64 delete -p existing-network-308539
helpers_test.go:178: (dbg) Done: out/minikube-linux-amd64 delete -p existing-network-308539: (1.899370322s)
--- PASS: TestKicExistingNetwork (27.13s)

TestKicCustomSubnet (23.75s)

=== RUN   TestKicCustomSubnet
kic_custom_network_test.go:112: (dbg) Run:  out/minikube-linux-amd64 start -p custom-subnet-237473 --subnet=192.168.60.0/24
E0731 10:50:13.932486   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/ingress-addon-legacy-538476/client.crt: no such file or directory
kic_custom_network_test.go:112: (dbg) Done: out/minikube-linux-amd64 start -p custom-subnet-237473 --subnet=192.168.60.0/24: (21.738717708s)
kic_custom_network_test.go:161: (dbg) Run:  docker network inspect custom-subnet-237473 --format "{{(index .IPAM.Config 0).Subnet}}"
helpers_test.go:175: Cleaning up "custom-subnet-237473" profile ...
helpers_test.go:178: (dbg) Run:  out/minikube-linux-amd64 delete -p custom-subnet-237473
helpers_test.go:178: (dbg) Done: out/minikube-linux-amd64 delete -p custom-subnet-237473: (1.99669724s)
--- PASS: TestKicCustomSubnet (23.75s)
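
kic_custom_network_test.go:161 reads the subnet back with a Go template. A sketch of that assertion, reusing the exact command from the log (network name and CIDR are the ones used in this run):

package main

import (
	"fmt"
	"os/exec"
	"strings"
)

func main() {
	const want = "192.168.60.0/24"
	// Same inspect template the test runs above.
	out, err := exec.Command("docker", "network", "inspect", "custom-subnet-237473",
		"--format", "{{(index .IPAM.Config 0).Subnet}}").Output()
	if err != nil {
		fmt.Println("inspect failed:", err)
		return
	}
	if got := strings.TrimSpace(string(out)); got != want {
		fmt.Printf("subnet mismatch: got %s, want %s\n", got, want)
	}
}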

TestKicStaticIP (26.03s)

=== RUN   TestKicStaticIP
kic_custom_network_test.go:132: (dbg) Run:  out/minikube-linux-amd64 start -p static-ip-620276 --static-ip=192.168.200.200
E0731 10:50:54.893147   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/ingress-addon-legacy-538476/client.crt: no such file or directory
kic_custom_network_test.go:132: (dbg) Done: out/minikube-linux-amd64 start -p static-ip-620276 --static-ip=192.168.200.200: (23.838180733s)
kic_custom_network_test.go:138: (dbg) Run:  out/minikube-linux-amd64 -p static-ip-620276 ip
helpers_test.go:175: Cleaning up "static-ip-620276" profile ...
helpers_test.go:178: (dbg) Run:  out/minikube-linux-amd64 delete -p static-ip-620276
E0731 10:50:58.881334   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/addons-764200/client.crt: no such file or directory
helpers_test.go:178: (dbg) Done: out/minikube-linux-amd64 delete -p static-ip-620276: (2.078756853s)
--- PASS: TestKicStaticIP (26.03s)
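
The follow-up `minikube ip` call is what closes the loop: the printed address must equal the one requested with --static-ip. A sketch, using the binary path and profile name from this run:

package main

import (
	"fmt"
	"os/exec"
	"strings"
)

func main() {
	out, err := exec.Command("out/minikube-linux-amd64", "-p", "static-ip-620276", "ip").Output()
	if err != nil {
		fmt.Println("ip failed:", err)
		return
	}
	if got := strings.TrimSpace(string(out)); got != "192.168.200.200" {
		fmt.Printf("got %s, want 192.168.200.200\n", got)
	}
}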

TestMainNoArgs (0.04s)

=== RUN   TestMainNoArgs
main_test.go:68: (dbg) Run:  out/minikube-linux-amd64
--- PASS: TestMainNoArgs (0.04s)

TestMinikubeProfile (51.42s)

=== RUN   TestMinikubeProfile
minikube_profile_test.go:44: (dbg) Run:  out/minikube-linux-amd64 start -p first-986216 --driver=docker  --container-runtime=crio
minikube_profile_test.go:44: (dbg) Done: out/minikube-linux-amd64 start -p first-986216 --driver=docker  --container-runtime=crio: (23.053795333s)
minikube_profile_test.go:44: (dbg) Run:  out/minikube-linux-amd64 start -p second-989598 --driver=docker  --container-runtime=crio
minikube_profile_test.go:44: (dbg) Done: out/minikube-linux-amd64 start -p second-989598 --driver=docker  --container-runtime=crio: (23.487939771s)
minikube_profile_test.go:51: (dbg) Run:  out/minikube-linux-amd64 profile first-986216
minikube_profile_test.go:55: (dbg) Run:  out/minikube-linux-amd64 profile list -ojson
minikube_profile_test.go:51: (dbg) Run:  out/minikube-linux-amd64 profile second-989598
minikube_profile_test.go:55: (dbg) Run:  out/minikube-linux-amd64 profile list -ojson
helpers_test.go:175: Cleaning up "second-989598" profile ...
helpers_test.go:178: (dbg) Run:  out/minikube-linux-amd64 delete -p second-989598
helpers_test.go:178: (dbg) Done: out/minikube-linux-amd64 delete -p second-989598: (1.807592728s)
helpers_test.go:175: Cleaning up "first-986216" profile ...
helpers_test.go:178: (dbg) Run:  out/minikube-linux-amd64 delete -p first-986216
helpers_test.go:178: (dbg) Done: out/minikube-linux-amd64 delete -p first-986216: (2.160809561s)
--- PASS: TestMinikubeProfile (51.42s)
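
`profile list -ojson` is consumed twice above. A sketch of decoding it; the top-level valid/invalid arrays of profiles carrying a Name field are an assumption about the output shape, not something this log confirms:

package main

import (
	"encoding/json"
	"fmt"
	"os/exec"
)

// profileList is an assumed shape for `minikube profile list -ojson`.
type profileList struct {
	Valid []struct {
		Name string
	} `json:"valid"`
	Invalid []struct {
		Name string
	} `json:"invalid"`
}

func main() {
	out, err := exec.Command("out/minikube-linux-amd64", "profile", "list", "-ojson").Output()
	if err != nil {
		fmt.Println("profile list failed:", err)
		return
	}
	var pl profileList
	if err := json.Unmarshal(out, &pl); err != nil {
		fmt.Println("decode failed:", err)
		return
	}
	for _, p := range pl.Valid {
		fmt.Println("valid profile:", p.Name)
	}
}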

TestMountStart/serial/StartWithMountFirst (7.91s)

=== RUN   TestMountStart/serial/StartWithMountFirst
mount_start_test.go:98: (dbg) Run:  out/minikube-linux-amd64 start -p mount-start-1-515905 --memory=2048 --mount --mount-gid 0 --mount-msize 6543 --mount-port 46464 --mount-uid 0 --no-kubernetes --driver=docker  --container-runtime=crio
mount_start_test.go:98: (dbg) Done: out/minikube-linux-amd64 start -p mount-start-1-515905 --memory=2048 --mount --mount-gid 0 --mount-msize 6543 --mount-port 46464 --mount-uid 0 --no-kubernetes --driver=docker  --container-runtime=crio: (6.912705655s)
--- PASS: TestMountStart/serial/StartWithMountFirst (7.91s)

TestMountStart/serial/VerifyMountFirst (0.23s)

=== RUN   TestMountStart/serial/VerifyMountFirst
mount_start_test.go:114: (dbg) Run:  out/minikube-linux-amd64 -p mount-start-1-515905 ssh -- ls /minikube-host
--- PASS: TestMountStart/serial/VerifyMountFirst (0.23s)
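
Every VerifyMount* step in this group is the same probe: `ssh -- ls /minikube-host` must exit zero. A sketch of that probe (binary path and profile name from this run):

package main

import (
	"fmt"
	"os/exec"
)

func main() {
	cmd := exec.Command("out/minikube-linux-amd64", "-p", "mount-start-1-515905",
		"ssh", "--", "ls", "/minikube-host")
	out, err := cmd.CombinedOutput()
	if err != nil {
		// Non-zero exit means the host directory is not mounted (or unreadable).
		fmt.Printf("mount not visible: %v\n%s", err, out)
		return
	}
	fmt.Printf("mount contents:\n%s", out)
}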

TestMountStart/serial/StartWithMountSecond (5.04s)

=== RUN   TestMountStart/serial/StartWithMountSecond
mount_start_test.go:98: (dbg) Run:  out/minikube-linux-amd64 start -p mount-start-2-532973 --memory=2048 --mount --mount-gid 0 --mount-msize 6543 --mount-port 46465 --mount-uid 0 --no-kubernetes --driver=docker  --container-runtime=crio
mount_start_test.go:98: (dbg) Done: out/minikube-linux-amd64 start -p mount-start-2-532973 --memory=2048 --mount --mount-gid 0 --mount-msize 6543 --mount-port 46465 --mount-uid 0 --no-kubernetes --driver=docker  --container-runtime=crio: (4.041816003s)
--- PASS: TestMountStart/serial/StartWithMountSecond (5.04s)

TestMountStart/serial/VerifyMountSecond (0.23s)

=== RUN   TestMountStart/serial/VerifyMountSecond
mount_start_test.go:114: (dbg) Run:  out/minikube-linux-amd64 -p mount-start-2-532973 ssh -- ls /minikube-host
--- PASS: TestMountStart/serial/VerifyMountSecond (0.23s)

TestMountStart/serial/DeleteFirst (1.59s)

=== RUN   TestMountStart/serial/DeleteFirst
pause_test.go:132: (dbg) Run:  out/minikube-linux-amd64 delete -p mount-start-1-515905 --alsologtostderr -v=5
pause_test.go:132: (dbg) Done: out/minikube-linux-amd64 delete -p mount-start-1-515905 --alsologtostderr -v=5: (1.593552686s)
--- PASS: TestMountStart/serial/DeleteFirst (1.59s)

TestMountStart/serial/VerifyMountPostDelete (0.23s)

=== RUN   TestMountStart/serial/VerifyMountPostDelete
mount_start_test.go:114: (dbg) Run:  out/minikube-linux-amd64 -p mount-start-2-532973 ssh -- ls /minikube-host
--- PASS: TestMountStart/serial/VerifyMountPostDelete (0.23s)

TestMountStart/serial/Stop (1.2s)

=== RUN   TestMountStart/serial/Stop
mount_start_test.go:155: (dbg) Run:  out/minikube-linux-amd64 stop -p mount-start-2-532973
mount_start_test.go:155: (dbg) Done: out/minikube-linux-amd64 stop -p mount-start-2-532973: (1.195323742s)
--- PASS: TestMountStart/serial/Stop (1.20s)

TestMountStart/serial/RestartStopped (6.94s)

=== RUN   TestMountStart/serial/RestartStopped
mount_start_test.go:166: (dbg) Run:  out/minikube-linux-amd64 start -p mount-start-2-532973
E0731 10:52:12.322374   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/functional-683521/client.crt: no such file or directory
mount_start_test.go:166: (dbg) Done: out/minikube-linux-amd64 start -p mount-start-2-532973: (5.939975934s)
--- PASS: TestMountStart/serial/RestartStopped (6.94s)

TestMountStart/serial/VerifyMountPostStop (0.23s)

=== RUN   TestMountStart/serial/VerifyMountPostStop
mount_start_test.go:114: (dbg) Run:  out/minikube-linux-amd64 -p mount-start-2-532973 ssh -- ls /minikube-host
--- PASS: TestMountStart/serial/VerifyMountPostStop (0.23s)

TestMultiNode/serial/FreshStart2Nodes (114.53s)

=== RUN   TestMultiNode/serial/FreshStart2Nodes
multinode_test.go:85: (dbg) Run:  out/minikube-linux-amd64 start -p multinode-776386 --wait=true --memory=2200 --nodes=2 -v=8 --alsologtostderr --driver=docker  --container-runtime=crio
E0731 10:52:16.814160   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/ingress-addon-legacy-538476/client.crt: no such file or directory
E0731 10:52:40.005764   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/functional-683521/client.crt: no such file or directory
multinode_test.go:85: (dbg) Done: out/minikube-linux-amd64 start -p multinode-776386 --wait=true --memory=2200 --nodes=2 -v=8 --alsologtostderr --driver=docker  --container-runtime=crio: (1m54.104083037s)
multinode_test.go:91: (dbg) Run:  out/minikube-linux-amd64 -p multinode-776386 status --alsologtostderr
--- PASS: TestMultiNode/serial/FreshStart2Nodes (114.53s)

TestMultiNode/serial/DeployApp2Nodes (3.6s)

=== RUN   TestMultiNode/serial/DeployApp2Nodes
multinode_test.go:481: (dbg) Run:  out/minikube-linux-amd64 kubectl -p multinode-776386 -- apply -f ./testdata/multinodes/multinode-pod-dns-test.yaml
multinode_test.go:486: (dbg) Run:  out/minikube-linux-amd64 kubectl -p multinode-776386 -- rollout status deployment/busybox
multinode_test.go:486: (dbg) Done: out/minikube-linux-amd64 kubectl -p multinode-776386 -- rollout status deployment/busybox: (1.850229209s)
multinode_test.go:493: (dbg) Run:  out/minikube-linux-amd64 kubectl -p multinode-776386 -- get pods -o jsonpath='{.items[*].status.podIP}'
multinode_test.go:516: (dbg) Run:  out/minikube-linux-amd64 kubectl -p multinode-776386 -- get pods -o jsonpath='{.items[*].metadata.name}'
multinode_test.go:524: (dbg) Run:  out/minikube-linux-amd64 kubectl -p multinode-776386 -- exec busybox-67b7f59bb-trlh5 -- nslookup kubernetes.io
multinode_test.go:524: (dbg) Run:  out/minikube-linux-amd64 kubectl -p multinode-776386 -- exec busybox-67b7f59bb-tvf5p -- nslookup kubernetes.io
multinode_test.go:534: (dbg) Run:  out/minikube-linux-amd64 kubectl -p multinode-776386 -- exec busybox-67b7f59bb-trlh5 -- nslookup kubernetes.default
multinode_test.go:534: (dbg) Run:  out/minikube-linux-amd64 kubectl -p multinode-776386 -- exec busybox-67b7f59bb-tvf5p -- nslookup kubernetes.default
multinode_test.go:542: (dbg) Run:  out/minikube-linux-amd64 kubectl -p multinode-776386 -- exec busybox-67b7f59bb-trlh5 -- nslookup kubernetes.default.svc.cluster.local
multinode_test.go:542: (dbg) Run:  out/minikube-linux-amd64 kubectl -p multinode-776386 -- exec busybox-67b7f59bb-tvf5p -- nslookup kubernetes.default.svc.cluster.local
--- PASS: TestMultiNode/serial/DeployApp2Nodes (3.60s)
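
The six kubectl exec calls above assert that both pods resolve external and in-cluster DNS names. A compact sketch of the same loop (pod names taken from this run; kubectl context handling omitted):

package main

import (
	"fmt"
	"os/exec"
)

func main() {
	pods := []string{"busybox-67b7f59bb-trlh5", "busybox-67b7f59bb-tvf5p"}
	names := []string{"kubernetes.io", "kubernetes.default", "kubernetes.default.svc.cluster.local"}
	for _, pod := range pods {
		for _, name := range names {
			// Each lookup must succeed from inside the pod's network namespace.
			out, err := exec.Command("kubectl", "exec", pod, "--", "nslookup", name).CombinedOutput()
			if err != nil {
				fmt.Printf("%s failed to resolve %s: %v\n%s", pod, name, err, out)
			}
		}
	}
}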

TestMultiNode/serial/AddNode (48.77s)

=== RUN   TestMultiNode/serial/AddNode
multinode_test.go:110: (dbg) Run:  out/minikube-linux-amd64 node add -p multinode-776386 -v 3 --alsologtostderr
E0731 10:54:32.969754   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/ingress-addon-legacy-538476/client.crt: no such file or directory
E0731 10:55:00.654573   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/ingress-addon-legacy-538476/client.crt: no such file or directory
multinode_test.go:110: (dbg) Done: out/minikube-linux-amd64 node add -p multinode-776386 -v 3 --alsologtostderr: (48.216209665s)
multinode_test.go:116: (dbg) Run:  out/minikube-linux-amd64 -p multinode-776386 status --alsologtostderr
--- PASS: TestMultiNode/serial/AddNode (48.77s)

TestMultiNode/serial/ProfileList (0.26s)

=== RUN   TestMultiNode/serial/ProfileList
multinode_test.go:132: (dbg) Run:  out/minikube-linux-amd64 profile list --output json
--- PASS: TestMultiNode/serial/ProfileList (0.26s)

TestMultiNode/serial/CopyFile (8.49s)

=== RUN   TestMultiNode/serial/CopyFile
multinode_test.go:173: (dbg) Run:  out/minikube-linux-amd64 -p multinode-776386 status --output json --alsologtostderr
helpers_test.go:556: (dbg) Run:  out/minikube-linux-amd64 -p multinode-776386 cp testdata/cp-test.txt multinode-776386:/home/docker/cp-test.txt
helpers_test.go:534: (dbg) Run:  out/minikube-linux-amd64 -p multinode-776386 ssh -n multinode-776386 "sudo cat /home/docker/cp-test.txt"
helpers_test.go:556: (dbg) Run:  out/minikube-linux-amd64 -p multinode-776386 cp multinode-776386:/home/docker/cp-test.txt /tmp/TestMultiNodeserialCopyFile2865032769/001/cp-test_multinode-776386.txt
helpers_test.go:534: (dbg) Run:  out/minikube-linux-amd64 -p multinode-776386 ssh -n multinode-776386 "sudo cat /home/docker/cp-test.txt"
helpers_test.go:556: (dbg) Run:  out/minikube-linux-amd64 -p multinode-776386 cp multinode-776386:/home/docker/cp-test.txt multinode-776386-m02:/home/docker/cp-test_multinode-776386_multinode-776386-m02.txt
helpers_test.go:534: (dbg) Run:  out/minikube-linux-amd64 -p multinode-776386 ssh -n multinode-776386 "sudo cat /home/docker/cp-test.txt"
helpers_test.go:534: (dbg) Run:  out/minikube-linux-amd64 -p multinode-776386 ssh -n multinode-776386-m02 "sudo cat /home/docker/cp-test_multinode-776386_multinode-776386-m02.txt"
helpers_test.go:556: (dbg) Run:  out/minikube-linux-amd64 -p multinode-776386 cp multinode-776386:/home/docker/cp-test.txt multinode-776386-m03:/home/docker/cp-test_multinode-776386_multinode-776386-m03.txt
helpers_test.go:534: (dbg) Run:  out/minikube-linux-amd64 -p multinode-776386 ssh -n multinode-776386 "sudo cat /home/docker/cp-test.txt"
helpers_test.go:534: (dbg) Run:  out/minikube-linux-amd64 -p multinode-776386 ssh -n multinode-776386-m03 "sudo cat /home/docker/cp-test_multinode-776386_multinode-776386-m03.txt"
helpers_test.go:556: (dbg) Run:  out/minikube-linux-amd64 -p multinode-776386 cp testdata/cp-test.txt multinode-776386-m02:/home/docker/cp-test.txt
helpers_test.go:534: (dbg) Run:  out/minikube-linux-amd64 -p multinode-776386 ssh -n multinode-776386-m02 "sudo cat /home/docker/cp-test.txt"
helpers_test.go:556: (dbg) Run:  out/minikube-linux-amd64 -p multinode-776386 cp multinode-776386-m02:/home/docker/cp-test.txt /tmp/TestMultiNodeserialCopyFile2865032769/001/cp-test_multinode-776386-m02.txt
helpers_test.go:534: (dbg) Run:  out/minikube-linux-amd64 -p multinode-776386 ssh -n multinode-776386-m02 "sudo cat /home/docker/cp-test.txt"
helpers_test.go:556: (dbg) Run:  out/minikube-linux-amd64 -p multinode-776386 cp multinode-776386-m02:/home/docker/cp-test.txt multinode-776386:/home/docker/cp-test_multinode-776386-m02_multinode-776386.txt
helpers_test.go:534: (dbg) Run:  out/minikube-linux-amd64 -p multinode-776386 ssh -n multinode-776386-m02 "sudo cat /home/docker/cp-test.txt"
helpers_test.go:534: (dbg) Run:  out/minikube-linux-amd64 -p multinode-776386 ssh -n multinode-776386 "sudo cat /home/docker/cp-test_multinode-776386-m02_multinode-776386.txt"
helpers_test.go:556: (dbg) Run:  out/minikube-linux-amd64 -p multinode-776386 cp multinode-776386-m02:/home/docker/cp-test.txt multinode-776386-m03:/home/docker/cp-test_multinode-776386-m02_multinode-776386-m03.txt
helpers_test.go:534: (dbg) Run:  out/minikube-linux-amd64 -p multinode-776386 ssh -n multinode-776386-m02 "sudo cat /home/docker/cp-test.txt"
helpers_test.go:534: (dbg) Run:  out/minikube-linux-amd64 -p multinode-776386 ssh -n multinode-776386-m03 "sudo cat /home/docker/cp-test_multinode-776386-m02_multinode-776386-m03.txt"
helpers_test.go:556: (dbg) Run:  out/minikube-linux-amd64 -p multinode-776386 cp testdata/cp-test.txt multinode-776386-m03:/home/docker/cp-test.txt
helpers_test.go:534: (dbg) Run:  out/minikube-linux-amd64 -p multinode-776386 ssh -n multinode-776386-m03 "sudo cat /home/docker/cp-test.txt"
helpers_test.go:556: (dbg) Run:  out/minikube-linux-amd64 -p multinode-776386 cp multinode-776386-m03:/home/docker/cp-test.txt /tmp/TestMultiNodeserialCopyFile2865032769/001/cp-test_multinode-776386-m03.txt
helpers_test.go:534: (dbg) Run:  out/minikube-linux-amd64 -p multinode-776386 ssh -n multinode-776386-m03 "sudo cat /home/docker/cp-test.txt"
helpers_test.go:556: (dbg) Run:  out/minikube-linux-amd64 -p multinode-776386 cp multinode-776386-m03:/home/docker/cp-test.txt multinode-776386:/home/docker/cp-test_multinode-776386-m03_multinode-776386.txt
helpers_test.go:534: (dbg) Run:  out/minikube-linux-amd64 -p multinode-776386 ssh -n multinode-776386-m03 "sudo cat /home/docker/cp-test.txt"
helpers_test.go:534: (dbg) Run:  out/minikube-linux-amd64 -p multinode-776386 ssh -n multinode-776386 "sudo cat /home/docker/cp-test_multinode-776386-m03_multinode-776386.txt"
helpers_test.go:556: (dbg) Run:  out/minikube-linux-amd64 -p multinode-776386 cp multinode-776386-m03:/home/docker/cp-test.txt multinode-776386-m02:/home/docker/cp-test_multinode-776386-m03_multinode-776386-m02.txt
helpers_test.go:534: (dbg) Run:  out/minikube-linux-amd64 -p multinode-776386 ssh -n multinode-776386-m03 "sudo cat /home/docker/cp-test.txt"
helpers_test.go:534: (dbg) Run:  out/minikube-linux-amd64 -p multinode-776386 ssh -n multinode-776386-m02 "sudo cat /home/docker/cp-test_multinode-776386-m03_multinode-776386-m02.txt"
--- PASS: TestMultiNode/serial/CopyFile (8.49s)
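
Each cp/ssh pair above is a round-trip: push a file, cat it back, compare. A sketch of one such round-trip (the first pair in the sequence; paths and names from this run):

package main

import (
	"bytes"
	"fmt"
	"os"
	"os/exec"
)

func main() {
	want, err := os.ReadFile("testdata/cp-test.txt")
	if err != nil {
		fmt.Println("read local file:", err)
		return
	}
	mk := "out/minikube-linux-amd64"
	// Push the file to the control-plane node.
	if err := exec.Command(mk, "-p", "multinode-776386", "cp",
		"testdata/cp-test.txt", "multinode-776386:/home/docker/cp-test.txt").Run(); err != nil {
		fmt.Println("cp failed:", err)
		return
	}
	// Read it back over ssh and compare.
	got, err := exec.Command(mk, "-p", "multinode-776386", "ssh", "-n", "multinode-776386",
		"sudo cat /home/docker/cp-test.txt").Output()
	if err != nil {
		fmt.Println("ssh cat failed:", err)
		return
	}
	if !bytes.Equal(bytes.TrimSpace(got), bytes.TrimSpace(want)) {
		fmt.Println("round-trip content mismatch")
	}
}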

TestMultiNode/serial/StopNode (2.04s)

=== RUN   TestMultiNode/serial/StopNode
multinode_test.go:210: (dbg) Run:  out/minikube-linux-amd64 -p multinode-776386 node stop m03
multinode_test.go:210: (dbg) Done: out/minikube-linux-amd64 -p multinode-776386 node stop m03: (1.182348039s)
multinode_test.go:216: (dbg) Run:  out/minikube-linux-amd64 -p multinode-776386 status
multinode_test.go:216: (dbg) Non-zero exit: out/minikube-linux-amd64 -p multinode-776386 status: exit status 7 (430.62863ms)
-- stdout --
	multinode-776386
	type: Control Plane
	host: Running
	kubelet: Running
	apiserver: Running
	kubeconfig: Configured
	
	multinode-776386-m02
	type: Worker
	host: Running
	kubelet: Running
	
	multinode-776386-m03
	type: Worker
	host: Stopped
	kubelet: Stopped
	
-- /stdout --
multinode_test.go:223: (dbg) Run:  out/minikube-linux-amd64 -p multinode-776386 status --alsologtostderr
multinode_test.go:223: (dbg) Non-zero exit: out/minikube-linux-amd64 -p multinode-776386 status --alsologtostderr: exit status 7 (422.629415ms)
-- stdout --
	multinode-776386
	type: Control Plane
	host: Running
	kubelet: Running
	apiserver: Running
	kubeconfig: Configured
	
	multinode-776386-m02
	type: Worker
	host: Running
	kubelet: Running
	
	multinode-776386-m03
	type: Worker
	host: Stopped
	kubelet: Stopped
	
-- /stdout --
** stderr ** 
	I0731 10:55:16.750817  109921 out.go:296] Setting OutFile to fd 1 ...
	I0731 10:55:16.750926  109921 out.go:343] TERM=,COLORTERM=, which probably does not support color
	I0731 10:55:16.750935  109921 out.go:309] Setting ErrFile to fd 2...
	I0731 10:55:16.750939  109921 out.go:343] TERM=,COLORTERM=, which probably does not support color
	I0731 10:55:16.751145  109921 root.go:338] Updating PATH: /home/jenkins/minikube-integration/16969-5799/.minikube/bin
	I0731 10:55:16.751327  109921 out.go:303] Setting JSON to false
	I0731 10:55:16.751351  109921 mustload.go:65] Loading cluster: multinode-776386
	I0731 10:55:16.751441  109921 notify.go:220] Checking for updates...
	I0731 10:55:16.752768  109921 config.go:182] Loaded profile config "multinode-776386": Driver=docker, ContainerRuntime=crio, KubernetesVersion=v1.27.3
	I0731 10:55:16.752802  109921 status.go:255] checking status of multinode-776386 ...
	I0731 10:55:16.753400  109921 cli_runner.go:164] Run: docker container inspect multinode-776386 --format={{.State.Status}}
	I0731 10:55:16.771715  109921 status.go:330] multinode-776386 host status = "Running" (err=<nil>)
	I0731 10:55:16.771756  109921 host.go:66] Checking if "multinode-776386" exists ...
	I0731 10:55:16.771990  109921 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" multinode-776386
	I0731 10:55:16.786280  109921 host.go:66] Checking if "multinode-776386" exists ...
	I0731 10:55:16.786495  109921 ssh_runner.go:195] Run: sh -c "df -h /var | awk 'NR==2{print $5}'"
	I0731 10:55:16.786527  109921 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" multinode-776386
	I0731 10:55:16.801427  109921 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32847 SSHKeyPath:/home/jenkins/minikube-integration/16969-5799/.minikube/machines/multinode-776386/id_rsa Username:docker}
	I0731 10:55:16.886819  109921 ssh_runner.go:195] Run: systemctl --version
	I0731 10:55:16.890382  109921 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet
	I0731 10:55:16.899895  109921 cli_runner.go:164] Run: docker system info --format "{{json .}}"
	I0731 10:55:16.948720  109921 info.go:266] docker info: {ID:TS6T:UINC:MIYS:RZPA:KS6T:4JQK:7JHN:D6RA:LDP2:MHAE:G32M:C5NQ Containers:3 ContainersRunning:2 ContainersPaused:0 ContainersStopped:1 Images:3 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local logentries splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:true BridgeNfIP6Tables:true Debug:false NFd:41 OomKillDisable:true NGoroutines:56 SystemTime:2023-07-31 10:55:16.940661217 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1038-gcp OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Architecture:x86_64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:8 MemTotal:33648062464 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ubuntu-20-agent-15 Labels:[] ExperimentalBuild:false ServerVersion:24.0.5 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:3dce8eb055cbb6872793272b4f20ed16117344f8 Expected:3dce8eb055cbb6872793272b4f20ed16117344f8} RuncCommit:{ID:v1.1.7-0-g860f061 Expected:v1.1.7-0-g860f061} InitCommit:{ID:de40ad0 Expected:de40ad0} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.11.2] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.20.2] map[Name:scan Path:/usr/libexec/docker/cli-plugins/docker-scan SchemaVersion:0.1.0 ShortDescription:Docker Scan Vendor:Docker Inc. Version:v0.23.0]] Warnings:<nil>}}
	I0731 10:55:16.949256  109921 kubeconfig.go:92] found "multinode-776386" server: "https://192.168.58.2:8443"
	I0731 10:55:16.949276  109921 api_server.go:166] Checking apiserver status ...
	I0731 10:55:16.949306  109921 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
	I0731 10:55:16.959167  109921 ssh_runner.go:195] Run: sudo egrep ^[0-9]+:freezer: /proc/1453/cgroup
	I0731 10:55:16.967030  109921 api_server.go:182] apiserver freezer: "4:freezer:/docker/656908c1a0b09695a6320c09f205c4cc49aa9f1eeb5bc93be8b593553d25e518/crio/crio-a8fe782430637030087d8bc1aed4923706c64e2538bc36712c92c7cd92726f0f"
	I0731 10:55:16.967075  109921 ssh_runner.go:195] Run: sudo cat /sys/fs/cgroup/freezer/docker/656908c1a0b09695a6320c09f205c4cc49aa9f1eeb5bc93be8b593553d25e518/crio/crio-a8fe782430637030087d8bc1aed4923706c64e2538bc36712c92c7cd92726f0f/freezer.state
	I0731 10:55:16.974266  109921 api_server.go:204] freezer state: "THAWED"
	I0731 10:55:16.974287  109921 api_server.go:253] Checking apiserver healthz at https://192.168.58.2:8443/healthz ...
	I0731 10:55:16.978453  109921 api_server.go:279] https://192.168.58.2:8443/healthz returned 200:
	ok
	I0731 10:55:16.978471  109921 status.go:421] multinode-776386 apiserver status = Running (err=<nil>)
	I0731 10:55:16.978479  109921 status.go:257] multinode-776386 status: &{Name:multinode-776386 Host:Running Kubelet:Running APIServer:Running Kubeconfig:Configured Worker:false TimeToStop: DockerEnv: PodManEnv:}
	I0731 10:55:16.978493  109921 status.go:255] checking status of multinode-776386-m02 ...
	I0731 10:55:16.978704  109921 cli_runner.go:164] Run: docker container inspect multinode-776386-m02 --format={{.State.Status}}
	I0731 10:55:16.994050  109921 status.go:330] multinode-776386-m02 host status = "Running" (err=<nil>)
	I0731 10:55:16.994070  109921 host.go:66] Checking if "multinode-776386-m02" exists ...
	I0731 10:55:16.994353  109921 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" multinode-776386-m02
	I0731 10:55:17.010043  109921 host.go:66] Checking if "multinode-776386-m02" exists ...
	I0731 10:55:17.010309  109921 ssh_runner.go:195] Run: sh -c "df -h /var | awk 'NR==2{print $5}'"
	I0731 10:55:17.010351  109921 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" multinode-776386-m02
	I0731 10:55:17.025794  109921 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32852 SSHKeyPath:/home/jenkins/minikube-integration/16969-5799/.minikube/machines/multinode-776386-m02/id_rsa Username:docker}
	I0731 10:55:17.110537  109921 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet
	I0731 10:55:17.120022  109921 status.go:257] multinode-776386-m02 status: &{Name:multinode-776386-m02 Host:Running Kubelet:Running APIServer:Irrelevant Kubeconfig:Irrelevant Worker:true TimeToStop: DockerEnv: PodManEnv:}
	I0731 10:55:17.120050  109921 status.go:255] checking status of multinode-776386-m03 ...
	I0731 10:55:17.120273  109921 cli_runner.go:164] Run: docker container inspect multinode-776386-m03 --format={{.State.Status}}
	I0731 10:55:17.135300  109921 status.go:330] multinode-776386-m03 host status = "Stopped" (err=<nil>)
	I0731 10:55:17.135317  109921 status.go:343] host is not running, skipping remaining checks
	I0731 10:55:17.135323  109921 status.go:257] multinode-776386-m03 status: &{Name:multinode-776386-m03 Host:Stopped Kubelet:Stopped APIServer:Stopped Kubeconfig:Stopped Worker:true TimeToStop: DockerEnv: PodManEnv:}
** /stderr **
--- PASS: TestMultiNode/serial/StopNode (2.04s)
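
The stderr trace shows how `status` probes the apiserver: find its PID, map it to a freezer cgroup, require the state THAWED, then hit /healthz. A sketch of the freezer part, with the cgroup path taken verbatim from this run (it will differ on any other machine):

package main

import (
	"fmt"
	"os"
	"strings"
)

func main() {
	const state = "/sys/fs/cgroup/freezer/docker/656908c1a0b09695a6320c09f205c4cc49aa9f1eeb5bc93be8b593553d25e518/crio/crio-a8fe782430637030087d8bc1aed4923706c64e2538bc36712c92c7cd92726f0f/freezer.state"
	b, err := os.ReadFile(state)
	if err != nil {
		fmt.Println("read freezer.state:", err)
		return
	}
	if s := strings.TrimSpace(string(b)); s != "THAWED" {
		fmt.Printf("apiserver cgroup is %s, not THAWED\n", s)
		return
	}
	// With the cgroup thawed, the trace proceeds to GET https://192.168.58.2:8443/healthz
	// and expects a 200 "ok".
	fmt.Println("apiserver cgroup THAWED; healthz check would follow")
}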

TestMultiNode/serial/StartAfterStop (10.45s)

=== RUN   TestMultiNode/serial/StartAfterStop
multinode_test.go:244: (dbg) Run:  docker version -f {{.Server.Version}}
multinode_test.go:254: (dbg) Run:  out/minikube-linux-amd64 -p multinode-776386 node start m03 --alsologtostderr
multinode_test.go:254: (dbg) Done: out/minikube-linux-amd64 -p multinode-776386 node start m03 --alsologtostderr: (9.817689472s)
multinode_test.go:261: (dbg) Run:  out/minikube-linux-amd64 -p multinode-776386 status
multinode_test.go:275: (dbg) Run:  kubectl get nodes
--- PASS: TestMultiNode/serial/StartAfterStop (10.45s)

TestMultiNode/serial/RestartKeepsNodes (109.82s)

=== RUN   TestMultiNode/serial/RestartKeepsNodes
multinode_test.go:283: (dbg) Run:  out/minikube-linux-amd64 node list -p multinode-776386
multinode_test.go:290: (dbg) Run:  out/minikube-linux-amd64 stop -p multinode-776386
multinode_test.go:290: (dbg) Done: out/minikube-linux-amd64 stop -p multinode-776386: (24.70459127s)
multinode_test.go:295: (dbg) Run:  out/minikube-linux-amd64 start -p multinode-776386 --wait=true -v=8 --alsologtostderr
E0731 10:55:58.882280   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/addons-764200/client.crt: no such file or directory
E0731 10:57:12.322373   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/functional-683521/client.crt: no such file or directory
multinode_test.go:295: (dbg) Done: out/minikube-linux-amd64 start -p multinode-776386 --wait=true -v=8 --alsologtostderr: (1m25.044151341s)
multinode_test.go:300: (dbg) Run:  out/minikube-linux-amd64 node list -p multinode-776386
--- PASS: TestMultiNode/serial/RestartKeepsNodes (109.82s)

TestMultiNode/serial/DeleteNode (4.57s)

=== RUN   TestMultiNode/serial/DeleteNode
multinode_test.go:394: (dbg) Run:  out/minikube-linux-amd64 -p multinode-776386 node delete m03
multinode_test.go:394: (dbg) Done: out/minikube-linux-amd64 -p multinode-776386 node delete m03: (4.029429072s)
multinode_test.go:400: (dbg) Run:  out/minikube-linux-amd64 -p multinode-776386 status --alsologtostderr
multinode_test.go:414: (dbg) Run:  docker volume ls
multinode_test.go:424: (dbg) Run:  kubectl get nodes
E0731 10:57:21.928581   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/addons-764200/client.crt: no such file or directory
multinode_test.go:432: (dbg) Run:  kubectl get nodes -o "go-template='{{range .items}}{{range .status.conditions}}{{if eq .type "Ready"}} {{.status}}{{"\n"}}{{end}}{{end}}{{end}}'"
--- PASS: TestMultiNode/serial/DeleteNode (4.57s)

TestMultiNode/serial/StopMultiNode (23.71s)

=== RUN   TestMultiNode/serial/StopMultiNode
multinode_test.go:314: (dbg) Run:  out/minikube-linux-amd64 -p multinode-776386 stop
multinode_test.go:314: (dbg) Done: out/minikube-linux-amd64 -p multinode-776386 stop: (23.56453786s)
multinode_test.go:320: (dbg) Run:  out/minikube-linux-amd64 -p multinode-776386 status
multinode_test.go:320: (dbg) Non-zero exit: out/minikube-linux-amd64 -p multinode-776386 status: exit status 7 (74.616778ms)
-- stdout --
	multinode-776386
	type: Control Plane
	host: Stopped
	kubelet: Stopped
	apiserver: Stopped
	kubeconfig: Stopped
	
	multinode-776386-m02
	type: Worker
	host: Stopped
	kubelet: Stopped
	
-- /stdout --
multinode_test.go:327: (dbg) Run:  out/minikube-linux-amd64 -p multinode-776386 status --alsologtostderr
multinode_test.go:327: (dbg) Non-zero exit: out/minikube-linux-amd64 -p multinode-776386 status --alsologtostderr: exit status 7 (72.949805ms)
-- stdout --
	multinode-776386
	type: Control Plane
	host: Stopped
	kubelet: Stopped
	apiserver: Stopped
	kubeconfig: Stopped
	
	multinode-776386-m02
	type: Worker
	host: Stopped
	kubelet: Stopped
	
-- /stdout --
** stderr ** 
	I0731 10:57:45.660942  120031 out.go:296] Setting OutFile to fd 1 ...
	I0731 10:57:45.661073  120031 out.go:343] TERM=,COLORTERM=, which probably does not support color
	I0731 10:57:45.661085  120031 out.go:309] Setting ErrFile to fd 2...
	I0731 10:57:45.661092  120031 out.go:343] TERM=,COLORTERM=, which probably does not support color
	I0731 10:57:45.661294  120031 root.go:338] Updating PATH: /home/jenkins/minikube-integration/16969-5799/.minikube/bin
	I0731 10:57:45.661447  120031 out.go:303] Setting JSON to false
	I0731 10:57:45.661473  120031 mustload.go:65] Loading cluster: multinode-776386
	I0731 10:57:45.661580  120031 notify.go:220] Checking for updates...
	I0731 10:57:45.661986  120031 config.go:182] Loaded profile config "multinode-776386": Driver=docker, ContainerRuntime=crio, KubernetesVersion=v1.27.3
	I0731 10:57:45.662008  120031 status.go:255] checking status of multinode-776386 ...
	I0731 10:57:45.662483  120031 cli_runner.go:164] Run: docker container inspect multinode-776386 --format={{.State.Status}}
	I0731 10:57:45.681078  120031 status.go:330] multinode-776386 host status = "Stopped" (err=<nil>)
	I0731 10:57:45.681105  120031 status.go:343] host is not running, skipping remaining checks
	I0731 10:57:45.681112  120031 status.go:257] multinode-776386 status: &{Name:multinode-776386 Host:Stopped Kubelet:Stopped APIServer:Stopped Kubeconfig:Stopped Worker:false TimeToStop: DockerEnv: PodManEnv:}
	I0731 10:57:45.681132  120031 status.go:255] checking status of multinode-776386-m02 ...
	I0731 10:57:45.681362  120031 cli_runner.go:164] Run: docker container inspect multinode-776386-m02 --format={{.State.Status}}
	I0731 10:57:45.696839  120031 status.go:330] multinode-776386-m02 host status = "Stopped" (err=<nil>)
	I0731 10:57:45.696858  120031 status.go:343] host is not running, skipping remaining checks
	I0731 10:57:45.696863  120031 status.go:257] multinode-776386-m02 status: &{Name:multinode-776386-m02 Host:Stopped Kubelet:Stopped APIServer:Stopped Kubeconfig:Stopped Worker:true TimeToStop: DockerEnv: PodManEnv:}
** /stderr **
--- PASS: TestMultiNode/serial/StopMultiNode (23.71s)

TestMultiNode/serial/RestartMultiNode (79.16s)

=== RUN   TestMultiNode/serial/RestartMultiNode
multinode_test.go:344: (dbg) Run:  docker version -f {{.Server.Version}}
multinode_test.go:354: (dbg) Run:  out/minikube-linux-amd64 start -p multinode-776386 --wait=true -v=8 --alsologtostderr --driver=docker  --container-runtime=crio
multinode_test.go:354: (dbg) Done: out/minikube-linux-amd64 start -p multinode-776386 --wait=true -v=8 --alsologtostderr --driver=docker  --container-runtime=crio: (1m18.616433269s)
multinode_test.go:360: (dbg) Run:  out/minikube-linux-amd64 -p multinode-776386 status --alsologtostderr
multinode_test.go:374: (dbg) Run:  kubectl get nodes
multinode_test.go:382: (dbg) Run:  kubectl get nodes -o "go-template='{{range .items}}{{range .status.conditions}}{{if eq .type "Ready"}} {{.status}}{{"\n"}}{{end}}{{end}}{{end}}'"
--- PASS: TestMultiNode/serial/RestartMultiNode (79.16s)

TestMultiNode/serial/ValidateNameConflict (25.89s)

=== RUN   TestMultiNode/serial/ValidateNameConflict
multinode_test.go:443: (dbg) Run:  out/minikube-linux-amd64 node list -p multinode-776386
multinode_test.go:452: (dbg) Run:  out/minikube-linux-amd64 start -p multinode-776386-m02 --driver=docker  --container-runtime=crio
multinode_test.go:452: (dbg) Non-zero exit: out/minikube-linux-amd64 start -p multinode-776386-m02 --driver=docker  --container-runtime=crio: exit status 14 (55.651456ms)
-- stdout --
	* [multinode-776386-m02] minikube v1.31.1 on Ubuntu 20.04 (kvm/amd64)
	  - MINIKUBE_LOCATION=16969
	  - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
	  - KUBECONFIG=/home/jenkins/minikube-integration/16969-5799/kubeconfig
	  - MINIKUBE_HOME=/home/jenkins/minikube-integration/16969-5799/.minikube
	  - MINIKUBE_BIN=out/minikube-linux-amd64
	  - MINIKUBE_FORCE_SYSTEMD=
	
	
-- /stdout --
** stderr ** 
	! Profile name 'multinode-776386-m02' is duplicated with machine name 'multinode-776386-m02' in profile 'multinode-776386'
	X Exiting due to MK_USAGE: Profile name should be unique
** /stderr **
multinode_test.go:460: (dbg) Run:  out/minikube-linux-amd64 start -p multinode-776386-m03 --driver=docker  --container-runtime=crio
multinode_test.go:460: (dbg) Done: out/minikube-linux-amd64 start -p multinode-776386-m03 --driver=docker  --container-runtime=crio: (23.721393387s)
multinode_test.go:467: (dbg) Run:  out/minikube-linux-amd64 node add -p multinode-776386
multinode_test.go:467: (dbg) Non-zero exit: out/minikube-linux-amd64 node add -p multinode-776386: exit status 80 (248.097512ms)
-- stdout --
	* Adding node m03 to cluster multinode-776386
	
	
-- /stdout --
** stderr ** 
	X Exiting due to GUEST_NODE_ADD: failed to add node: Node multinode-776386-m03 already exists in multinode-776386-m03 profile
	* 
	╭─────────────────────────────────────────────────────────────────────────────────────────────╮
	│                                                                                             │
	│    * If the above advice does not help, please let us know:                                 │
	│      https://github.com/kubernetes/minikube/issues/new/choose                               │
	│                                                                                             │
	│    * Please run `minikube logs --file=logs.txt` and attach logs.txt to the GitHub issue.    │
	│    * Please also attach the following file to the GitHub issue:                             │
	│    * - /tmp/minikube_node_040ea7097fd6ed71e65be9a474587f81f0ccd21d_0.log                    │
	│                                                                                             │
	╰─────────────────────────────────────────────────────────────────────────────────────────────╯
** /stderr **
multinode_test.go:472: (dbg) Run:  out/minikube-linux-amd64 delete -p multinode-776386-m03
multinode_test.go:472: (dbg) Done: out/minikube-linux-amd64 delete -p multinode-776386-m03: (1.825371335s)
--- PASS: TestMultiNode/serial/ValidateNameConflict (25.89s)

TestPreload (125.05s)

=== RUN   TestPreload
preload_test.go:44: (dbg) Run:  out/minikube-linux-amd64 start -p test-preload-728558 --memory=2200 --alsologtostderr --wait=true --preload=false --driver=docker  --container-runtime=crio --kubernetes-version=v1.24.4
preload_test.go:44: (dbg) Done: out/minikube-linux-amd64 start -p test-preload-728558 --memory=2200 --alsologtostderr --wait=true --preload=false --driver=docker  --container-runtime=crio --kubernetes-version=v1.24.4: (1m11.760967918s)
preload_test.go:52: (dbg) Run:  out/minikube-linux-amd64 -p test-preload-728558 image pull gcr.io/k8s-minikube/busybox
preload_test.go:58: (dbg) Run:  out/minikube-linux-amd64 stop -p test-preload-728558
preload_test.go:58: (dbg) Done: out/minikube-linux-amd64 stop -p test-preload-728558: (5.694116685s)
preload_test.go:66: (dbg) Run:  out/minikube-linux-amd64 start -p test-preload-728558 --memory=2200 --alsologtostderr -v=1 --wait=true --driver=docker  --container-runtime=crio
E0731 11:00:58.881952   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/addons-764200/client.crt: no such file or directory
preload_test.go:66: (dbg) Done: out/minikube-linux-amd64 start -p test-preload-728558 --memory=2200 --alsologtostderr -v=1 --wait=true --driver=docker  --container-runtime=crio: (44.30474329s)
preload_test.go:71: (dbg) Run:  out/minikube-linux-amd64 -p test-preload-728558 image list
helpers_test.go:175: Cleaning up "test-preload-728558" profile ...
helpers_test.go:178: (dbg) Run:  out/minikube-linux-amd64 delete -p test-preload-728558
helpers_test.go:178: (dbg) Done: out/minikube-linux-amd64 delete -p test-preload-728558: (2.219421044s)
--- PASS: TestPreload (125.05s)
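
The preload round-trip above, as a standalone sketch (same flags as the test); the point is that an image pulled before the stop must still be present after the restart picks up the preload tarball:

    minikube start -p test-preload-728558 --memory=2200 --preload=false \
      --driver=docker --container-runtime=crio --kubernetes-version=v1.24.4
    minikube -p test-preload-728558 image pull gcr.io/k8s-minikube/busybox
    minikube stop -p test-preload-728558
    minikube start -p test-preload-728558 --memory=2200 \
      --driver=docker --container-runtime=crio
    # The earlier pull should survive the restart:
    minikube -p test-preload-728558 image list | grep busybox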

TestScheduledStopUnix (98.06s)

=== RUN   TestScheduledStopUnix
scheduled_stop_test.go:128: (dbg) Run:  out/minikube-linux-amd64 start -p scheduled-stop-945590 --memory=2048 --driver=docker  --container-runtime=crio
scheduled_stop_test.go:128: (dbg) Done: out/minikube-linux-amd64 start -p scheduled-stop-945590 --memory=2048 --driver=docker  --container-runtime=crio: (22.864621569s)
scheduled_stop_test.go:137: (dbg) Run:  out/minikube-linux-amd64 stop -p scheduled-stop-945590 --schedule 5m
scheduled_stop_test.go:191: (dbg) Run:  out/minikube-linux-amd64 status --format={{.TimeToStop}} -p scheduled-stop-945590 -n scheduled-stop-945590
scheduled_stop_test.go:169: signal error was:  <nil>
scheduled_stop_test.go:137: (dbg) Run:  out/minikube-linux-amd64 stop -p scheduled-stop-945590 --schedule 15s
scheduled_stop_test.go:169: signal error was:  os: process already finished
scheduled_stop_test.go:137: (dbg) Run:  out/minikube-linux-amd64 stop -p scheduled-stop-945590 --cancel-scheduled
E0731 11:02:12.324633   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/functional-683521/client.crt: no such file or directory
scheduled_stop_test.go:176: (dbg) Run:  out/minikube-linux-amd64 status --format={{.Host}} -p scheduled-stop-945590 -n scheduled-stop-945590
scheduled_stop_test.go:205: (dbg) Run:  out/minikube-linux-amd64 status -p scheduled-stop-945590
scheduled_stop_test.go:137: (dbg) Run:  out/minikube-linux-amd64 stop -p scheduled-stop-945590 --schedule 15s
scheduled_stop_test.go:169: signal error was:  os: process already finished
scheduled_stop_test.go:205: (dbg) Run:  out/minikube-linux-amd64 status -p scheduled-stop-945590
scheduled_stop_test.go:205: (dbg) Non-zero exit: out/minikube-linux-amd64 status -p scheduled-stop-945590: exit status 7 (58.547685ms)

-- stdout --
	scheduled-stop-945590
	type: Control Plane
	host: Stopped
	kubelet: Stopped
	apiserver: Stopped
	kubeconfig: Stopped
	

-- /stdout --
scheduled_stop_test.go:176: (dbg) Run:  out/minikube-linux-amd64 status --format={{.Host}} -p scheduled-stop-945590 -n scheduled-stop-945590
scheduled_stop_test.go:176: (dbg) Non-zero exit: out/minikube-linux-amd64 status --format={{.Host}} -p scheduled-stop-945590 -n scheduled-stop-945590: exit status 7 (56.524478ms)

-- stdout --
	Stopped

-- /stdout --
scheduled_stop_test.go:176: status error: exit status 7 (may be ok)
helpers_test.go:175: Cleaning up "scheduled-stop-945590" profile ...
helpers_test.go:178: (dbg) Run:  out/minikube-linux-amd64 delete -p scheduled-stop-945590
helpers_test.go:178: (dbg) Done: out/minikube-linux-amd64 delete -p scheduled-stop-945590: (4.031431707s)
--- PASS: TestScheduledStopUnix (98.06s)
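
The scheduled-stop flow exercised above, condensed into a sketch (flags taken from the log; the final status command exits 7 once the host is stopped, which the test treats as success):

    minikube stop -p scheduled-stop-945590 --schedule 5m      # arm a stop five minutes out
    minikube stop -p scheduled-stop-945590 --cancel-scheduled # disarm it
    minikube stop -p scheduled-stop-945590 --schedule 15s     # re-arm with a short fuse
    sleep 20
    minikube status -p scheduled-stop-945590 --format={{.Host}}  # prints "Stopped", exit status 7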

TestInsufficientStorage (12.68s)

=== RUN   TestInsufficientStorage
status_test.go:50: (dbg) Run:  out/minikube-linux-amd64 start -p insufficient-storage-307929 --memory=2048 --output=json --wait=true --driver=docker  --container-runtime=crio
status_test.go:50: (dbg) Non-zero exit: out/minikube-linux-amd64 start -p insufficient-storage-307929 --memory=2048 --output=json --wait=true --driver=docker  --container-runtime=crio: exit status 26 (10.392598915s)

-- stdout --
	{"specversion":"1.0","id":"8d69f48e-0106-4083-a1d5-737e685cf1dd","source":"https://minikube.sigs.k8s.io/","type":"io.k8s.sigs.minikube.step","datacontenttype":"application/json","data":{"currentstep":"0","message":"[insufficient-storage-307929] minikube v1.31.1 on Ubuntu 20.04 (kvm/amd64)","name":"Initial Minikube Setup","totalsteps":"19"}}
	{"specversion":"1.0","id":"12104c1c-750e-492d-a4ab-b7d957ab4dd7","source":"https://minikube.sigs.k8s.io/","type":"io.k8s.sigs.minikube.info","datacontenttype":"application/json","data":{"message":"MINIKUBE_LOCATION=16969"}}
	{"specversion":"1.0","id":"2e84acc9-b596-4365-942b-aa7055c72c0d","source":"https://minikube.sigs.k8s.io/","type":"io.k8s.sigs.minikube.info","datacontenttype":"application/json","data":{"message":"MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true"}}
	{"specversion":"1.0","id":"915d3618-db65-4673-a132-3e3a2f436e9a","source":"https://minikube.sigs.k8s.io/","type":"io.k8s.sigs.minikube.info","datacontenttype":"application/json","data":{"message":"KUBECONFIG=/home/jenkins/minikube-integration/16969-5799/kubeconfig"}}
	{"specversion":"1.0","id":"2e8702e4-480c-4960-b8fa-c27322105516","source":"https://minikube.sigs.k8s.io/","type":"io.k8s.sigs.minikube.info","datacontenttype":"application/json","data":{"message":"MINIKUBE_HOME=/home/jenkins/minikube-integration/16969-5799/.minikube"}}
	{"specversion":"1.0","id":"171d3417-c928-452c-8200-e7aa0e926e0f","source":"https://minikube.sigs.k8s.io/","type":"io.k8s.sigs.minikube.info","datacontenttype":"application/json","data":{"message":"MINIKUBE_BIN=out/minikube-linux-amd64"}}
	{"specversion":"1.0","id":"5cc4932f-5330-420e-8a6f-e7c6341f3231","source":"https://minikube.sigs.k8s.io/","type":"io.k8s.sigs.minikube.info","datacontenttype":"application/json","data":{"message":"MINIKUBE_FORCE_SYSTEMD="}}
	{"specversion":"1.0","id":"594330dc-fc78-48dd-acfb-9813a252855b","source":"https://minikube.sigs.k8s.io/","type":"io.k8s.sigs.minikube.info","datacontenttype":"application/json","data":{"message":"MINIKUBE_TEST_STORAGE_CAPACITY=100"}}
	{"specversion":"1.0","id":"7ee15988-1f0e-448e-9dfe-2da7eccc38d3","source":"https://minikube.sigs.k8s.io/","type":"io.k8s.sigs.minikube.info","datacontenttype":"application/json","data":{"message":"MINIKUBE_TEST_AVAILABLE_STORAGE=19"}}
	{"specversion":"1.0","id":"411f499c-50d8-4e38-a79a-fd6a4e7352e7","source":"https://minikube.sigs.k8s.io/","type":"io.k8s.sigs.minikube.step","datacontenttype":"application/json","data":{"currentstep":"1","message":"Using the docker driver based on user configuration","name":"Selecting Driver","totalsteps":"19"}}
	{"specversion":"1.0","id":"01f68f50-3e6b-4cc3-84df-33c8ffba2246","source":"https://minikube.sigs.k8s.io/","type":"io.k8s.sigs.minikube.info","datacontenttype":"application/json","data":{"message":"Using Docker driver with root privileges"}}
	{"specversion":"1.0","id":"0578387a-01cf-40c1-b6ee-0ea616d4221e","source":"https://minikube.sigs.k8s.io/","type":"io.k8s.sigs.minikube.step","datacontenttype":"application/json","data":{"currentstep":"3","message":"Starting control plane node insufficient-storage-307929 in cluster insufficient-storage-307929","name":"Starting Node","totalsteps":"19"}}
	{"specversion":"1.0","id":"32b9b66a-e9ae-402d-8d60-5f3d6e7dc639","source":"https://minikube.sigs.k8s.io/","type":"io.k8s.sigs.minikube.step","datacontenttype":"application/json","data":{"currentstep":"5","message":"Pulling base image ...","name":"Pulling Base Image","totalsteps":"19"}}
	{"specversion":"1.0","id":"d2b75a79-a0aa-4c33-96bc-b55957dcbdc8","source":"https://minikube.sigs.k8s.io/","type":"io.k8s.sigs.minikube.step","datacontenttype":"application/json","data":{"currentstep":"8","message":"Creating docker container (CPUs=2, Memory=2048MB) ...","name":"Creating Container","totalsteps":"19"}}
	{"specversion":"1.0","id":"446f998c-0771-4c83-bd56-9ba7c1e5edec","source":"https://minikube.sigs.k8s.io/","type":"io.k8s.sigs.minikube.error","datacontenttype":"application/json","data":{"advice":"Try one or more of the following to free up space on the device:\n\t\n\t\t\t1. Run \"docker system prune\" to remove unused Docker data (optionally with \"-a\")\n\t\t\t2. Increase the storage allocated to Docker for Desktop by clicking on:\n\t\t\t\tDocker icon \u003e Preferences \u003e Resources \u003e Disk Image Size\n\t\t\t3. Run \"minikube ssh -- docker system prune\" if using the Docker container runtime","exitcode":"26","issues":"https://github.com/kubernetes/minikube/issues/9024","message":"Docker is out of disk space! (/var is at 100%% of capacity). You can pass '--force' to skip this check.","name":"RSRC_DOCKER_STORAGE","url":""}}

-- /stdout --
status_test.go:76: (dbg) Run:  out/minikube-linux-amd64 status -p insufficient-storage-307929 --output=json --layout=cluster
status_test.go:76: (dbg) Non-zero exit: out/minikube-linux-amd64 status -p insufficient-storage-307929 --output=json --layout=cluster: exit status 7 (246.42201ms)

-- stdout --
	{"Name":"insufficient-storage-307929","StatusCode":507,"StatusName":"InsufficientStorage","StatusDetail":"/var is almost out of disk space","Step":"Creating Container","StepDetail":"Creating docker container (CPUs=2, Memory=2048MB) ...","BinaryVersion":"v1.31.1","Components":{"kubeconfig":{"Name":"kubeconfig","StatusCode":500,"StatusName":"Error"}},"Nodes":[{"Name":"insufficient-storage-307929","StatusCode":507,"StatusName":"InsufficientStorage","Components":{"apiserver":{"Name":"apiserver","StatusCode":405,"StatusName":"Stopped"},"kubelet":{"Name":"kubelet","StatusCode":405,"StatusName":"Stopped"}}}]}

-- /stdout --
** stderr ** 
	E0731 11:03:29.915771  141654 status.go:415] kubeconfig endpoint: extract IP: "insufficient-storage-307929" does not appear in /home/jenkins/minikube-integration/16969-5799/kubeconfig

** /stderr **
status_test.go:76: (dbg) Run:  out/minikube-linux-amd64 status -p insufficient-storage-307929 --output=json --layout=cluster
status_test.go:76: (dbg) Non-zero exit: out/minikube-linux-amd64 status -p insufficient-storage-307929 --output=json --layout=cluster: exit status 7 (244.522608ms)

-- stdout --
	{"Name":"insufficient-storage-307929","StatusCode":507,"StatusName":"InsufficientStorage","StatusDetail":"/var is almost out of disk space","BinaryVersion":"v1.31.1","Components":{"kubeconfig":{"Name":"kubeconfig","StatusCode":500,"StatusName":"Error"}},"Nodes":[{"Name":"insufficient-storage-307929","StatusCode":507,"StatusName":"InsufficientStorage","Components":{"apiserver":{"Name":"apiserver","StatusCode":405,"StatusName":"Stopped"},"kubelet":{"Name":"kubelet","StatusCode":405,"StatusName":"Stopped"}}}]}

-- /stdout --
** stderr ** 
	E0731 11:03:30.160536  141743 status.go:415] kubeconfig endpoint: extract IP: "insufficient-storage-307929" does not appear in /home/jenkins/minikube-integration/16969-5799/kubeconfig
	E0731 11:03:30.169669  141743 status.go:559] unable to read event log: stat: stat /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/insufficient-storage-307929/events.json: no such file or directory

** /stderr **
helpers_test.go:175: Cleaning up "insufficient-storage-307929" profile ...
helpers_test.go:178: (dbg) Run:  out/minikube-linux-amd64 delete -p insufficient-storage-307929
helpers_test.go:178: (dbg) Done: out/minikube-linux-amd64 delete -p insufficient-storage-307929: (1.796893171s)
--- PASS: TestInsufficientStorage (12.68s)
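
Every stdout line above is one CloudEvents-style JSON object, so the failure event can be picked out mechanically. A sketch assuming jq is available (jq is not part of the test itself):

    minikube start -p insufficient-storage-307929 --memory=2048 --output=json \
      --wait=true --driver=docker --container-runtime=crio \
      | jq -r 'select(.type == "io.k8s.sigs.minikube.error") | .data.exitcode + " " + .data.name'
    # expected: 26 RSRC_DOCKER_STORAGE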

TestKubernetesUpgrade (359.78s)

=== RUN   TestKubernetesUpgrade
=== PAUSE TestKubernetesUpgrade

=== CONT  TestKubernetesUpgrade
version_upgrade_test.go:234: (dbg) Run:  out/minikube-linux-amd64 start -p kubernetes-upgrade-954958 --memory=2200 --kubernetes-version=v1.16.0 --alsologtostderr -v=1 --driver=docker  --container-runtime=crio
version_upgrade_test.go:234: (dbg) Done: out/minikube-linux-amd64 start -p kubernetes-upgrade-954958 --memory=2200 --kubernetes-version=v1.16.0 --alsologtostderr -v=1 --driver=docker  --container-runtime=crio: (49.569459856s)
version_upgrade_test.go:239: (dbg) Run:  out/minikube-linux-amd64 stop -p kubernetes-upgrade-954958
version_upgrade_test.go:239: (dbg) Done: out/minikube-linux-amd64 stop -p kubernetes-upgrade-954958: (8.725434613s)
version_upgrade_test.go:244: (dbg) Run:  out/minikube-linux-amd64 -p kubernetes-upgrade-954958 status --format={{.Host}}
version_upgrade_test.go:244: (dbg) Non-zero exit: out/minikube-linux-amd64 -p kubernetes-upgrade-954958 status --format={{.Host}}: exit status 7 (72.309727ms)

-- stdout --
	Stopped

-- /stdout --
version_upgrade_test.go:246: status error: exit status 7 (may be ok)
version_upgrade_test.go:255: (dbg) Run:  out/minikube-linux-amd64 start -p kubernetes-upgrade-954958 --memory=2200 --kubernetes-version=v1.27.3 --alsologtostderr -v=1 --driver=docker  --container-runtime=crio
version_upgrade_test.go:255: (dbg) Done: out/minikube-linux-amd64 start -p kubernetes-upgrade-954958 --memory=2200 --kubernetes-version=v1.27.3 --alsologtostderr -v=1 --driver=docker  --container-runtime=crio: (4m32.031141544s)
version_upgrade_test.go:260: (dbg) Run:  kubectl --context kubernetes-upgrade-954958 version --output=json
version_upgrade_test.go:279: Attempting to downgrade Kubernetes (should fail)
version_upgrade_test.go:281: (dbg) Run:  out/minikube-linux-amd64 start -p kubernetes-upgrade-954958 --memory=2200 --kubernetes-version=v1.16.0 --driver=docker  --container-runtime=crio
version_upgrade_test.go:281: (dbg) Non-zero exit: out/minikube-linux-amd64 start -p kubernetes-upgrade-954958 --memory=2200 --kubernetes-version=v1.16.0 --driver=docker  --container-runtime=crio: exit status 106 (80.202832ms)

-- stdout --
	* [kubernetes-upgrade-954958] minikube v1.31.1 on Ubuntu 20.04 (kvm/amd64)
	  - MINIKUBE_LOCATION=16969
	  - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
	  - KUBECONFIG=/home/jenkins/minikube-integration/16969-5799/kubeconfig
	  - MINIKUBE_HOME=/home/jenkins/minikube-integration/16969-5799/.minikube
	  - MINIKUBE_BIN=out/minikube-linux-amd64
	  - MINIKUBE_FORCE_SYSTEMD=
	
	

-- /stdout --
** stderr ** 
	X Exiting due to K8S_DOWNGRADE_UNSUPPORTED: Unable to safely downgrade existing Kubernetes v1.27.3 cluster to v1.16.0
	* Suggestion: 
	
	    1) Recreate the cluster with Kubernetes 1.16.0, by running:
	    
	    minikube delete -p kubernetes-upgrade-954958
	    minikube start -p kubernetes-upgrade-954958 --kubernetes-version=v1.16.0
	    
	    2) Create a second cluster with Kubernetes 1.16.0, by running:
	    
	    minikube start -p kubernetes-upgrade-9549582 --kubernetes-version=v1.16.0
	    
	    3) Use the existing cluster at version Kubernetes 1.27.3, by running:
	    
	    minikube start -p kubernetes-upgrade-954958 --kubernetes-version=v1.27.3
	    

** /stderr **
version_upgrade_test.go:285: Attempting restart after unsuccessful downgrade
version_upgrade_test.go:287: (dbg) Run:  out/minikube-linux-amd64 start -p kubernetes-upgrade-954958 --memory=2200 --kubernetes-version=v1.27.3 --alsologtostderr -v=1 --driver=docker  --container-runtime=crio
version_upgrade_test.go:287: (dbg) Done: out/minikube-linux-amd64 start -p kubernetes-upgrade-954958 --memory=2200 --kubernetes-version=v1.27.3 --alsologtostderr -v=1 --driver=docker  --container-runtime=crio: (27.009317478s)
helpers_test.go:175: Cleaning up "kubernetes-upgrade-954958" profile ...
helpers_test.go:178: (dbg) Run:  out/minikube-linux-amd64 delete -p kubernetes-upgrade-954958
helpers_test.go:178: (dbg) Done: out/minikube-linux-amd64 delete -p kubernetes-upgrade-954958: (2.220117503s)
--- PASS: TestKubernetesUpgrade (359.78s)
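
The upgrade path above is plain start/stop/start; only the downgrade attempt is expected to fail. Condensed:

    minikube start -p kubernetes-upgrade-954958 --memory=2200 \
      --kubernetes-version=v1.16.0 --driver=docker --container-runtime=crio
    minikube stop -p kubernetes-upgrade-954958
    # Upgrade in place:
    minikube start -p kubernetes-upgrade-954958 --memory=2200 \
      --kubernetes-version=v1.27.3 --driver=docker --container-runtime=crio
    # Downgrading is refused with K8S_DOWNGRADE_UNSUPPORTED:
    minikube start -p kubernetes-upgrade-954958 --memory=2200 \
      --kubernetes-version=v1.16.0 --driver=docker --container-runtime=crio
    echo $?    # expected: 106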

TestMissingContainerUpgrade (133.12s)

=== RUN   TestMissingContainerUpgrade
=== PAUSE TestMissingContainerUpgrade

=== CONT  TestMissingContainerUpgrade
version_upgrade_test.go:321: (dbg) Run:  /tmp/minikube-v1.9.0.995002972.exe start -p missing-upgrade-068340 --memory=2200 --driver=docker  --container-runtime=crio
version_upgrade_test.go:321: (dbg) Done: /tmp/minikube-v1.9.0.995002972.exe start -p missing-upgrade-068340 --memory=2200 --driver=docker  --container-runtime=crio: (1m13.097320678s)
version_upgrade_test.go:330: (dbg) Run:  docker stop missing-upgrade-068340
version_upgrade_test.go:335: (dbg) Run:  docker rm missing-upgrade-068340
version_upgrade_test.go:341: (dbg) Run:  out/minikube-linux-amd64 start -p missing-upgrade-068340 --memory=2200 --alsologtostderr -v=1 --driver=docker  --container-runtime=crio
version_upgrade_test.go:341: (dbg) Done: out/minikube-linux-amd64 start -p missing-upgrade-068340 --memory=2200 --alsologtostderr -v=1 --driver=docker  --container-runtime=crio: (56.771838997s)
helpers_test.go:175: Cleaning up "missing-upgrade-068340" profile ...
helpers_test.go:178: (dbg) Run:  out/minikube-linux-amd64 delete -p missing-upgrade-068340
helpers_test.go:178: (dbg) Done: out/minikube-linux-amd64 delete -p missing-upgrade-068340: (2.100618362s)
--- PASS: TestMissingContainerUpgrade (133.12s)
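
What this test simulates is a profile whose node container vanished between binary versions: the container is removed behind minikube's back, and the newer binary must notice and recreate it on start. The same steps, as run above:

    /tmp/minikube-v1.9.0.995002972.exe start -p missing-upgrade-068340 --memory=2200 \
      --driver=docker --container-runtime=crio
    docker stop missing-upgrade-068340   # delete the container, keep the profile
    docker rm missing-upgrade-068340
    out/minikube-linux-amd64 start -p missing-upgrade-068340 --memory=2200 \
      --driver=docker --container-runtime=crio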

TestNoKubernetes/serial/StartNoK8sWithVersion (0.07s)

=== RUN   TestNoKubernetes/serial/StartNoK8sWithVersion
no_kubernetes_test.go:83: (dbg) Run:  out/minikube-linux-amd64 start -p NoKubernetes-915583 --no-kubernetes --kubernetes-version=1.20 --driver=docker  --container-runtime=crio
no_kubernetes_test.go:83: (dbg) Non-zero exit: out/minikube-linux-amd64 start -p NoKubernetes-915583 --no-kubernetes --kubernetes-version=1.20 --driver=docker  --container-runtime=crio: exit status 14 (74.222624ms)

-- stdout --
	* [NoKubernetes-915583] minikube v1.31.1 on Ubuntu 20.04 (kvm/amd64)
	  - MINIKUBE_LOCATION=16969
	  - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
	  - KUBECONFIG=/home/jenkins/minikube-integration/16969-5799/kubeconfig
	  - MINIKUBE_HOME=/home/jenkins/minikube-integration/16969-5799/.minikube
	  - MINIKUBE_BIN=out/minikube-linux-amd64
	  - MINIKUBE_FORCE_SYSTEMD=
	
	

-- /stdout --
** stderr ** 
	X Exiting due to MK_USAGE: cannot specify --kubernetes-version with --no-kubernetes,
	to unset a global config run:
	
	$ minikube config unset kubernetes-version

** /stderr **
--- PASS: TestNoKubernetes/serial/StartNoK8sWithVersion (0.07s)
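
The MK_USAGE exit is the expected result here: --no-kubernetes and --kubernetes-version are mutually exclusive. The stderr hint also covers the case where a version is pinned in the global config:

    minikube start -p NoKubernetes-915583 --no-kubernetes --kubernetes-version=1.20 \
      --driver=docker --container-runtime=crio   # exit status 14 (MK_USAGE)
    minikube config unset kubernetes-version     # clear a globally configured version
    minikube start -p NoKubernetes-915583 --no-kubernetes \
      --driver=docker --container-runtime=crio   # valid: starts without Kubernetes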

TestNoKubernetes/serial/StartWithK8s (35.26s)

=== RUN   TestNoKubernetes/serial/StartWithK8s
no_kubernetes_test.go:95: (dbg) Run:  out/minikube-linux-amd64 start -p NoKubernetes-915583 --driver=docker  --container-runtime=crio
no_kubernetes_test.go:95: (dbg) Done: out/minikube-linux-amd64 start -p NoKubernetes-915583 --driver=docker  --container-runtime=crio: (34.894233329s)
no_kubernetes_test.go:200: (dbg) Run:  out/minikube-linux-amd64 -p NoKubernetes-915583 status -o json
--- PASS: TestNoKubernetes/serial/StartWithK8s (35.26s)

TestNetworkPlugins/group/false (8.67s)

=== RUN   TestNetworkPlugins/group/false
net_test.go:246: (dbg) Run:  out/minikube-linux-amd64 start -p false-577222 --memory=2048 --alsologtostderr --cni=false --driver=docker  --container-runtime=crio
net_test.go:246: (dbg) Non-zero exit: out/minikube-linux-amd64 start -p false-577222 --memory=2048 --alsologtostderr --cni=false --driver=docker  --container-runtime=crio: exit status 14 (205.292642ms)

-- stdout --
	* [false-577222] minikube v1.31.1 on Ubuntu 20.04 (kvm/amd64)
	  - MINIKUBE_LOCATION=16969
	  - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
	  - KUBECONFIG=/home/jenkins/minikube-integration/16969-5799/kubeconfig
	  - MINIKUBE_HOME=/home/jenkins/minikube-integration/16969-5799/.minikube
	  - MINIKUBE_BIN=out/minikube-linux-amd64
	  - MINIKUBE_FORCE_SYSTEMD=
	* Using the docker driver based on user configuration
	
	

-- /stdout --
** stderr ** 
	I0731 11:03:35.518827  143881 out.go:296] Setting OutFile to fd 1 ...
	I0731 11:03:35.518977  143881 out.go:343] TERM=,COLORTERM=, which probably does not support color
	I0731 11:03:35.518989  143881 out.go:309] Setting ErrFile to fd 2...
	I0731 11:03:35.518996  143881 out.go:343] TERM=,COLORTERM=, which probably does not support color
	I0731 11:03:35.519340  143881 root.go:338] Updating PATH: /home/jenkins/minikube-integration/16969-5799/.minikube/bin
	I0731 11:03:35.520168  143881 out.go:303] Setting JSON to false
	I0731 11:03:35.521463  143881 start.go:128] hostinfo: {"hostname":"ubuntu-20-agent-15","uptime":2768,"bootTime":1690798648,"procs":597,"os":"linux","platform":"ubuntu","platformFamily":"debian","platformVersion":"20.04","kernelVersion":"5.15.0-1038-gcp","kernelArch":"x86_64","virtualizationSystem":"kvm","virtualizationRole":"guest","hostId":"591c9f12-2938-3743-e2bf-c56a050d43d1"}
	I0731 11:03:35.521526  143881 start.go:138] virtualization: kvm guest
	I0731 11:03:35.525164  143881 out.go:177] * [false-577222] minikube v1.31.1 on Ubuntu 20.04 (kvm/amd64)
	I0731 11:03:35.527169  143881 out.go:177]   - MINIKUBE_LOCATION=16969
	I0731 11:03:35.528885  143881 out.go:177]   - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
	I0731 11:03:35.527220  143881 notify.go:220] Checking for updates...
	I0731 11:03:35.531577  143881 out.go:177]   - KUBECONFIG=/home/jenkins/minikube-integration/16969-5799/kubeconfig
	I0731 11:03:35.533265  143881 out.go:177]   - MINIKUBE_HOME=/home/jenkins/minikube-integration/16969-5799/.minikube
	I0731 11:03:35.535036  143881 out.go:177]   - MINIKUBE_BIN=out/minikube-linux-amd64
	I0731 11:03:35.536625  143881 out.go:177]   - MINIKUBE_FORCE_SYSTEMD=
	I0731 11:03:35.538715  143881 config.go:182] Loaded profile config "NoKubernetes-915583": Driver=docker, ContainerRuntime=crio, KubernetesVersion=v1.27.3
	I0731 11:03:35.538873  143881 config.go:182] Loaded profile config "force-systemd-env-929753": Driver=docker, ContainerRuntime=crio, KubernetesVersion=v1.27.3
	I0731 11:03:35.539023  143881 config.go:182] Loaded profile config "offline-crio-930318": Driver=docker, ContainerRuntime=crio, KubernetesVersion=v1.27.3
	I0731 11:03:35.539162  143881 driver.go:373] Setting default libvirt URI to qemu:///system
	I0731 11:03:35.572373  143881 docker.go:121] docker version: linux-24.0.5:Docker Engine - Community
	I0731 11:03:35.572454  143881 cli_runner.go:164] Run: docker system info --format "{{json .}}"
	I0731 11:03:35.653331  143881 info.go:266] docker info: {ID:TS6T:UINC:MIYS:RZPA:KS6T:4JQK:7JHN:D6RA:LDP2:MHAE:G32M:C5NQ Containers:3 ContainersRunning:3 ContainersPaused:0 ContainersStopped:0 Images:3 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local logentries splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:true BridgeNfIP6Tables:true Debug:false NFd:54 OomKillDisable:true NGoroutines:75 SystemTime:2023-07-31 11:03:35.643254875 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1038-gcp OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Architecture:x86_64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:8 MemTotal:33648062464 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ubuntu-20-agent-15 Labels:[] ExperimentalBuild:false ServerVersion:24.0.5 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:3dce8eb055cbb6872793272b4f20ed16117344f8 Expected:3dce8eb055cbb6872793272b4f20ed16117344f8} RuncCommit:{ID:v1.1.7-0-g860f061 Expected:v1.1.7-0-g860f061} InitCommit:{ID:de40ad0 Expected:de40ad0} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.11.2] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.20.2] map[Name:scan Path:/usr/libexec/docker/cli-plugins/docker-scan SchemaVersion:0.1.0 ShortDescription:Docker Scan Vendor:Docker Inc. Version:v0.23.0]] Warnings:<nil>}}
	I0731 11:03:35.653479  143881 docker.go:294] overlay module found
	I0731 11:03:35.658207  143881 out.go:177] * Using the docker driver based on user configuration
	I0731 11:03:35.659901  143881 start.go:298] selected driver: docker
	I0731 11:03:35.659924  143881 start.go:898] validating driver "docker" against <nil>
	I0731 11:03:35.659940  143881 start.go:909] status for docker: {Installed:true Healthy:true Running:false NeedsImprovement:false Error:<nil> Reason: Fix: Doc: Version:}
	I0731 11:03:35.662653  143881 out.go:177] 
	W0731 11:03:35.664398  143881 out.go:239] X Exiting due to MK_USAGE: The "crio" container runtime requires CNI
	X Exiting due to MK_USAGE: The "crio" container runtime requires CNI
	I0731 11:03:35.666082  143881 out.go:177] 

** /stderr **
net_test.go:88: 
----------------------- debugLogs start: false-577222 [pass: true] --------------------------------
>>> netcat: nslookup kubernetes.default:
Error in configuration: context was not found for specified context: false-577222

>>> netcat: nslookup debug kubernetes.default a-records:
Error in configuration: context was not found for specified context: false-577222

>>> netcat: dig search kubernetes.default:
Error in configuration: context was not found for specified context: false-577222

>>> netcat: dig @10.96.0.10 kubernetes.default.svc.cluster.local udp/53:
Error in configuration: context was not found for specified context: false-577222

>>> netcat: dig @10.96.0.10 kubernetes.default.svc.cluster.local tcp/53:
Error in configuration: context was not found for specified context: false-577222

>>> netcat: nc 10.96.0.10 udp/53:
Error in configuration: context was not found for specified context: false-577222

>>> netcat: nc 10.96.0.10 tcp/53:
Error in configuration: context was not found for specified context: false-577222

>>> netcat: /etc/nsswitch.conf:
Error in configuration: context was not found for specified context: false-577222

>>> netcat: /etc/hosts:
Error in configuration: context was not found for specified context: false-577222

>>> netcat: /etc/resolv.conf:
Error in configuration: context was not found for specified context: false-577222

>>> host: /etc/nsswitch.conf:
* Profile "false-577222" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p false-577222"

>>> host: /etc/hosts:
* Profile "false-577222" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p false-577222"

>>> host: /etc/resolv.conf:
* Profile "false-577222" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p false-577222"

>>> k8s: nodes, services, endpoints, daemon sets, deployments and pods, :
Error in configuration: context was not found for specified context: false-577222

>>> host: crictl pods:
* Profile "false-577222" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p false-577222"

>>> host: crictl containers:
* Profile "false-577222" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p false-577222"

>>> k8s: describe netcat deployment:
error: context "false-577222" does not exist

>>> k8s: describe netcat pod(s):
error: context "false-577222" does not exist

>>> k8s: netcat logs:
error: context "false-577222" does not exist

>>> k8s: describe coredns deployment:
error: context "false-577222" does not exist

>>> k8s: describe coredns pods:
error: context "false-577222" does not exist

>>> k8s: coredns logs:
error: context "false-577222" does not exist

>>> k8s: describe api server pod(s):
error: context "false-577222" does not exist

>>> k8s: api server logs:
error: context "false-577222" does not exist

>>> host: /etc/cni:
* Profile "false-577222" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p false-577222"

>>> host: ip a s:
* Profile "false-577222" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p false-577222"

>>> host: ip r s:
* Profile "false-577222" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p false-577222"

>>> host: iptables-save:
* Profile "false-577222" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p false-577222"

>>> host: iptables table nat:
* Profile "false-577222" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p false-577222"

>>> k8s: describe kube-proxy daemon set:
error: context "false-577222" does not exist

>>> k8s: describe kube-proxy pod(s):
error: context "false-577222" does not exist

>>> k8s: kube-proxy logs:
error: context "false-577222" does not exist

>>> host: kubelet daemon status:
* Profile "false-577222" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p false-577222"

>>> host: kubelet daemon config:
* Profile "false-577222" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p false-577222"

>>> k8s: kubelet logs:
* Profile "false-577222" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p false-577222"

>>> host: /etc/kubernetes/kubelet.conf:
* Profile "false-577222" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p false-577222"

>>> host: /var/lib/kubelet/config.yaml:
* Profile "false-577222" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p false-577222"

>>> k8s: kubectl config:
apiVersion: v1
clusters: null
contexts: null
current-context: ""
kind: Config
preferences: {}
users: null

>>> k8s: cms:
Error in configuration: context was not found for specified context: false-577222

>>> host: docker daemon status:
* Profile "false-577222" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p false-577222"

>>> host: docker daemon config:
* Profile "false-577222" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p false-577222"

>>> host: /etc/docker/daemon.json:
* Profile "false-577222" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p false-577222"

>>> host: docker system info:
* Profile "false-577222" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p false-577222"

>>> host: cri-docker daemon status:
* Profile "false-577222" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p false-577222"

>>> host: cri-docker daemon config:
* Profile "false-577222" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p false-577222"

>>> host: /etc/systemd/system/cri-docker.service.d/10-cni.conf:
* Profile "false-577222" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p false-577222"

>>> host: /usr/lib/systemd/system/cri-docker.service:
* Profile "false-577222" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p false-577222"

>>> host: cri-dockerd version:
* Profile "false-577222" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p false-577222"

>>> host: containerd daemon status:
* Profile "false-577222" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p false-577222"

>>> host: containerd daemon config:
* Profile "false-577222" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p false-577222"

>>> host: /lib/systemd/system/containerd.service:
* Profile "false-577222" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p false-577222"

>>> host: /etc/containerd/config.toml:
* Profile "false-577222" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p false-577222"

>>> host: containerd config dump:
* Profile "false-577222" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p false-577222"

>>> host: crio daemon status:
* Profile "false-577222" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p false-577222"

>>> host: crio daemon config:
* Profile "false-577222" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p false-577222"

>>> host: /etc/crio:
* Profile "false-577222" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p false-577222"

>>> host: crio config:
* Profile "false-577222" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p false-577222"

----------------------- debugLogs end: false-577222 [took: 8.022776281s] --------------------------------
helpers_test.go:175: Cleaning up "false-577222" profile ...
helpers_test.go:178: (dbg) Run:  out/minikube-linux-amd64 delete -p false-577222
--- PASS: TestNetworkPlugins/group/false (8.67s)
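
This group passes because the start is rejected up front: CRI-O ships no built-in networking, so --cni=false must fail validation before any container is created. A minimal reproduction:

    minikube start -p false-577222 --memory=2048 --cni=false \
      --driver=docker --container-runtime=crio
    # X Exiting due to MK_USAGE: The "crio" container runtime requires CNI
    echo $?    # expected: 14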

TestNoKubernetes/serial/StartWithStopK8s (5.76s)

=== RUN   TestNoKubernetes/serial/StartWithStopK8s
no_kubernetes_test.go:112: (dbg) Run:  out/minikube-linux-amd64 start -p NoKubernetes-915583 --no-kubernetes --driver=docker  --container-runtime=crio
no_kubernetes_test.go:112: (dbg) Done: out/minikube-linux-amd64 start -p NoKubernetes-915583 --no-kubernetes --driver=docker  --container-runtime=crio: (3.496862642s)
no_kubernetes_test.go:200: (dbg) Run:  out/minikube-linux-amd64 -p NoKubernetes-915583 status -o json
no_kubernetes_test.go:200: (dbg) Non-zero exit: out/minikube-linux-amd64 -p NoKubernetes-915583 status -o json: exit status 2 (251.485133ms)

-- stdout --
	{"Name":"NoKubernetes-915583","Host":"Running","Kubelet":"Stopped","APIServer":"Stopped","Kubeconfig":"Configured","Worker":false}

-- /stdout --
no_kubernetes_test.go:124: (dbg) Run:  out/minikube-linux-amd64 delete -p NoKubernetes-915583
no_kubernetes_test.go:124: (dbg) Done: out/minikube-linux-amd64 delete -p NoKubernetes-915583: (2.010401131s)
--- PASS: TestNoKubernetes/serial/StartWithStopK8s (5.76s)

TestNoKubernetes/serial/Start (6.78s)

=== RUN   TestNoKubernetes/serial/Start
no_kubernetes_test.go:136: (dbg) Run:  out/minikube-linux-amd64 start -p NoKubernetes-915583 --no-kubernetes --driver=docker  --container-runtime=crio
no_kubernetes_test.go:136: (dbg) Done: out/minikube-linux-amd64 start -p NoKubernetes-915583 --no-kubernetes --driver=docker  --container-runtime=crio: (6.781931687s)
--- PASS: TestNoKubernetes/serial/Start (6.78s)

TestNoKubernetes/serial/VerifyK8sNotRunning (0.27s)

=== RUN   TestNoKubernetes/serial/VerifyK8sNotRunning
no_kubernetes_test.go:147: (dbg) Run:  out/minikube-linux-amd64 ssh -p NoKubernetes-915583 "sudo systemctl is-active --quiet service kubelet"
no_kubernetes_test.go:147: (dbg) Non-zero exit: out/minikube-linux-amd64 ssh -p NoKubernetes-915583 "sudo systemctl is-active --quiet service kubelet": exit status 1 (265.08808ms)

** stderr ** 
	ssh: Process exited with status 3

** /stderr **
--- PASS: TestNoKubernetes/serial/VerifyK8sNotRunning (0.27s)
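
The verification leans on systemctl exit codes rather than output: "systemctl is-active --quiet" exits 0 only for an active unit, so the non-zero ssh exit (status 3, i.e. inactive) confirms the kubelet never started. The same probe by hand:

    minikube ssh -p NoKubernetes-915583 "sudo systemctl is-active --quiet service kubelet"
    echo $?    # non-zero while Kubernetes is disabled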

TestNoKubernetes/serial/ProfileList (1.16s)

=== RUN   TestNoKubernetes/serial/ProfileList
no_kubernetes_test.go:169: (dbg) Run:  out/minikube-linux-amd64 profile list
no_kubernetes_test.go:179: (dbg) Run:  out/minikube-linux-amd64 profile list --output=json
--- PASS: TestNoKubernetes/serial/ProfileList (1.16s)

TestNoKubernetes/serial/Stop (2.93s)

=== RUN   TestNoKubernetes/serial/Stop
no_kubernetes_test.go:158: (dbg) Run:  out/minikube-linux-amd64 stop -p NoKubernetes-915583
no_kubernetes_test.go:158: (dbg) Done: out/minikube-linux-amd64 stop -p NoKubernetes-915583: (2.926087185s)
--- PASS: TestNoKubernetes/serial/Stop (2.93s)

TestNoKubernetes/serial/StartNoArgs (6.28s)

=== RUN   TestNoKubernetes/serial/StartNoArgs
no_kubernetes_test.go:191: (dbg) Run:  out/minikube-linux-amd64 start -p NoKubernetes-915583 --driver=docker  --container-runtime=crio
no_kubernetes_test.go:191: (dbg) Done: out/minikube-linux-amd64 start -p NoKubernetes-915583 --driver=docker  --container-runtime=crio: (6.275450385s)
--- PASS: TestNoKubernetes/serial/StartNoArgs (6.28s)

TestNoKubernetes/serial/VerifyK8sNotRunningSecond (0.27s)

=== RUN   TestNoKubernetes/serial/VerifyK8sNotRunningSecond
no_kubernetes_test.go:147: (dbg) Run:  out/minikube-linux-amd64 ssh -p NoKubernetes-915583 "sudo systemctl is-active --quiet service kubelet"
no_kubernetes_test.go:147: (dbg) Non-zero exit: out/minikube-linux-amd64 ssh -p NoKubernetes-915583 "sudo systemctl is-active --quiet service kubelet": exit status 1 (271.013097ms)

** stderr ** 
	ssh: Process exited with status 3

** /stderr **
--- PASS: TestNoKubernetes/serial/VerifyK8sNotRunningSecond (0.27s)

TestStoppedBinaryUpgrade/Setup (0.6s)

=== RUN   TestStoppedBinaryUpgrade/Setup
E0731 11:04:32.970514   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/ingress-addon-legacy-538476/client.crt: no such file or directory
--- PASS: TestStoppedBinaryUpgrade/Setup (0.60s)

TestStoppedBinaryUpgrade/MinikubeLogs (0.49s)

=== RUN   TestStoppedBinaryUpgrade/MinikubeLogs
version_upgrade_test.go:218: (dbg) Run:  out/minikube-linux-amd64 logs -p stopped-upgrade-039423
--- PASS: TestStoppedBinaryUpgrade/MinikubeLogs (0.49s)

TestPause/serial/Start (38.14s)

=== RUN   TestPause/serial/Start
pause_test.go:80: (dbg) Run:  out/minikube-linux-amd64 start -p pause-588607 --memory=2048 --install-addons=false --wait=all --driver=docker  --container-runtime=crio
pause_test.go:80: (dbg) Done: out/minikube-linux-amd64 start -p pause-588607 --memory=2048 --install-addons=false --wait=all --driver=docker  --container-runtime=crio: (38.142595835s)
--- PASS: TestPause/serial/Start (38.14s)

TestNetworkPlugins/group/auto/Start (46.17s)

=== RUN   TestNetworkPlugins/group/auto/Start
net_test.go:112: (dbg) Run:  out/minikube-linux-amd64 start -p auto-577222 --memory=3072 --alsologtostderr --wait=true --wait-timeout=15m --driver=docker  --container-runtime=crio
E0731 11:07:12.323289   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/functional-683521/client.crt: no such file or directory
net_test.go:112: (dbg) Done: out/minikube-linux-amd64 start -p auto-577222 --memory=3072 --alsologtostderr --wait=true --wait-timeout=15m --driver=docker  --container-runtime=crio: (46.1675041s)
--- PASS: TestNetworkPlugins/group/auto/Start (46.17s)

TestPause/serial/SecondStartNoReconfiguration (29.59s)

=== RUN   TestPause/serial/SecondStartNoReconfiguration
pause_test.go:92: (dbg) Run:  out/minikube-linux-amd64 start -p pause-588607 --alsologtostderr -v=1 --driver=docker  --container-runtime=crio
pause_test.go:92: (dbg) Done: out/minikube-linux-amd64 start -p pause-588607 --alsologtostderr -v=1 --driver=docker  --container-runtime=crio: (29.56910184s)
--- PASS: TestPause/serial/SecondStartNoReconfiguration (29.59s)

TestNetworkPlugins/group/auto/KubeletFlags (0.35s)

=== RUN   TestNetworkPlugins/group/auto/KubeletFlags
net_test.go:133: (dbg) Run:  out/minikube-linux-amd64 ssh -p auto-577222 "pgrep -a kubelet"
--- PASS: TestNetworkPlugins/group/auto/KubeletFlags (0.35s)
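Every KubeletFlags step in this report has the same shape: list the kubelet process with its full command line inside the node, then assert on the flags. A hand-run sketch against this profile:

    # pgrep -a prints "<pid> <full command line>" for each match, so the
    # kubelet's flags can be read or grepped directly from the output.
    out/minikube-linux-amd64 ssh -p auto-577222 "pgrep -a kubelet"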

                                                
                                    
TestNetworkPlugins/group/auto/NetCatPod (10.49s)

=== RUN   TestNetworkPlugins/group/auto/NetCatPod
net_test.go:149: (dbg) Run:  kubectl --context auto-577222 replace --force -f testdata/netcat-deployment.yaml
net_test.go:163: (dbg) TestNetworkPlugins/group/auto/NetCatPod: waiting 15m0s for pods matching "app=netcat" in namespace "default" ...
helpers_test.go:344: "netcat-7458db8b8-fzqzr" [dab3a019-dfb5-448b-8135-bae9b7b262cb] Pending / Ready:ContainersNotReady (containers with unready status: [dnsutils]) / ContainersReady:ContainersNotReady (containers with unready status: [dnsutils])
helpers_test.go:344: "netcat-7458db8b8-fzqzr" [dab3a019-dfb5-448b-8135-bae9b7b262cb] Running
net_test.go:163: (dbg) TestNetworkPlugins/group/auto/NetCatPod: app=netcat healthy within 10.012590088s
--- PASS: TestNetworkPlugins/group/auto/NetCatPod (10.49s)
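The NetCatPod steps deploy a small netcat workload and block until its pod reports Ready. A rough equivalent with plain kubectl, reusing the label and namespace from the log above:

    kubectl --context auto-577222 replace --force -f testdata/netcat-deployment.yaml
    # Same readiness gate as the test helper: wait for app=netcat in default.
    kubectl --context auto-577222 wait --namespace=default \
      --for=condition=Ready pod --selector=app=netcat --timeout=15m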

                                                
                                    
TestNetworkPlugins/group/kindnet/Start (67.39s)

=== RUN   TestNetworkPlugins/group/kindnet/Start
net_test.go:112: (dbg) Run:  out/minikube-linux-amd64 start -p kindnet-577222 --memory=3072 --alsologtostderr --wait=true --wait-timeout=15m --cni=kindnet --driver=docker  --container-runtime=crio
net_test.go:112: (dbg) Done: out/minikube-linux-amd64 start -p kindnet-577222 --memory=3072 --alsologtostderr --wait=true --wait-timeout=15m --cni=kindnet --driver=docker  --container-runtime=crio: (1m7.39043469s)
--- PASS: TestNetworkPlugins/group/kindnet/Start (67.39s)
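The network-plugin groups differ only in their start flags: --cni selects a built-in plugin by name (kindnet, calico, flannel, bridge) or, as the custom-flannel run further down shows, a path to a manifest. A sketch of an equivalent manual start:

    out/minikube-linux-amd64 start -p kindnet-577222 \
      --memory=3072 --wait=true --wait-timeout=15m \
      --cni=kindnet --driver=docker --container-runtime=crio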

                                                
                                    
TestNetworkPlugins/group/auto/DNS (0.16s)

=== RUN   TestNetworkPlugins/group/auto/DNS
net_test.go:175: (dbg) Run:  kubectl --context auto-577222 exec deployment/netcat -- nslookup kubernetes.default
--- PASS: TestNetworkPlugins/group/auto/DNS (0.16s)

TestNetworkPlugins/group/auto/Localhost (0.14s)

=== RUN   TestNetworkPlugins/group/auto/Localhost
net_test.go:194: (dbg) Run:  kubectl --context auto-577222 exec deployment/netcat -- /bin/sh -c "nc -w 5 -i 5 -z localhost 8080"
--- PASS: TestNetworkPlugins/group/auto/Localhost (0.14s)

TestNetworkPlugins/group/auto/HairPin (0.14s)

=== RUN   TestNetworkPlugins/group/auto/HairPin
net_test.go:264: (dbg) Run:  kubectl --context auto-577222 exec deployment/netcat -- /bin/sh -c "nc -w 5 -i 5 -z netcat 8080"
--- PASS: TestNetworkPlugins/group/auto/HairPin (0.14s)
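The DNS/Localhost/HairPin trio probes three separate paths from inside the netcat pod; hairpin is the interesting one, since the pod must reach itself back through its own service, which not every CNI supports. A sketch of the three checks, assuming (as the manifest used here appears to provide) a netcat service on port 8080:

    # DNS: resolve the in-cluster API service name.
    kubectl --context auto-577222 exec deployment/netcat -- nslookup kubernetes.default
    # Localhost: reach a port on the pod's own loopback.
    kubectl --context auto-577222 exec deployment/netcat -- /bin/sh -c "nc -w 5 -i 5 -z localhost 8080"
    # HairPin: reach the pod again through its own service name.
    kubectl --context auto-577222 exec deployment/netcat -- /bin/sh -c "nc -w 5 -i 5 -z netcat 8080"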

                                                
                                    
TestPause/serial/Pause (0.75s)

=== RUN   TestPause/serial/Pause
pause_test.go:110: (dbg) Run:  out/minikube-linux-amd64 pause -p pause-588607 --alsologtostderr -v=5
--- PASS: TestPause/serial/Pause (0.75s)

TestPause/serial/VerifyStatus (0.3s)

=== RUN   TestPause/serial/VerifyStatus
status_test.go:76: (dbg) Run:  out/minikube-linux-amd64 status -p pause-588607 --output=json --layout=cluster
status_test.go:76: (dbg) Non-zero exit: out/minikube-linux-amd64 status -p pause-588607 --output=json --layout=cluster: exit status 2 (304.300518ms)

-- stdout --
	{"Name":"pause-588607","StatusCode":418,"StatusName":"Paused","Step":"Done","StepDetail":"* Paused 7 containers in: kube-system, kubernetes-dashboard, storage-gluster, istio-operator","BinaryVersion":"v1.31.1","Components":{"kubeconfig":{"Name":"kubeconfig","StatusCode":200,"StatusName":"OK"}},"Nodes":[{"Name":"pause-588607","StatusCode":200,"StatusName":"OK","Components":{"apiserver":{"Name":"apiserver","StatusCode":418,"StatusName":"Paused"},"kubelet":{"Name":"kubelet","StatusCode":405,"StatusName":"Stopped"}}}]}

-- /stdout --
--- PASS: TestPause/serial/VerifyStatus (0.30s)
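Exit status 2 plus a well-formed JSON payload is the expected shape for a paused cluster; StatusCode 418 maps to "Paused" in the output above. A sketch of pulling the per-component states out of that JSON (jq is not part of the suite, just a convenient reader):

    out/minikube-linux-amd64 status -p pause-588607 --output=json --layout=cluster |
      jq -r '.Nodes[0].Components | to_entries[] | "\(.key): \(.value.StatusName)"'
    # per the stdout above, this should print:
    #   apiserver: Paused
    #   kubelet: Stopped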

                                                
                                    
TestPause/serial/Unpause (0.69s)

=== RUN   TestPause/serial/Unpause
pause_test.go:121: (dbg) Run:  out/minikube-linux-amd64 unpause -p pause-588607 --alsologtostderr -v=5
--- PASS: TestPause/serial/Unpause (0.69s)

TestPause/serial/PauseAgain (0.89s)

=== RUN   TestPause/serial/PauseAgain
pause_test.go:110: (dbg) Run:  out/minikube-linux-amd64 pause -p pause-588607 --alsologtostderr -v=5
--- PASS: TestPause/serial/PauseAgain (0.89s)

TestPause/serial/DeletePaused (2.9s)

=== RUN   TestPause/serial/DeletePaused
pause_test.go:132: (dbg) Run:  out/minikube-linux-amd64 delete -p pause-588607 --alsologtostderr -v=5
pause_test.go:132: (dbg) Done: out/minikube-linux-amd64 delete -p pause-588607 --alsologtostderr -v=5: (2.896581162s)
--- PASS: TestPause/serial/DeletePaused (2.90s)

TestPause/serial/VerifyDeletedResources (1.81s)

=== RUN   TestPause/serial/VerifyDeletedResources
pause_test.go:142: (dbg) Run:  out/minikube-linux-amd64 profile list --output json
pause_test.go:142: (dbg) Done: out/minikube-linux-amd64 profile list --output json: (1.748042174s)
pause_test.go:168: (dbg) Run:  docker ps -a
pause_test.go:173: (dbg) Run:  docker volume inspect pause-588607
pause_test.go:173: (dbg) Non-zero exit: docker volume inspect pause-588607: exit status 1 (17.439257ms)

-- stdout --
	[]

-- /stdout --
** stderr **
	Error response from daemon: get pause-588607: no such volume

** /stderr **
pause_test.go:178: (dbg) Run:  docker network ls
--- PASS: TestPause/serial/VerifyDeletedResources (1.81s)
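Here the "no such volume" error is the desired outcome: after delete, no container, volume, or network for the profile should survive. A hand-run sketch of the same verification (the name filters are illustrative):

    # A non-zero exit from volume inspect means the volume is gone.
    docker volume inspect pause-588607 >/dev/null 2>&1 || echo "volume removed"
    docker ps -a --filter name=pause-588607 --format '{{.Names}}'       # expect no output
    docker network ls --filter name=pause-588607 --format '{{.Name}}'  # expect no output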

                                                
                                    
TestNetworkPlugins/group/calico/Start (62.14s)

=== RUN   TestNetworkPlugins/group/calico/Start
net_test.go:112: (dbg) Run:  out/minikube-linux-amd64 start -p calico-577222 --memory=3072 --alsologtostderr --wait=true --wait-timeout=15m --cni=calico --driver=docker  --container-runtime=crio
net_test.go:112: (dbg) Done: out/minikube-linux-amd64 start -p calico-577222 --memory=3072 --alsologtostderr --wait=true --wait-timeout=15m --cni=calico --driver=docker  --container-runtime=crio: (1m2.142569592s)
--- PASS: TestNetworkPlugins/group/calico/Start (62.14s)

TestNetworkPlugins/group/custom-flannel/Start (56.79s)

=== RUN   TestNetworkPlugins/group/custom-flannel/Start
net_test.go:112: (dbg) Run:  out/minikube-linux-amd64 start -p custom-flannel-577222 --memory=3072 --alsologtostderr --wait=true --wait-timeout=15m --cni=testdata/kube-flannel.yaml --driver=docker  --container-runtime=crio
net_test.go:112: (dbg) Done: out/minikube-linux-amd64 start -p custom-flannel-577222 --memory=3072 --alsologtostderr --wait=true --wait-timeout=15m --cni=testdata/kube-flannel.yaml --driver=docker  --container-runtime=crio: (56.794180118s)
--- PASS: TestNetworkPlugins/group/custom-flannel/Start (56.79s)

TestNetworkPlugins/group/kindnet/ControllerPod (5.02s)

=== RUN   TestNetworkPlugins/group/kindnet/ControllerPod
net_test.go:120: (dbg) TestNetworkPlugins/group/kindnet/ControllerPod: waiting 10m0s for pods matching "app=kindnet" in namespace "kube-system" ...
helpers_test.go:344: "kindnet-cvkft" [0f83375c-c8da-4929-a4fc-313226a90a63] Running
net_test.go:120: (dbg) TestNetworkPlugins/group/kindnet/ControllerPod: app=kindnet healthy within 5.018228856s
--- PASS: TestNetworkPlugins/group/kindnet/ControllerPod (5.02s)

TestNetworkPlugins/group/kindnet/KubeletFlags (0.24s)

=== RUN   TestNetworkPlugins/group/kindnet/KubeletFlags
net_test.go:133: (dbg) Run:  out/minikube-linux-amd64 ssh -p kindnet-577222 "pgrep -a kubelet"
--- PASS: TestNetworkPlugins/group/kindnet/KubeletFlags (0.24s)

TestNetworkPlugins/group/kindnet/NetCatPod (10.33s)

=== RUN   TestNetworkPlugins/group/kindnet/NetCatPod
net_test.go:149: (dbg) Run:  kubectl --context kindnet-577222 replace --force -f testdata/netcat-deployment.yaml
net_test.go:163: (dbg) TestNetworkPlugins/group/kindnet/NetCatPod: waiting 15m0s for pods matching "app=netcat" in namespace "default" ...
helpers_test.go:344: "netcat-7458db8b8-9lft4" [20c8be91-8c73-4c8f-9f01-1c7751e70360] Pending / Ready:ContainersNotReady (containers with unready status: [dnsutils]) / ContainersReady:ContainersNotReady (containers with unready status: [dnsutils])
helpers_test.go:344: "netcat-7458db8b8-9lft4" [20c8be91-8c73-4c8f-9f01-1c7751e70360] Running
net_test.go:163: (dbg) TestNetworkPlugins/group/kindnet/NetCatPod: app=netcat healthy within 10.008677784s
--- PASS: TestNetworkPlugins/group/kindnet/NetCatPod (10.33s)

TestNetworkPlugins/group/custom-flannel/KubeletFlags (0.28s)

=== RUN   TestNetworkPlugins/group/custom-flannel/KubeletFlags
net_test.go:133: (dbg) Run:  out/minikube-linux-amd64 ssh -p custom-flannel-577222 "pgrep -a kubelet"
--- PASS: TestNetworkPlugins/group/custom-flannel/KubeletFlags (0.28s)

TestNetworkPlugins/group/custom-flannel/NetCatPod (10.29s)

=== RUN   TestNetworkPlugins/group/custom-flannel/NetCatPod
net_test.go:149: (dbg) Run:  kubectl --context custom-flannel-577222 replace --force -f testdata/netcat-deployment.yaml
net_test.go:163: (dbg) TestNetworkPlugins/group/custom-flannel/NetCatPod: waiting 15m0s for pods matching "app=netcat" in namespace "default" ...
helpers_test.go:344: "netcat-7458db8b8-vjzc8" [1e933b9b-5868-46ba-b97c-7b7ee0211bd5] Pending / Ready:ContainersNotReady (containers with unready status: [dnsutils]) / ContainersReady:ContainersNotReady (containers with unready status: [dnsutils])
helpers_test.go:344: "netcat-7458db8b8-vjzc8" [1e933b9b-5868-46ba-b97c-7b7ee0211bd5] Running
net_test.go:163: (dbg) TestNetworkPlugins/group/custom-flannel/NetCatPod: app=netcat healthy within 10.00867022s
--- PASS: TestNetworkPlugins/group/custom-flannel/NetCatPod (10.29s)

TestNetworkPlugins/group/calico/ControllerPod (5.03s)

=== RUN   TestNetworkPlugins/group/calico/ControllerPod
net_test.go:120: (dbg) TestNetworkPlugins/group/calico/ControllerPod: waiting 10m0s for pods matching "k8s-app=calico-node" in namespace "kube-system" ...
helpers_test.go:344: "calico-node-bp9ph" [d5d74138-7824-46a4-974f-3719a9419ad5] Running
net_test.go:120: (dbg) TestNetworkPlugins/group/calico/ControllerPod: k8s-app=calico-node healthy within 5.025775581s
--- PASS: TestNetworkPlugins/group/calico/ControllerPod (5.03s)

TestNetworkPlugins/group/kindnet/DNS (0.15s)

=== RUN   TestNetworkPlugins/group/kindnet/DNS
net_test.go:175: (dbg) Run:  kubectl --context kindnet-577222 exec deployment/netcat -- nslookup kubernetes.default
--- PASS: TestNetworkPlugins/group/kindnet/DNS (0.15s)

TestNetworkPlugins/group/kindnet/Localhost (0.12s)

=== RUN   TestNetworkPlugins/group/kindnet/Localhost
net_test.go:194: (dbg) Run:  kubectl --context kindnet-577222 exec deployment/netcat -- /bin/sh -c "nc -w 5 -i 5 -z localhost 8080"
--- PASS: TestNetworkPlugins/group/kindnet/Localhost (0.12s)

TestNetworkPlugins/group/kindnet/HairPin (0.13s)

=== RUN   TestNetworkPlugins/group/kindnet/HairPin
net_test.go:264: (dbg) Run:  kubectl --context kindnet-577222 exec deployment/netcat -- /bin/sh -c "nc -w 5 -i 5 -z netcat 8080"
--- PASS: TestNetworkPlugins/group/kindnet/HairPin (0.13s)

TestNetworkPlugins/group/calico/KubeletFlags (0.25s)

=== RUN   TestNetworkPlugins/group/calico/KubeletFlags
net_test.go:133: (dbg) Run:  out/minikube-linux-amd64 ssh -p calico-577222 "pgrep -a kubelet"
--- PASS: TestNetworkPlugins/group/calico/KubeletFlags (0.25s)

TestNetworkPlugins/group/calico/NetCatPod (10.39s)

=== RUN   TestNetworkPlugins/group/calico/NetCatPod
net_test.go:149: (dbg) Run:  kubectl --context calico-577222 replace --force -f testdata/netcat-deployment.yaml
net_test.go:163: (dbg) TestNetworkPlugins/group/calico/NetCatPod: waiting 15m0s for pods matching "app=netcat" in namespace "default" ...
helpers_test.go:344: "netcat-7458db8b8-sr6xl" [ec00cdb7-654a-45ca-ba10-580a7edab204] Pending / Ready:ContainersNotReady (containers with unready status: [dnsutils]) / ContainersReady:ContainersNotReady (containers with unready status: [dnsutils])
E0731 11:09:32.970356   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/ingress-addon-legacy-538476/client.crt: no such file or directory
helpers_test.go:344: "netcat-7458db8b8-sr6xl" [ec00cdb7-654a-45ca-ba10-580a7edab204] Running
net_test.go:163: (dbg) TestNetworkPlugins/group/calico/NetCatPod: app=netcat healthy within 10.011384918s
--- PASS: TestNetworkPlugins/group/calico/NetCatPod (10.39s)

TestNetworkPlugins/group/custom-flannel/DNS (0.2s)

=== RUN   TestNetworkPlugins/group/custom-flannel/DNS
net_test.go:175: (dbg) Run:  kubectl --context custom-flannel-577222 exec deployment/netcat -- nslookup kubernetes.default
--- PASS: TestNetworkPlugins/group/custom-flannel/DNS (0.20s)

TestNetworkPlugins/group/custom-flannel/Localhost (0.2s)

=== RUN   TestNetworkPlugins/group/custom-flannel/Localhost
net_test.go:194: (dbg) Run:  kubectl --context custom-flannel-577222 exec deployment/netcat -- /bin/sh -c "nc -w 5 -i 5 -z localhost 8080"
--- PASS: TestNetworkPlugins/group/custom-flannel/Localhost (0.20s)

TestNetworkPlugins/group/custom-flannel/HairPin (0.19s)

=== RUN   TestNetworkPlugins/group/custom-flannel/HairPin
net_test.go:264: (dbg) Run:  kubectl --context custom-flannel-577222 exec deployment/netcat -- /bin/sh -c "nc -w 5 -i 5 -z netcat 8080"
--- PASS: TestNetworkPlugins/group/custom-flannel/HairPin (0.19s)

TestNetworkPlugins/group/calico/DNS (0.19s)

=== RUN   TestNetworkPlugins/group/calico/DNS
net_test.go:175: (dbg) Run:  kubectl --context calico-577222 exec deployment/netcat -- nslookup kubernetes.default
--- PASS: TestNetworkPlugins/group/calico/DNS (0.19s)

TestNetworkPlugins/group/calico/Localhost (0.15s)

=== RUN   TestNetworkPlugins/group/calico/Localhost
net_test.go:194: (dbg) Run:  kubectl --context calico-577222 exec deployment/netcat -- /bin/sh -c "nc -w 5 -i 5 -z localhost 8080"
--- PASS: TestNetworkPlugins/group/calico/Localhost (0.15s)

TestNetworkPlugins/group/calico/HairPin (0.16s)

=== RUN   TestNetworkPlugins/group/calico/HairPin
net_test.go:264: (dbg) Run:  kubectl --context calico-577222 exec deployment/netcat -- /bin/sh -c "nc -w 5 -i 5 -z netcat 8080"
--- PASS: TestNetworkPlugins/group/calico/HairPin (0.16s)

TestNetworkPlugins/group/enable-default-cni/Start (81.35s)

=== RUN   TestNetworkPlugins/group/enable-default-cni/Start
net_test.go:112: (dbg) Run:  out/minikube-linux-amd64 start -p enable-default-cni-577222 --memory=3072 --alsologtostderr --wait=true --wait-timeout=15m --enable-default-cni=true --driver=docker  --container-runtime=crio
net_test.go:112: (dbg) Done: out/minikube-linux-amd64 start -p enable-default-cni-577222 --memory=3072 --alsologtostderr --wait=true --wait-timeout=15m --enable-default-cni=true --driver=docker  --container-runtime=crio: (1m21.348760706s)
--- PASS: TestNetworkPlugins/group/enable-default-cni/Start (81.35s)

TestNetworkPlugins/group/flannel/Start (63.21s)

=== RUN   TestNetworkPlugins/group/flannel/Start
net_test.go:112: (dbg) Run:  out/minikube-linux-amd64 start -p flannel-577222 --memory=3072 --alsologtostderr --wait=true --wait-timeout=15m --cni=flannel --driver=docker  --container-runtime=crio
net_test.go:112: (dbg) Done: out/minikube-linux-amd64 start -p flannel-577222 --memory=3072 --alsologtostderr --wait=true --wait-timeout=15m --cni=flannel --driver=docker  --container-runtime=crio: (1m3.214157637s)
--- PASS: TestNetworkPlugins/group/flannel/Start (63.21s)

TestNetworkPlugins/group/bridge/Start (40.59s)

=== RUN   TestNetworkPlugins/group/bridge/Start
net_test.go:112: (dbg) Run:  out/minikube-linux-amd64 start -p bridge-577222 --memory=3072 --alsologtostderr --wait=true --wait-timeout=15m --cni=bridge --driver=docker  --container-runtime=crio
net_test.go:112: (dbg) Done: out/minikube-linux-amd64 start -p bridge-577222 --memory=3072 --alsologtostderr --wait=true --wait-timeout=15m --cni=bridge --driver=docker  --container-runtime=crio: (40.591137721s)
--- PASS: TestNetworkPlugins/group/bridge/Start (40.59s)

TestStartStop/group/old-k8s-version/serial/FirstStart (113.1s)

=== RUN   TestStartStop/group/old-k8s-version/serial/FirstStart
start_stop_delete_test.go:186: (dbg) Run:  out/minikube-linux-amd64 start -p old-k8s-version-070357 --memory=2200 --alsologtostderr --wait=true --kvm-network=default --kvm-qemu-uri=qemu:///system --disable-driver-mounts --keep-context=false --driver=docker  --container-runtime=crio --kubernetes-version=v1.16.0
start_stop_delete_test.go:186: (dbg) Done: out/minikube-linux-amd64 start -p old-k8s-version-070357 --memory=2200 --alsologtostderr --wait=true --kvm-network=default --kvm-qemu-uri=qemu:///system --disable-driver-mounts --keep-context=false --driver=docker  --container-runtime=crio --kubernetes-version=v1.16.0: (1m53.0990236s)
--- PASS: TestStartStop/group/old-k8s-version/serial/FirstStart (113.10s)

TestNetworkPlugins/group/bridge/KubeletFlags (0.35s)

=== RUN   TestNetworkPlugins/group/bridge/KubeletFlags
net_test.go:133: (dbg) Run:  out/minikube-linux-amd64 ssh -p bridge-577222 "pgrep -a kubelet"
--- PASS: TestNetworkPlugins/group/bridge/KubeletFlags (0.35s)

TestNetworkPlugins/group/bridge/NetCatPod (12.36s)

=== RUN   TestNetworkPlugins/group/bridge/NetCatPod
net_test.go:149: (dbg) Run:  kubectl --context bridge-577222 replace --force -f testdata/netcat-deployment.yaml
net_test.go:163: (dbg) TestNetworkPlugins/group/bridge/NetCatPod: waiting 15m0s for pods matching "app=netcat" in namespace "default" ...
helpers_test.go:344: "netcat-7458db8b8-qvbmk" [ba70d94a-e973-4c2e-8d2e-157690db9118] Pending / Ready:ContainersNotReady (containers with unready status: [dnsutils]) / ContainersReady:ContainersNotReady (containers with unready status: [dnsutils])
helpers_test.go:344: "netcat-7458db8b8-qvbmk" [ba70d94a-e973-4c2e-8d2e-157690db9118] Running
net_test.go:163: (dbg) TestNetworkPlugins/group/bridge/NetCatPod: app=netcat healthy within 12.010055216s
--- PASS: TestNetworkPlugins/group/bridge/NetCatPod (12.36s)

TestNetworkPlugins/group/bridge/DNS (0.18s)

=== RUN   TestNetworkPlugins/group/bridge/DNS
net_test.go:175: (dbg) Run:  kubectl --context bridge-577222 exec deployment/netcat -- nslookup kubernetes.default
--- PASS: TestNetworkPlugins/group/bridge/DNS (0.18s)

TestNetworkPlugins/group/bridge/Localhost (0.18s)

=== RUN   TestNetworkPlugins/group/bridge/Localhost
net_test.go:194: (dbg) Run:  kubectl --context bridge-577222 exec deployment/netcat -- /bin/sh -c "nc -w 5 -i 5 -z localhost 8080"
--- PASS: TestNetworkPlugins/group/bridge/Localhost (0.18s)

TestNetworkPlugins/group/bridge/HairPin (0.21s)

=== RUN   TestNetworkPlugins/group/bridge/HairPin
net_test.go:264: (dbg) Run:  kubectl --context bridge-577222 exec deployment/netcat -- /bin/sh -c "nc -w 5 -i 5 -z netcat 8080"
--- PASS: TestNetworkPlugins/group/bridge/HairPin (0.21s)

TestNetworkPlugins/group/flannel/ControllerPod (5.02s)

=== RUN   TestNetworkPlugins/group/flannel/ControllerPod
net_test.go:120: (dbg) TestNetworkPlugins/group/flannel/ControllerPod: waiting 10m0s for pods matching "app=flannel" in namespace "kube-flannel" ...
helpers_test.go:344: "kube-flannel-ds-tx5gg" [ec6c8603-d042-4701-92b2-1359808f43db] Running
net_test.go:120: (dbg) TestNetworkPlugins/group/flannel/ControllerPod: app=flannel healthy within 5.019386039s
--- PASS: TestNetworkPlugins/group/flannel/ControllerPod (5.02s)

TestNetworkPlugins/group/flannel/KubeletFlags (0.29s)

=== RUN   TestNetworkPlugins/group/flannel/KubeletFlags
net_test.go:133: (dbg) Run:  out/minikube-linux-amd64 ssh -p flannel-577222 "pgrep -a kubelet"
--- PASS: TestNetworkPlugins/group/flannel/KubeletFlags (0.29s)

TestNetworkPlugins/group/flannel/NetCatPod (10.33s)

=== RUN   TestNetworkPlugins/group/flannel/NetCatPod
net_test.go:149: (dbg) Run:  kubectl --context flannel-577222 replace --force -f testdata/netcat-deployment.yaml
net_test.go:163: (dbg) TestNetworkPlugins/group/flannel/NetCatPod: waiting 15m0s for pods matching "app=netcat" in namespace "default" ...
helpers_test.go:344: "netcat-7458db8b8-psq4r" [3a9e21bd-00bc-4c78-a764-5715f7829735] Pending / Ready:ContainersNotReady (containers with unready status: [dnsutils]) / ContainersReady:ContainersNotReady (containers with unready status: [dnsutils])
helpers_test.go:344: "netcat-7458db8b8-psq4r" [3a9e21bd-00bc-4c78-a764-5715f7829735] Running
net_test.go:163: (dbg) TestNetworkPlugins/group/flannel/NetCatPod: app=netcat healthy within 10.008713219s
--- PASS: TestNetworkPlugins/group/flannel/NetCatPod (10.33s)

TestNetworkPlugins/group/enable-default-cni/KubeletFlags (0.28s)

=== RUN   TestNetworkPlugins/group/enable-default-cni/KubeletFlags
net_test.go:133: (dbg) Run:  out/minikube-linux-amd64 ssh -p enable-default-cni-577222 "pgrep -a kubelet"
--- PASS: TestNetworkPlugins/group/enable-default-cni/KubeletFlags (0.28s)

TestNetworkPlugins/group/enable-default-cni/NetCatPod (10.34s)

=== RUN   TestNetworkPlugins/group/enable-default-cni/NetCatPod
net_test.go:149: (dbg) Run:  kubectl --context enable-default-cni-577222 replace --force -f testdata/netcat-deployment.yaml
net_test.go:163: (dbg) TestNetworkPlugins/group/enable-default-cni/NetCatPod: waiting 15m0s for pods matching "app=netcat" in namespace "default" ...
helpers_test.go:344: "netcat-7458db8b8-9rwg9" [4002aede-ed76-4023-ab41-93b9f9c34f12] Pending / Ready:ContainersNotReady (containers with unready status: [dnsutils]) / ContainersReady:ContainersNotReady (containers with unready status: [dnsutils])
helpers_test.go:344: "netcat-7458db8b8-9rwg9" [4002aede-ed76-4023-ab41-93b9f9c34f12] Running
net_test.go:163: (dbg) TestNetworkPlugins/group/enable-default-cni/NetCatPod: app=netcat healthy within 10.010764111s
--- PASS: TestNetworkPlugins/group/enable-default-cni/NetCatPod (10.34s)

TestNetworkPlugins/group/flannel/DNS (0.21s)

=== RUN   TestNetworkPlugins/group/flannel/DNS
net_test.go:175: (dbg) Run:  kubectl --context flannel-577222 exec deployment/netcat -- nslookup kubernetes.default
--- PASS: TestNetworkPlugins/group/flannel/DNS (0.21s)

TestNetworkPlugins/group/flannel/Localhost (0.15s)

=== RUN   TestNetworkPlugins/group/flannel/Localhost
net_test.go:194: (dbg) Run:  kubectl --context flannel-577222 exec deployment/netcat -- /bin/sh -c "nc -w 5 -i 5 -z localhost 8080"
--- PASS: TestNetworkPlugins/group/flannel/Localhost (0.15s)

TestNetworkPlugins/group/flannel/HairPin (0.16s)

=== RUN   TestNetworkPlugins/group/flannel/HairPin
net_test.go:264: (dbg) Run:  kubectl --context flannel-577222 exec deployment/netcat -- /bin/sh -c "nc -w 5 -i 5 -z netcat 8080"
--- PASS: TestNetworkPlugins/group/flannel/HairPin (0.16s)

TestNetworkPlugins/group/enable-default-cni/DNS (0.17s)

=== RUN   TestNetworkPlugins/group/enable-default-cni/DNS
net_test.go:175: (dbg) Run:  kubectl --context enable-default-cni-577222 exec deployment/netcat -- nslookup kubernetes.default
--- PASS: TestNetworkPlugins/group/enable-default-cni/DNS (0.17s)

TestStartStop/group/no-preload/serial/FirstStart (57.35s)

=== RUN   TestStartStop/group/no-preload/serial/FirstStart
start_stop_delete_test.go:186: (dbg) Run:  out/minikube-linux-amd64 start -p no-preload-299339 --memory=2200 --alsologtostderr --wait=true --preload=false --driver=docker  --container-runtime=crio --kubernetes-version=v1.27.3
start_stop_delete_test.go:186: (dbg) Done: out/minikube-linux-amd64 start -p no-preload-299339 --memory=2200 --alsologtostderr --wait=true --preload=false --driver=docker  --container-runtime=crio --kubernetes-version=v1.27.3: (57.346018192s)
--- PASS: TestStartStop/group/no-preload/serial/FirstStart (57.35s)

TestNetworkPlugins/group/enable-default-cni/Localhost (0.15s)

=== RUN   TestNetworkPlugins/group/enable-default-cni/Localhost
net_test.go:194: (dbg) Run:  kubectl --context enable-default-cni-577222 exec deployment/netcat -- /bin/sh -c "nc -w 5 -i 5 -z localhost 8080"
--- PASS: TestNetworkPlugins/group/enable-default-cni/Localhost (0.15s)

TestNetworkPlugins/group/enable-default-cni/HairPin (0.15s)

=== RUN   TestNetworkPlugins/group/enable-default-cni/HairPin
net_test.go:264: (dbg) Run:  kubectl --context enable-default-cni-577222 exec deployment/netcat -- /bin/sh -c "nc -w 5 -i 5 -z netcat 8080"
--- PASS: TestNetworkPlugins/group/enable-default-cni/HairPin (0.15s)
E0731 11:15:34.346144   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/kindnet-577222/client.crt: no such file or directory
E0731 11:15:40.752609   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/auto-577222/client.crt: no such file or directory
E0731 11:15:45.906174   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/custom-flannel-577222/client.crt: no such file or directory
E0731 11:15:45.930360   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/bridge-577222/client.crt: no such file or directory
E0731 11:15:45.935590   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/bridge-577222/client.crt: no such file or directory
E0731 11:15:45.945816   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/bridge-577222/client.crt: no such file or directory
E0731 11:15:45.966065   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/bridge-577222/client.crt: no such file or directory
E0731 11:15:46.006325   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/bridge-577222/client.crt: no such file or directory
E0731 11:15:46.086924   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/bridge-577222/client.crt: no such file or directory
E0731 11:15:46.247296   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/bridge-577222/client.crt: no such file or directory
E0731 11:15:46.568157   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/bridge-577222/client.crt: no such file or directory
E0731 11:15:47.209239   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/bridge-577222/client.crt: no such file or directory
E0731 11:15:48.489931   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/bridge-577222/client.crt: no such file or directory
E0731 11:15:48.591227   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/calico-577222/client.crt: no such file or directory
E0731 11:15:51.050383   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/bridge-577222/client.crt: no such file or directory
E0731 11:15:56.171089   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/bridge-577222/client.crt: no such file or directory
E0731 11:15:58.881492   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/addons-764200/client.crt: no such file or directory
E0731 11:15:59.750487   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/flannel-577222/client.crt: no such file or directory
E0731 11:15:59.755733   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/flannel-577222/client.crt: no such file or directory
E0731 11:15:59.765960   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/flannel-577222/client.crt: no such file or directory
E0731 11:15:59.786183   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/flannel-577222/client.crt: no such file or directory
E0731 11:15:59.826445   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/flannel-577222/client.crt: no such file or directory
E0731 11:15:59.906752   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/flannel-577222/client.crt: no such file or directory
E0731 11:16:00.067096   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/flannel-577222/client.crt: no such file or directory
E0731 11:16:00.387227   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/flannel-577222/client.crt: no such file or directory
E0731 11:16:01.028102   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/flannel-577222/client.crt: no such file or directory
E0731 11:16:02.308390   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/flannel-577222/client.crt: no such file or directory
E0731 11:16:04.869077   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/flannel-577222/client.crt: no such file or directory
E0731 11:16:06.411902   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/bridge-577222/client.crt: no such file or directory
E0731 11:16:09.989848   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/flannel-577222/client.crt: no such file or directory
E0731 11:16:10.020077   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/enable-default-cni-577222/client.crt: no such file or directory
E0731 11:16:10.025276   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/enable-default-cni-577222/client.crt: no such file or directory
E0731 11:16:10.035505   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/enable-default-cni-577222/client.crt: no such file or directory
E0731 11:16:10.055760   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/enable-default-cni-577222/client.crt: no such file or directory
E0731 11:16:10.096040   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/enable-default-cni-577222/client.crt: no such file or directory
E0731 11:16:10.176365   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/enable-default-cni-577222/client.crt: no such file or directory
E0731 11:16:10.336978   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/enable-default-cni-577222/client.crt: no such file or directory
E0731 11:16:10.657564   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/enable-default-cni-577222/client.crt: no such file or directory
E0731 11:16:11.298605   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/enable-default-cni-577222/client.crt: no such file or directory
E0731 11:16:12.579693   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/enable-default-cni-577222/client.crt: no such file or directory
E0731 11:16:15.140320   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/enable-default-cni-577222/client.crt: no such file or directory
E0731 11:16:20.230090   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/flannel-577222/client.crt: no such file or directory
E0731 11:16:20.261347   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/enable-default-cni-577222/client.crt: no such file or directory
E0731 11:16:26.892266   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/bridge-577222/client.crt: no such file or directory
E0731 11:16:30.502358   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/enable-default-cni-577222/client.crt: no such file or directory
E0731 11:16:40.710305   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/flannel-577222/client.crt: no such file or directory
E0731 11:16:50.983444   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/enable-default-cni-577222/client.crt: no such file or directory
E0731 11:16:56.267055   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/kindnet-577222/client.crt: no such file or directory
E0731 11:17:07.826475   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/custom-flannel-577222/client.crt: no such file or directory
E0731 11:17:07.852780   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/bridge-577222/client.crt: no such file or directory
E0731 11:17:10.511758   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/calico-577222/client.crt: no such file or directory
E0731 11:17:12.322317   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/functional-683521/client.crt: no such file or directory
E0731 11:17:21.670746   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/flannel-577222/client.crt: no such file or directory
E0731 11:17:31.944176   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/enable-default-cni-577222/client.crt: no such file or directory
E0731 11:17:38.097092   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/old-k8s-version-070357/client.crt: no such file or directory
E0731 11:17:38.102355   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/old-k8s-version-070357/client.crt: no such file or directory
E0731 11:17:38.112615   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/old-k8s-version-070357/client.crt: no such file or directory
E0731 11:17:38.132918   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/old-k8s-version-070357/client.crt: no such file or directory
E0731 11:17:38.173197   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/old-k8s-version-070357/client.crt: no such file or directory
E0731 11:17:38.253518   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/old-k8s-version-070357/client.crt: no such file or directory
E0731 11:17:38.413891   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/old-k8s-version-070357/client.crt: no such file or directory
E0731 11:17:38.734441   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/old-k8s-version-070357/client.crt: no such file or directory
E0731 11:17:39.375396   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/old-k8s-version-070357/client.crt: no such file or directory
E0731 11:17:40.656224   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/old-k8s-version-070357/client.crt: no such file or directory
E0731 11:17:43.217303   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/old-k8s-version-070357/client.crt: no such file or directory
E0731 11:17:48.337838   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/old-k8s-version-070357/client.crt: no such file or directory
E0731 11:17:56.908718   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/auto-577222/client.crt: no such file or directory
E0731 11:17:58.578915   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/old-k8s-version-070357/client.crt: no such file or directory

TestStartStop/group/embed-certs/serial/FirstStart (72.27s)

=== RUN   TestStartStop/group/embed-certs/serial/FirstStart
start_stop_delete_test.go:186: (dbg) Run:  out/minikube-linux-amd64 start -p embed-certs-746333 --memory=2200 --alsologtostderr --wait=true --embed-certs --driver=docker  --container-runtime=crio --kubernetes-version=v1.27.3
start_stop_delete_test.go:186: (dbg) Done: out/minikube-linux-amd64 start -p embed-certs-746333 --memory=2200 --alsologtostderr --wait=true --embed-certs --driver=docker  --container-runtime=crio --kubernetes-version=v1.27.3: (1m12.27426046s)
--- PASS: TestStartStop/group/embed-certs/serial/FirstStart (72.27s)
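--embed-certs inlines the client certificate and key into kubeconfig as base64 data instead of referencing files under .minikube/profiles. A sketch of confirming that for this profile (the jsonpath filter is illustrative, not part of the test):

    # Non-empty output means the cert is embedded rather than a file path.
    kubectl config view --raw \
      -o jsonpath='{.users[?(@.name=="embed-certs-746333")].user.client-certificate-data}' |
      head -c 20 && echo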

                                                
                                    
TestStartStop/group/default-k8s-diff-port/serial/FirstStart (67.54s)

=== RUN   TestStartStop/group/default-k8s-diff-port/serial/FirstStart
start_stop_delete_test.go:186: (dbg) Run:  out/minikube-linux-amd64 start -p default-k8s-diff-port-628397 --memory=2200 --alsologtostderr --wait=true --apiserver-port=8444 --driver=docker  --container-runtime=crio --kubernetes-version=v1.27.3
E0731 11:12:12.323301   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/functional-683521/client.crt: no such file or directory
start_stop_delete_test.go:186: (dbg) Done: out/minikube-linux-amd64 start -p default-k8s-diff-port-628397 --memory=2200 --alsologtostderr --wait=true --apiserver-port=8444 --driver=docker  --container-runtime=crio --kubernetes-version=v1.27.3: (1m7.535536725s)
--- PASS: TestStartStop/group/default-k8s-diff-port/serial/FirstStart (67.54s)

TestStartStop/group/no-preload/serial/DeployApp (8.39s)

=== RUN   TestStartStop/group/no-preload/serial/DeployApp
start_stop_delete_test.go:196: (dbg) Run:  kubectl --context no-preload-299339 create -f testdata/busybox.yaml
start_stop_delete_test.go:196: (dbg) TestStartStop/group/no-preload/serial/DeployApp: waiting 8m0s for pods matching "integration-test=busybox" in namespace "default" ...
helpers_test.go:344: "busybox" [276c3995-a81d-4c0b-aa8e-356db2b10bd5] Pending / Ready:ContainersNotReady (containers with unready status: [busybox]) / ContainersReady:ContainersNotReady (containers with unready status: [busybox])
helpers_test.go:344: "busybox" [276c3995-a81d-4c0b-aa8e-356db2b10bd5] Running
start_stop_delete_test.go:196: (dbg) TestStartStop/group/no-preload/serial/DeployApp: integration-test=busybox healthy within 8.015326896s
start_stop_delete_test.go:196: (dbg) Run:  kubectl --context no-preload-299339 exec busybox -- /bin/sh -c "ulimit -n"
--- PASS: TestStartStop/group/no-preload/serial/DeployApp (8.39s)
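DeployApp creates a busybox pod, waits for it to run, then execs "ulimit -n" to confirm exec works and to record the container's open-file limit. Roughly, with plain kubectl:

    kubectl --context no-preload-299339 create -f testdata/busybox.yaml
    kubectl --context no-preload-299339 wait --for=condition=Ready pod/busybox --timeout=8m
    # Prints the file-descriptor limit inside the container.
    kubectl --context no-preload-299339 exec busybox -- /bin/sh -c "ulimit -n"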

                                                
                                    
TestStartStop/group/no-preload/serial/EnableAddonWhileActive (1.02s)

=== RUN   TestStartStop/group/no-preload/serial/EnableAddonWhileActive
start_stop_delete_test.go:205: (dbg) Run:  out/minikube-linux-amd64 addons enable metrics-server -p no-preload-299339 --images=MetricsServer=registry.k8s.io/echoserver:1.4 --registries=MetricsServer=fake.domain
start_stop_delete_test.go:215: (dbg) Run:  kubectl --context no-preload-299339 describe deploy/metrics-server -n kube-system
--- PASS: TestStartStop/group/no-preload/serial/EnableAddonWhileActive (1.02s)
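The --images/--registries overrides point the metrics-server addon at a deliberately unreachable registry so the test can assert on the deployment's metadata without pulling anything. A sketch of checking that the override landed (the expected value is inferred from the flags above, not stated in this log):

    kubectl --context no-preload-299339 -n kube-system get deploy metrics-server \
      -o jsonpath='{.spec.template.spec.containers[0].image}'
    # expect an image reference prefixed with fake.domain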

                                                
                                    
TestStartStop/group/no-preload/serial/Stop (11.91s)

=== RUN   TestStartStop/group/no-preload/serial/Stop
start_stop_delete_test.go:228: (dbg) Run:  out/minikube-linux-amd64 stop -p no-preload-299339 --alsologtostderr -v=3
start_stop_delete_test.go:228: (dbg) Done: out/minikube-linux-amd64 stop -p no-preload-299339 --alsologtostderr -v=3: (11.913478507s)
--- PASS: TestStartStop/group/no-preload/serial/Stop (11.91s)

TestStartStop/group/old-k8s-version/serial/DeployApp (7.41s)

=== RUN   TestStartStop/group/old-k8s-version/serial/DeployApp
start_stop_delete_test.go:196: (dbg) Run:  kubectl --context old-k8s-version-070357 create -f testdata/busybox.yaml
start_stop_delete_test.go:196: (dbg) TestStartStop/group/old-k8s-version/serial/DeployApp: waiting 8m0s for pods matching "integration-test=busybox" in namespace "default" ...
helpers_test.go:344: "busybox" [77589e07-1dce-4330-a349-4e52bbb21d93] Pending / Ready:ContainersNotReady (containers with unready status: [busybox]) / ContainersReady:ContainersNotReady (containers with unready status: [busybox])
helpers_test.go:344: "busybox" [77589e07-1dce-4330-a349-4e52bbb21d93] Running
start_stop_delete_test.go:196: (dbg) TestStartStop/group/old-k8s-version/serial/DeployApp: integration-test=busybox healthy within 7.013908791s
start_stop_delete_test.go:196: (dbg) Run:  kubectl --context old-k8s-version-070357 exec busybox -- /bin/sh -c "ulimit -n"
--- PASS: TestStartStop/group/old-k8s-version/serial/DeployApp (7.41s)

TestStartStop/group/no-preload/serial/EnableAddonAfterStop (0.19s)

=== RUN   TestStartStop/group/no-preload/serial/EnableAddonAfterStop
start_stop_delete_test.go:239: (dbg) Run:  out/minikube-linux-amd64 status --format={{.Host}} -p no-preload-299339 -n no-preload-299339
start_stop_delete_test.go:239: (dbg) Non-zero exit: out/minikube-linux-amd64 status --format={{.Host}} -p no-preload-299339 -n no-preload-299339: exit status 7 (60.336674ms)

-- stdout --
	Stopped

-- /stdout --
start_stop_delete_test.go:239: status error: exit status 7 (may be ok)
start_stop_delete_test.go:246: (dbg) Run:  out/minikube-linux-amd64 addons enable dashboard -p no-preload-299339 --images=MetricsScraper=registry.k8s.io/echoserver:1.4
--- PASS: TestStartStop/group/no-preload/serial/EnableAddonAfterStop (0.19s)
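The "status error: exit status 7 (may be ok)" note is deliberate: minikube status encodes cluster state in its exit code, and a fully stopped profile reports non-zero, which the test tolerates before enabling the addon against the stopped profile's stored config. A sketch:

    out/minikube-linux-amd64 status --format={{.Host}} -p no-preload-299339 -n no-preload-299339
    echo "status exit code: $?"   # non-zero here just means the host is stopped
    out/minikube-linux-amd64 addons enable dashboard -p no-preload-299339 \
      --images=MetricsScraper=registry.k8s.io/echoserver:1.4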

                                                
                                    
TestStartStop/group/no-preload/serial/SecondStart (334.11s)

=== RUN   TestStartStop/group/no-preload/serial/SecondStart
start_stop_delete_test.go:256: (dbg) Run:  out/minikube-linux-amd64 start -p no-preload-299339 --memory=2200 --alsologtostderr --wait=true --preload=false --driver=docker  --container-runtime=crio --kubernetes-version=v1.27.3
start_stop_delete_test.go:256: (dbg) Done: out/minikube-linux-amd64 start -p no-preload-299339 --memory=2200 --alsologtostderr --wait=true --preload=false --driver=docker  --container-runtime=crio --kubernetes-version=v1.27.3: (5m33.692012799s)
start_stop_delete_test.go:262: (dbg) Run:  out/minikube-linux-amd64 status --format={{.Host}} -p no-preload-299339 -n no-preload-299339
--- PASS: TestStartStop/group/no-preload/serial/SecondStart (334.11s)

TestStartStop/group/old-k8s-version/serial/EnableAddonWhileActive (0.74s)

=== RUN   TestStartStop/group/old-k8s-version/serial/EnableAddonWhileActive
start_stop_delete_test.go:205: (dbg) Run:  out/minikube-linux-amd64 addons enable metrics-server -p old-k8s-version-070357 --images=MetricsServer=registry.k8s.io/echoserver:1.4 --registries=MetricsServer=fake.domain
start_stop_delete_test.go:215: (dbg) Run:  kubectl --context old-k8s-version-070357 describe deploy/metrics-server -n kube-system
--- PASS: TestStartStop/group/old-k8s-version/serial/EnableAddonWhileActive (0.74s)

TestStartStop/group/old-k8s-version/serial/Stop (11.94s)

=== RUN   TestStartStop/group/old-k8s-version/serial/Stop
start_stop_delete_test.go:228: (dbg) Run:  out/minikube-linux-amd64 stop -p old-k8s-version-070357 --alsologtostderr -v=3
start_stop_delete_test.go:228: (dbg) Done: out/minikube-linux-amd64 stop -p old-k8s-version-070357 --alsologtostderr -v=3: (11.937689549s)
--- PASS: TestStartStop/group/old-k8s-version/serial/Stop (11.94s)

TestStartStop/group/embed-certs/serial/DeployApp (7.42s)

=== RUN   TestStartStop/group/embed-certs/serial/DeployApp
start_stop_delete_test.go:196: (dbg) Run:  kubectl --context embed-certs-746333 create -f testdata/busybox.yaml
start_stop_delete_test.go:196: (dbg) TestStartStop/group/embed-certs/serial/DeployApp: waiting 8m0s for pods matching "integration-test=busybox" in namespace "default" ...
helpers_test.go:344: "busybox" [d72f2379-6a28-40a1-8ca2-a34ee583810e] Pending / Ready:ContainersNotReady (containers with unready status: [busybox]) / ContainersReady:ContainersNotReady (containers with unready status: [busybox])
helpers_test.go:344: "busybox" [d72f2379-6a28-40a1-8ca2-a34ee583810e] Running
start_stop_delete_test.go:196: (dbg) TestStartStop/group/embed-certs/serial/DeployApp: integration-test=busybox healthy within 7.014840563s
start_stop_delete_test.go:196: (dbg) Run:  kubectl --context embed-certs-746333 exec busybox -- /bin/sh -c "ulimit -n"
--- PASS: TestStartStop/group/embed-certs/serial/DeployApp (7.42s)

TestStartStop/group/default-k8s-diff-port/serial/DeployApp (7.4s)

=== RUN   TestStartStop/group/default-k8s-diff-port/serial/DeployApp
start_stop_delete_test.go:196: (dbg) Run:  kubectl --context default-k8s-diff-port-628397 create -f testdata/busybox.yaml
start_stop_delete_test.go:196: (dbg) TestStartStop/group/default-k8s-diff-port/serial/DeployApp: waiting 8m0s for pods matching "integration-test=busybox" in namespace "default" ...
helpers_test.go:344: "busybox" [5682b334-d7f3-405e-bb03-6e7839f946c6] Pending / Ready:ContainersNotReady (containers with unready status: [busybox]) / ContainersReady:ContainersNotReady (containers with unready status: [busybox])
helpers_test.go:344: "busybox" [5682b334-d7f3-405e-bb03-6e7839f946c6] Running
E0731 11:12:56.908963   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/auto-577222/client.crt: no such file or directory
E0731 11:12:56.914236   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/auto-577222/client.crt: no such file or directory
E0731 11:12:56.924498   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/auto-577222/client.crt: no such file or directory
E0731 11:12:56.944761   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/auto-577222/client.crt: no such file or directory
E0731 11:12:56.984992   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/auto-577222/client.crt: no such file or directory
E0731 11:12:57.065782   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/auto-577222/client.crt: no such file or directory
E0731 11:12:57.226170   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/auto-577222/client.crt: no such file or directory
E0731 11:12:57.546293   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/auto-577222/client.crt: no such file or directory
start_stop_delete_test.go:196: (dbg) TestStartStop/group/default-k8s-diff-port/serial/DeployApp: integration-test=busybox healthy within 7.014572052s
start_stop_delete_test.go:196: (dbg) Run:  kubectl --context default-k8s-diff-port-628397 exec busybox -- /bin/sh -c "ulimit -n"
--- PASS: TestStartStop/group/default-k8s-diff-port/serial/DeployApp (7.40s)

TestStartStop/group/old-k8s-version/serial/EnableAddonAfterStop (0.21s)

=== RUN   TestStartStop/group/old-k8s-version/serial/EnableAddonAfterStop
start_stop_delete_test.go:239: (dbg) Run:  out/minikube-linux-amd64 status --format={{.Host}} -p old-k8s-version-070357 -n old-k8s-version-070357
start_stop_delete_test.go:239: (dbg) Non-zero exit: out/minikube-linux-amd64 status --format={{.Host}} -p old-k8s-version-070357 -n old-k8s-version-070357: exit status 7 (59.419437ms)

                                                
                                                
-- stdout --
	Stopped

                                                
                                                
-- /stdout --
start_stop_delete_test.go:239: status error: exit status 7 (may be ok)
start_stop_delete_test.go:246: (dbg) Run:  out/minikube-linux-amd64 addons enable dashboard -p old-k8s-version-070357 --images=MetricsScraper=registry.k8s.io/echoserver:1.4
--- PASS: TestStartStop/group/old-k8s-version/serial/EnableAddonAfterStop (0.21s)

TestStartStop/group/old-k8s-version/serial/SecondStart (36.45s)

=== RUN   TestStartStop/group/old-k8s-version/serial/SecondStart
start_stop_delete_test.go:256: (dbg) Run:  out/minikube-linux-amd64 start -p old-k8s-version-070357 --memory=2200 --alsologtostderr --wait=true --kvm-network=default --kvm-qemu-uri=qemu:///system --disable-driver-mounts --keep-context=false --driver=docker  --container-runtime=crio --kubernetes-version=v1.16.0
E0731 11:12:58.187305   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/auto-577222/client.crt: no such file or directory
start_stop_delete_test.go:256: (dbg) Done: out/minikube-linux-amd64 start -p old-k8s-version-070357 --memory=2200 --alsologtostderr --wait=true --kvm-network=default --kvm-qemu-uri=qemu:///system --disable-driver-mounts --keep-context=false --driver=docker  --container-runtime=crio --kubernetes-version=v1.16.0: (36.105838237s)
start_stop_delete_test.go:262: (dbg) Run:  out/minikube-linux-amd64 status --format={{.Host}} -p old-k8s-version-070357 -n old-k8s-version-070357
--- PASS: TestStartStop/group/old-k8s-version/serial/SecondStart (36.45s)

TestStartStop/group/embed-certs/serial/EnableAddonWhileActive (1.3s)

=== RUN   TestStartStop/group/embed-certs/serial/EnableAddonWhileActive
start_stop_delete_test.go:205: (dbg) Run:  out/minikube-linux-amd64 addons enable metrics-server -p embed-certs-746333 --images=MetricsServer=registry.k8s.io/echoserver:1.4 --registries=MetricsServer=fake.domain
start_stop_delete_test.go:205: (dbg) Done: out/minikube-linux-amd64 addons enable metrics-server -p embed-certs-746333 --images=MetricsServer=registry.k8s.io/echoserver:1.4 --registries=MetricsServer=fake.domain: (1.212211361s)
start_stop_delete_test.go:215: (dbg) Run:  kubectl --context embed-certs-746333 describe deploy/metrics-server -n kube-system
E0731 11:12:59.467740   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/auto-577222/client.crt: no such file or directory
--- PASS: TestStartStop/group/embed-certs/serial/EnableAddonWhileActive (1.30s)

TestStartStop/group/embed-certs/serial/Stop (14.55s)

=== RUN   TestStartStop/group/embed-certs/serial/Stop
start_stop_delete_test.go:228: (dbg) Run:  out/minikube-linux-amd64 stop -p embed-certs-746333 --alsologtostderr -v=3
start_stop_delete_test.go:228: (dbg) Done: out/minikube-linux-amd64 stop -p embed-certs-746333 --alsologtostderr -v=3: (14.547998841s)
--- PASS: TestStartStop/group/embed-certs/serial/Stop (14.55s)

TestStartStop/group/default-k8s-diff-port/serial/EnableAddonWhileActive (1.08s)

=== RUN   TestStartStop/group/default-k8s-diff-port/serial/EnableAddonWhileActive
start_stop_delete_test.go:205: (dbg) Run:  out/minikube-linux-amd64 addons enable metrics-server -p default-k8s-diff-port-628397 --images=MetricsServer=registry.k8s.io/echoserver:1.4 --registries=MetricsServer=fake.domain
start_stop_delete_test.go:215: (dbg) Run:  kubectl --context default-k8s-diff-port-628397 describe deploy/metrics-server -n kube-system
--- PASS: TestStartStop/group/default-k8s-diff-port/serial/EnableAddonWhileActive (1.08s)

TestStartStop/group/default-k8s-diff-port/serial/Stop (13s)

=== RUN   TestStartStop/group/default-k8s-diff-port/serial/Stop
start_stop_delete_test.go:228: (dbg) Run:  out/minikube-linux-amd64 stop -p default-k8s-diff-port-628397 --alsologtostderr -v=3
E0731 11:13:02.028616   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/auto-577222/client.crt: no such file or directory
E0731 11:13:07.149118   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/auto-577222/client.crt: no such file or directory
start_stop_delete_test.go:228: (dbg) Done: out/minikube-linux-amd64 stop -p default-k8s-diff-port-628397 --alsologtostderr -v=3: (12.998201153s)
--- PASS: TestStartStop/group/default-k8s-diff-port/serial/Stop (13.00s)

TestStartStop/group/embed-certs/serial/EnableAddonAfterStop (0.2s)

=== RUN   TestStartStop/group/embed-certs/serial/EnableAddonAfterStop
start_stop_delete_test.go:239: (dbg) Run:  out/minikube-linux-amd64 status --format={{.Host}} -p embed-certs-746333 -n embed-certs-746333
start_stop_delete_test.go:239: (dbg) Non-zero exit: out/minikube-linux-amd64 status --format={{.Host}} -p embed-certs-746333 -n embed-certs-746333: exit status 7 (68.286066ms)

                                                
                                                
-- stdout --
	Stopped

                                                
                                                
-- /stdout --
start_stop_delete_test.go:239: status error: exit status 7 (may be ok)
start_stop_delete_test.go:246: (dbg) Run:  out/minikube-linux-amd64 addons enable dashboard -p embed-certs-746333 --images=MetricsScraper=registry.k8s.io/echoserver:1.4
--- PASS: TestStartStop/group/embed-certs/serial/EnableAddonAfterStop (0.20s)

TestStartStop/group/default-k8s-diff-port/serial/EnableAddonAfterStop (0.19s)

=== RUN   TestStartStop/group/default-k8s-diff-port/serial/EnableAddonAfterStop
start_stop_delete_test.go:239: (dbg) Run:  out/minikube-linux-amd64 status --format={{.Host}} -p default-k8s-diff-port-628397 -n default-k8s-diff-port-628397
start_stop_delete_test.go:239: (dbg) Non-zero exit: out/minikube-linux-amd64 status --format={{.Host}} -p default-k8s-diff-port-628397 -n default-k8s-diff-port-628397: exit status 7 (66.419537ms)

                                                
                                                
-- stdout --
	Stopped

                                                
                                                
-- /stdout --
start_stop_delete_test.go:239: status error: exit status 7 (may be ok)
start_stop_delete_test.go:246: (dbg) Run:  out/minikube-linux-amd64 addons enable dashboard -p default-k8s-diff-port-628397 --images=MetricsScraper=registry.k8s.io/echoserver:1.4
--- PASS: TestStartStop/group/default-k8s-diff-port/serial/EnableAddonAfterStop (0.19s)

TestStartStop/group/default-k8s-diff-port/serial/SecondStart (341.46s)

=== RUN   TestStartStop/group/default-k8s-diff-port/serial/SecondStart
start_stop_delete_test.go:256: (dbg) Run:  out/minikube-linux-amd64 start -p default-k8s-diff-port-628397 --memory=2200 --alsologtostderr --wait=true --apiserver-port=8444 --driver=docker  --container-runtime=crio --kubernetes-version=v1.27.3
start_stop_delete_test.go:256: (dbg) Done: out/minikube-linux-amd64 start -p default-k8s-diff-port-628397 --memory=2200 --alsologtostderr --wait=true --apiserver-port=8444 --driver=docker  --container-runtime=crio --kubernetes-version=v1.27.3: (5m40.984444727s)
start_stop_delete_test.go:262: (dbg) Run:  out/minikube-linux-amd64 status --format={{.Host}} -p default-k8s-diff-port-628397 -n default-k8s-diff-port-628397
--- PASS: TestStartStop/group/default-k8s-diff-port/serial/SecondStart (341.46s)

TestStartStop/group/embed-certs/serial/SecondStart (341.49s)

=== RUN   TestStartStop/group/embed-certs/serial/SecondStart
start_stop_delete_test.go:256: (dbg) Run:  out/minikube-linux-amd64 start -p embed-certs-746333 --memory=2200 --alsologtostderr --wait=true --embed-certs --driver=docker  --container-runtime=crio --kubernetes-version=v1.27.3
E0731 11:13:17.389934   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/auto-577222/client.crt: no such file or directory
start_stop_delete_test.go:256: (dbg) Done: out/minikube-linux-amd64 start -p embed-certs-746333 --memory=2200 --alsologtostderr --wait=true --embed-certs --driver=docker  --container-runtime=crio --kubernetes-version=v1.27.3: (5m41.076702291s)
start_stop_delete_test.go:262: (dbg) Run:  out/minikube-linux-amd64 status --format={{.Host}} -p embed-certs-746333 -n embed-certs-746333
--- PASS: TestStartStop/group/embed-certs/serial/SecondStart (341.49s)

TestStartStop/group/old-k8s-version/serial/UserAppExistsAfterStop (20.02s)

=== RUN   TestStartStop/group/old-k8s-version/serial/UserAppExistsAfterStop
start_stop_delete_test.go:274: (dbg) TestStartStop/group/old-k8s-version/serial/UserAppExistsAfterStop: waiting 9m0s for pods matching "k8s-app=kubernetes-dashboard" in namespace "kubernetes-dashboard" ...
E0731 11:13:37.870986   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/auto-577222/client.crt: no such file or directory
helpers_test.go:344: "kubernetes-dashboard-84b68f675b-nr6cb" [5fa0f9ee-e8c5-4d7a-880c-db7813dce59a] Pending / Ready:ContainersNotReady (containers with unready status: [kubernetes-dashboard]) / ContainersReady:ContainersNotReady (containers with unready status: [kubernetes-dashboard])
helpers_test.go:344: "kubernetes-dashboard-84b68f675b-nr6cb" [5fa0f9ee-e8c5-4d7a-880c-db7813dce59a] Running
start_stop_delete_test.go:274: (dbg) TestStartStop/group/old-k8s-version/serial/UserAppExistsAfterStop: k8s-app=kubernetes-dashboard healthy within 20.017486297s
--- PASS: TestStartStop/group/old-k8s-version/serial/UserAppExistsAfterStop (20.02s)
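
The Pending/Running transitions printed by helpers_test.go:344 come from a poll loop over the labelled pod's status. A rough equivalent via kubectl and jsonpath (it checks only the Running phase, whereas the harness also waits for readiness):

package main

import (
	"fmt"
	"os/exec"
	"strings"
	"time"
)

func main() {
	deadline := time.Now().Add(9 * time.Minute) // matches the 9m0s wait above
	for time.Now().Before(deadline) {
		// Read the phase of every pod carrying the dashboard label.
		out, _ := exec.Command("kubectl", "--context", "old-k8s-version-070357",
			"get", "pods", "-n", "kubernetes-dashboard",
			"-l", "k8s-app=kubernetes-dashboard",
			"-o", "jsonpath={.items[*].status.phase}").Output()
		if strings.Contains(string(out), "Running") {
			fmt.Println("dashboard pod is Running")
			return
		}
		time.Sleep(2 * time.Second)
	}
	panic("timed out waiting for kubernetes-dashboard pod")
}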

TestStartStop/group/old-k8s-version/serial/AddonExistsAfterStop (5.07s)

=== RUN   TestStartStop/group/old-k8s-version/serial/AddonExistsAfterStop
start_stop_delete_test.go:287: (dbg) TestStartStop/group/old-k8s-version/serial/AddonExistsAfterStop: waiting 9m0s for pods matching "k8s-app=kubernetes-dashboard" in namespace "kubernetes-dashboard" ...
helpers_test.go:344: "kubernetes-dashboard-84b68f675b-nr6cb" [5fa0f9ee-e8c5-4d7a-880c-db7813dce59a] Running
start_stop_delete_test.go:287: (dbg) TestStartStop/group/old-k8s-version/serial/AddonExistsAfterStop: k8s-app=kubernetes-dashboard healthy within 5.008512777s
start_stop_delete_test.go:291: (dbg) Run:  kubectl --context old-k8s-version-070357 describe deploy/dashboard-metrics-scraper -n kubernetes-dashboard
--- PASS: TestStartStop/group/old-k8s-version/serial/AddonExistsAfterStop (5.07s)

TestStartStop/group/old-k8s-version/serial/VerifyKubernetesImages (0.3s)

=== RUN   TestStartStop/group/old-k8s-version/serial/VerifyKubernetesImages
start_stop_delete_test.go:304: (dbg) Run:  out/minikube-linux-amd64 ssh -p old-k8s-version-070357 "sudo crictl images -o json"
start_stop_delete_test.go:304: Found non-minikube image: kindest/kindnetd:v20210326-1e038dc5
start_stop_delete_test.go:304: Found non-minikube image: kindest/kindnetd:v20230511-dc714da8
start_stop_delete_test.go:304: Found non-minikube image: gcr.io/k8s-minikube/busybox:1.28.4-glibc
--- PASS: TestStartStop/group/old-k8s-version/serial/VerifyKubernetesImages (0.30s)
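
VerifyKubernetesImages lists the images inside the node with crictl and flags anything outside minikube's expected set, as the three "Found non-minikube image" lines show. A sketch of the listing-and-filtering half; the JSON field names (images, repoTags) follow crictl's CRI-style output and should be checked against your crictl version, and the filter below is only a stand-in heuristic for the harness's real expected-image list:

package main

import (
	"encoding/json"
	"fmt"
	"os/exec"
	"strings"
)

// criImages models the subset of "crictl images -o json" we read here.
type criImages struct {
	Images []struct {
		RepoTags []string `json:"repoTags"`
	} `json:"images"`
}

func main() {
	// Same command the test runs over minikube ssh.
	out, err := exec.Command("out/minikube-linux-amd64", "ssh",
		"-p", "old-k8s-version-070357", "sudo crictl images -o json").Output()
	if err != nil {
		panic(err)
	}
	var parsed criImages
	if err := json.Unmarshal(out, &parsed); err != nil {
		panic(err)
	}
	for _, img := range parsed.Images {
		for _, tag := range img.RepoTags {
			// Heuristic stand-in for the expected-image list.
			if !strings.Contains(tag, "k8s.io") && !strings.Contains(tag, "minikube") {
				fmt.Println("Found non-minikube image:", tag)
			}
		}
	}
}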

TestStartStop/group/old-k8s-version/serial/Pause (2.62s)

=== RUN   TestStartStop/group/old-k8s-version/serial/Pause
start_stop_delete_test.go:311: (dbg) Run:  out/minikube-linux-amd64 pause -p old-k8s-version-070357 --alsologtostderr -v=1
start_stop_delete_test.go:311: (dbg) Run:  out/minikube-linux-amd64 status --format={{.APIServer}} -p old-k8s-version-070357 -n old-k8s-version-070357
start_stop_delete_test.go:311: (dbg) Non-zero exit: out/minikube-linux-amd64 status --format={{.APIServer}} -p old-k8s-version-070357 -n old-k8s-version-070357: exit status 2 (274.002457ms)

                                                
                                                
-- stdout --
	Paused

                                                
                                                
-- /stdout --
start_stop_delete_test.go:311: status error: exit status 2 (may be ok)
start_stop_delete_test.go:311: (dbg) Run:  out/minikube-linux-amd64 status --format={{.Kubelet}} -p old-k8s-version-070357 -n old-k8s-version-070357
start_stop_delete_test.go:311: (dbg) Non-zero exit: out/minikube-linux-amd64 status --format={{.Kubelet}} -p old-k8s-version-070357 -n old-k8s-version-070357: exit status 2 (284.518888ms)

                                                
                                                
-- stdout --
	Stopped

                                                
                                                
-- /stdout --
start_stop_delete_test.go:311: status error: exit status 2 (may be ok)
start_stop_delete_test.go:311: (dbg) Run:  out/minikube-linux-amd64 unpause -p old-k8s-version-070357 --alsologtostderr -v=1
E0731 11:14:01.929210   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/addons-764200/client.crt: no such file or directory
start_stop_delete_test.go:311: (dbg) Run:  out/minikube-linux-amd64 status --format={{.APIServer}} -p old-k8s-version-070357 -n old-k8s-version-070357
start_stop_delete_test.go:311: (dbg) Run:  out/minikube-linux-amd64 status --format={{.Kubelet}} -p old-k8s-version-070357 -n old-k8s-version-070357
--- PASS: TestStartStop/group/old-k8s-version/serial/Pause (2.62s)
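
The Pause test drives pause, then status (tolerating exit status 2 while components report Paused/Stopped), then unpause, then status again. A condensed sketch of that cycle with the same tolerance:

package main

import (
	"errors"
	"fmt"
	"os/exec"
	"strings"
)

// mk runs the minikube binary built by the harness.
func mk(args ...string) ([]byte, error) {
	return exec.Command("out/minikube-linux-amd64", args...).CombinedOutput()
}

// status prints a component's state, treating exit status 2 as acceptable,
// mirroring the "status error: exit status 2 (may be ok)" lines above.
func status(field, profile string) {
	out, err := mk("status", "--format={{."+field+"}}", "-p", profile, "-n", profile)
	var ee *exec.ExitError
	if errors.As(err, &ee) && ee.ExitCode() == 2 {
		fmt.Printf("%s: %s (exit 2, may be ok)\n", field, strings.TrimSpace(string(out)))
	} else if err != nil {
		panic(err)
	}
}

func main() {
	p := "old-k8s-version-070357"
	if out, err := mk("pause", "-p", p, "--alsologtostderr", "-v=1"); err != nil {
		panic(fmt.Sprintf("%v\n%s", err, out))
	}
	status("APIServer", p) // expect Paused while paused
	status("Kubelet", p)   // expect Stopped while paused
	if out, err := mk("unpause", "-p", p, "--alsologtostderr", "-v=1"); err != nil {
		panic(fmt.Sprintf("%v\n%s", err, out))
	}
	status("APIServer", p) // should exit 0 again after unpause
	status("Kubelet", p)
}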

TestStartStop/group/newest-cni/serial/FirstStart (37.64s)

=== RUN   TestStartStop/group/newest-cni/serial/FirstStart
start_stop_delete_test.go:186: (dbg) Run:  out/minikube-linux-amd64 start -p newest-cni-155249 --memory=2200 --alsologtostderr --wait=apiserver,system_pods,default_sa --feature-gates ServerSideApply=true --network-plugin=cni --extra-config=kubeadm.pod-network-cidr=10.42.0.0/16 --driver=docker  --container-runtime=crio --kubernetes-version=v1.27.3
E0731 11:14:12.424369   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/kindnet-577222/client.crt: no such file or directory
E0731 11:14:12.429627   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/kindnet-577222/client.crt: no such file or directory
E0731 11:14:12.439845   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/kindnet-577222/client.crt: no such file or directory
E0731 11:14:12.460092   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/kindnet-577222/client.crt: no such file or directory
E0731 11:14:12.500351   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/kindnet-577222/client.crt: no such file or directory
E0731 11:14:12.580665   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/kindnet-577222/client.crt: no such file or directory
E0731 11:14:12.740978   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/kindnet-577222/client.crt: no such file or directory
E0731 11:14:13.061674   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/kindnet-577222/client.crt: no such file or directory
E0731 11:14:13.702561   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/kindnet-577222/client.crt: no such file or directory
E0731 11:14:14.982950   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/kindnet-577222/client.crt: no such file or directory
E0731 11:14:17.543306   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/kindnet-577222/client.crt: no such file or directory
E0731 11:14:18.831433   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/auto-577222/client.crt: no such file or directory
E0731 11:14:22.664170   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/kindnet-577222/client.crt: no such file or directory
E0731 11:14:23.984310   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/custom-flannel-577222/client.crt: no such file or directory
E0731 11:14:23.989545   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/custom-flannel-577222/client.crt: no such file or directory
E0731 11:14:23.999783   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/custom-flannel-577222/client.crt: no such file or directory
E0731 11:14:24.020034   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/custom-flannel-577222/client.crt: no such file or directory
E0731 11:14:24.060315   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/custom-flannel-577222/client.crt: no such file or directory
E0731 11:14:24.140752   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/custom-flannel-577222/client.crt: no such file or directory
E0731 11:14:24.301167   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/custom-flannel-577222/client.crt: no such file or directory
E0731 11:14:24.621700   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/custom-flannel-577222/client.crt: no such file or directory
E0731 11:14:25.262308   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/custom-flannel-577222/client.crt: no such file or directory
E0731 11:14:26.543237   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/custom-flannel-577222/client.crt: no such file or directory
E0731 11:14:26.668209   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/calico-577222/client.crt: no such file or directory
E0731 11:14:26.674089   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/calico-577222/client.crt: no such file or directory
E0731 11:14:26.684324   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/calico-577222/client.crt: no such file or directory
E0731 11:14:26.704582   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/calico-577222/client.crt: no such file or directory
E0731 11:14:26.744894   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/calico-577222/client.crt: no such file or directory
E0731 11:14:26.825213   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/calico-577222/client.crt: no such file or directory
E0731 11:14:26.985660   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/calico-577222/client.crt: no such file or directory
E0731 11:14:27.306446   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/calico-577222/client.crt: no such file or directory
E0731 11:14:27.947299   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/calico-577222/client.crt: no such file or directory
E0731 11:14:29.104397   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/custom-flannel-577222/client.crt: no such file or directory
E0731 11:14:29.227769   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/calico-577222/client.crt: no such file or directory
E0731 11:14:31.788874   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/calico-577222/client.crt: no such file or directory
E0731 11:14:32.905047   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/kindnet-577222/client.crt: no such file or directory
E0731 11:14:32.970256   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/ingress-addon-legacy-538476/client.crt: no such file or directory
E0731 11:14:34.225253   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/custom-flannel-577222/client.crt: no such file or directory
E0731 11:14:36.909500   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/calico-577222/client.crt: no such file or directory
start_stop_delete_test.go:186: (dbg) Done: out/minikube-linux-amd64 start -p newest-cni-155249 --memory=2200 --alsologtostderr --wait=apiserver,system_pods,default_sa --feature-gates ServerSideApply=true --network-plugin=cni --extra-config=kubeadm.pod-network-cidr=10.42.0.0/16 --driver=docker  --container-runtime=crio --kubernetes-version=v1.27.3: (37.638868102s)
--- PASS: TestStartStop/group/newest-cni/serial/FirstStart (37.64s)

TestStartStop/group/newest-cni/serial/DeployApp (0s)

=== RUN   TestStartStop/group/newest-cni/serial/DeployApp
--- PASS: TestStartStop/group/newest-cni/serial/DeployApp (0.00s)

TestStartStop/group/newest-cni/serial/EnableAddonWhileActive (0.84s)

=== RUN   TestStartStop/group/newest-cni/serial/EnableAddonWhileActive
start_stop_delete_test.go:205: (dbg) Run:  out/minikube-linux-amd64 addons enable metrics-server -p newest-cni-155249 --images=MetricsServer=registry.k8s.io/echoserver:1.4 --registries=MetricsServer=fake.domain
start_stop_delete_test.go:211: WARNING: cni mode requires additional setup before pods can schedule :(
--- PASS: TestStartStop/group/newest-cni/serial/EnableAddonWhileActive (0.84s)

TestStartStop/group/newest-cni/serial/Stop (1.2s)

=== RUN   TestStartStop/group/newest-cni/serial/Stop
start_stop_delete_test.go:228: (dbg) Run:  out/minikube-linux-amd64 stop -p newest-cni-155249 --alsologtostderr -v=3
E0731 11:14:44.465722   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/custom-flannel-577222/client.crt: no such file or directory
start_stop_delete_test.go:228: (dbg) Done: out/minikube-linux-amd64 stop -p newest-cni-155249 --alsologtostderr -v=3: (1.200836737s)
--- PASS: TestStartStop/group/newest-cni/serial/Stop (1.20s)

TestStartStop/group/newest-cni/serial/EnableAddonAfterStop (0.16s)

=== RUN   TestStartStop/group/newest-cni/serial/EnableAddonAfterStop
start_stop_delete_test.go:239: (dbg) Run:  out/minikube-linux-amd64 status --format={{.Host}} -p newest-cni-155249 -n newest-cni-155249
start_stop_delete_test.go:239: (dbg) Non-zero exit: out/minikube-linux-amd64 status --format={{.Host}} -p newest-cni-155249 -n newest-cni-155249: exit status 7 (59.281002ms)

                                                
                                                
-- stdout --
	Stopped

                                                
                                                
-- /stdout --
start_stop_delete_test.go:239: status error: exit status 7 (may be ok)
start_stop_delete_test.go:246: (dbg) Run:  out/minikube-linux-amd64 addons enable dashboard -p newest-cni-155249 --images=MetricsScraper=registry.k8s.io/echoserver:1.4
--- PASS: TestStartStop/group/newest-cni/serial/EnableAddonAfterStop (0.16s)

TestStartStop/group/newest-cni/serial/SecondStart (25.57s)

=== RUN   TestStartStop/group/newest-cni/serial/SecondStart
start_stop_delete_test.go:256: (dbg) Run:  out/minikube-linux-amd64 start -p newest-cni-155249 --memory=2200 --alsologtostderr --wait=apiserver,system_pods,default_sa --feature-gates ServerSideApply=true --network-plugin=cni --extra-config=kubeadm.pod-network-cidr=10.42.0.0/16 --driver=docker  --container-runtime=crio --kubernetes-version=v1.27.3
E0731 11:14:47.149772   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/calico-577222/client.crt: no such file or directory
E0731 11:14:53.385711   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/kindnet-577222/client.crt: no such file or directory
E0731 11:15:04.946021   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/custom-flannel-577222/client.crt: no such file or directory
E0731 11:15:07.630336   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/calico-577222/client.crt: no such file or directory
start_stop_delete_test.go:256: (dbg) Done: out/minikube-linux-amd64 start -p newest-cni-155249 --memory=2200 --alsologtostderr --wait=apiserver,system_pods,default_sa --feature-gates ServerSideApply=true --network-plugin=cni --extra-config=kubeadm.pod-network-cidr=10.42.0.0/16 --driver=docker  --container-runtime=crio --kubernetes-version=v1.27.3: (25.291836135s)
start_stop_delete_test.go:262: (dbg) Run:  out/minikube-linux-amd64 status --format={{.Host}} -p newest-cni-155249 -n newest-cni-155249
--- PASS: TestStartStop/group/newest-cni/serial/SecondStart (25.57s)

TestStartStop/group/newest-cni/serial/UserAppExistsAfterStop (0s)

=== RUN   TestStartStop/group/newest-cni/serial/UserAppExistsAfterStop
start_stop_delete_test.go:273: WARNING: cni mode requires additional setup before pods can schedule :(
--- PASS: TestStartStop/group/newest-cni/serial/UserAppExistsAfterStop (0.00s)

TestStartStop/group/newest-cni/serial/AddonExistsAfterStop (0s)

=== RUN   TestStartStop/group/newest-cni/serial/AddonExistsAfterStop
start_stop_delete_test.go:284: WARNING: cni mode requires additional setup before pods can schedule :(
--- PASS: TestStartStop/group/newest-cni/serial/AddonExistsAfterStop (0.00s)

TestStartStop/group/newest-cni/serial/VerifyKubernetesImages (0.28s)

=== RUN   TestStartStop/group/newest-cni/serial/VerifyKubernetesImages
start_stop_delete_test.go:304: (dbg) Run:  out/minikube-linux-amd64 ssh -p newest-cni-155249 "sudo crictl images -o json"
start_stop_delete_test.go:304: Found non-minikube image: kindest/kindnetd:v20230511-dc714da8
--- PASS: TestStartStop/group/newest-cni/serial/VerifyKubernetesImages (0.28s)

TestStartStop/group/newest-cni/serial/Pause (2.46s)

=== RUN   TestStartStop/group/newest-cni/serial/Pause
start_stop_delete_test.go:311: (dbg) Run:  out/minikube-linux-amd64 pause -p newest-cni-155249 --alsologtostderr -v=1
start_stop_delete_test.go:311: (dbg) Run:  out/minikube-linux-amd64 status --format={{.APIServer}} -p newest-cni-155249 -n newest-cni-155249
start_stop_delete_test.go:311: (dbg) Non-zero exit: out/minikube-linux-amd64 status --format={{.APIServer}} -p newest-cni-155249 -n newest-cni-155249: exit status 2 (278.143556ms)

                                                
                                                
-- stdout --
	Paused

                                                
                                                
-- /stdout --
start_stop_delete_test.go:311: status error: exit status 2 (may be ok)
start_stop_delete_test.go:311: (dbg) Run:  out/minikube-linux-amd64 status --format={{.Kubelet}} -p newest-cni-155249 -n newest-cni-155249
start_stop_delete_test.go:311: (dbg) Non-zero exit: out/minikube-linux-amd64 status --format={{.Kubelet}} -p newest-cni-155249 -n newest-cni-155249: exit status 2 (281.226472ms)

                                                
                                                
-- stdout --
	Stopped

                                                
                                                
-- /stdout --
start_stop_delete_test.go:311: status error: exit status 2 (may be ok)
start_stop_delete_test.go:311: (dbg) Run:  out/minikube-linux-amd64 unpause -p newest-cni-155249 --alsologtostderr -v=1
start_stop_delete_test.go:311: (dbg) Run:  out/minikube-linux-amd64 status --format={{.APIServer}} -p newest-cni-155249 -n newest-cni-155249
start_stop_delete_test.go:311: (dbg) Run:  out/minikube-linux-amd64 status --format={{.Kubelet}} -p newest-cni-155249 -n newest-cni-155249
--- PASS: TestStartStop/group/newest-cni/serial/Pause (2.46s)

TestStartStop/group/no-preload/serial/UserAppExistsAfterStop (14.07s)

=== RUN   TestStartStop/group/no-preload/serial/UserAppExistsAfterStop
start_stop_delete_test.go:274: (dbg) TestStartStop/group/no-preload/serial/UserAppExistsAfterStop: waiting 9m0s for pods matching "k8s-app=kubernetes-dashboard" in namespace "kubernetes-dashboard" ...
helpers_test.go:344: "kubernetes-dashboard-5c5cfc8747-n2dfb" [8abd4274-b107-46b4-a11c-ef8256231972] Pending / Ready:ContainersNotReady (containers with unready status: [kubernetes-dashboard]) / ContainersReady:ContainersNotReady (containers with unready status: [kubernetes-dashboard])
E0731 11:18:19.059406   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/old-k8s-version-070357/client.crt: no such file or directory
helpers_test.go:344: "kubernetes-dashboard-5c5cfc8747-n2dfb" [8abd4274-b107-46b4-a11c-ef8256231972] Running
E0731 11:18:24.593681   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/auto-577222/client.crt: no such file or directory
start_stop_delete_test.go:274: (dbg) TestStartStop/group/no-preload/serial/UserAppExistsAfterStop: k8s-app=kubernetes-dashboard healthy within 14.067467645s
--- PASS: TestStartStop/group/no-preload/serial/UserAppExistsAfterStop (14.07s)

TestStartStop/group/no-preload/serial/AddonExistsAfterStop (5.1s)

=== RUN   TestStartStop/group/no-preload/serial/AddonExistsAfterStop
start_stop_delete_test.go:287: (dbg) TestStartStop/group/no-preload/serial/AddonExistsAfterStop: waiting 9m0s for pods matching "k8s-app=kubernetes-dashboard" in namespace "kubernetes-dashboard" ...
helpers_test.go:344: "kubernetes-dashboard-5c5cfc8747-n2dfb" [8abd4274-b107-46b4-a11c-ef8256231972] Running
E0731 11:18:29.773180   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/bridge-577222/client.crt: no such file or directory
start_stop_delete_test.go:287: (dbg) TestStartStop/group/no-preload/serial/AddonExistsAfterStop: k8s-app=kubernetes-dashboard healthy within 5.009363967s
start_stop_delete_test.go:291: (dbg) Run:  kubectl --context no-preload-299339 describe deploy/dashboard-metrics-scraper -n kubernetes-dashboard
--- PASS: TestStartStop/group/no-preload/serial/AddonExistsAfterStop (5.10s)

TestStartStop/group/no-preload/serial/VerifyKubernetesImages (0.38s)

=== RUN   TestStartStop/group/no-preload/serial/VerifyKubernetesImages
start_stop_delete_test.go:304: (dbg) Run:  out/minikube-linux-amd64 ssh -p no-preload-299339 "sudo crictl images -o json"
start_stop_delete_test.go:304: Found non-minikube image: kindest/kindnetd:v20230511-dc714da8
start_stop_delete_test.go:304: Found non-minikube image: gcr.io/k8s-minikube/busybox:1.28.4-glibc
--- PASS: TestStartStop/group/no-preload/serial/VerifyKubernetesImages (0.38s)

TestStartStop/group/no-preload/serial/Pause (3.11s)

=== RUN   TestStartStop/group/no-preload/serial/Pause
start_stop_delete_test.go:311: (dbg) Run:  out/minikube-linux-amd64 pause -p no-preload-299339 --alsologtostderr -v=1
start_stop_delete_test.go:311: (dbg) Run:  out/minikube-linux-amd64 status --format={{.APIServer}} -p no-preload-299339 -n no-preload-299339
start_stop_delete_test.go:311: (dbg) Non-zero exit: out/minikube-linux-amd64 status --format={{.APIServer}} -p no-preload-299339 -n no-preload-299339: exit status 2 (317.016171ms)

                                                
                                                
-- stdout --
	Paused

                                                
                                                
-- /stdout --
start_stop_delete_test.go:311: status error: exit status 2 (may be ok)
start_stop_delete_test.go:311: (dbg) Run:  out/minikube-linux-amd64 status --format={{.Kubelet}} -p no-preload-299339 -n no-preload-299339
start_stop_delete_test.go:311: (dbg) Non-zero exit: out/minikube-linux-amd64 status --format={{.Kubelet}} -p no-preload-299339 -n no-preload-299339: exit status 2 (333.939701ms)

                                                
                                                
-- stdout --
	Stopped

                                                
                                                
-- /stdout --
start_stop_delete_test.go:311: status error: exit status 2 (may be ok)
start_stop_delete_test.go:311: (dbg) Run:  out/minikube-linux-amd64 unpause -p no-preload-299339 --alsologtostderr -v=1
start_stop_delete_test.go:311: (dbg) Run:  out/minikube-linux-amd64 status --format={{.APIServer}} -p no-preload-299339 -n no-preload-299339
start_stop_delete_test.go:311: (dbg) Run:  out/minikube-linux-amd64 status --format={{.Kubelet}} -p no-preload-299339 -n no-preload-299339
--- PASS: TestStartStop/group/no-preload/serial/Pause (3.11s)

TestStartStop/group/default-k8s-diff-port/serial/UserAppExistsAfterStop (14.03s)

=== RUN   TestStartStop/group/default-k8s-diff-port/serial/UserAppExistsAfterStop
start_stop_delete_test.go:274: (dbg) TestStartStop/group/default-k8s-diff-port/serial/UserAppExistsAfterStop: waiting 9m0s for pods matching "k8s-app=kubernetes-dashboard" in namespace "kubernetes-dashboard" ...
helpers_test.go:344: "kubernetes-dashboard-5c5cfc8747-fhhld" [b27ccce2-5859-45f7-ae3f-4ed23b562a3b] Pending / Ready:ContainersNotReady (containers with unready status: [kubernetes-dashboard]) / ContainersReady:ContainersNotReady (containers with unready status: [kubernetes-dashboard])
helpers_test.go:344: "kubernetes-dashboard-5c5cfc8747-fhhld" [b27ccce2-5859-45f7-ae3f-4ed23b562a3b] Running
start_stop_delete_test.go:274: (dbg) TestStartStop/group/default-k8s-diff-port/serial/UserAppExistsAfterStop: k8s-app=kubernetes-dashboard healthy within 14.026266299s
--- PASS: TestStartStop/group/default-k8s-diff-port/serial/UserAppExistsAfterStop (14.03s)

TestStartStop/group/embed-certs/serial/UserAppExistsAfterStop (14.06s)

=== RUN   TestStartStop/group/embed-certs/serial/UserAppExistsAfterStop
start_stop_delete_test.go:274: (dbg) TestStartStop/group/embed-certs/serial/UserAppExistsAfterStop: waiting 9m0s for pods matching "k8s-app=kubernetes-dashboard" in namespace "kubernetes-dashboard" ...
helpers_test.go:344: "kubernetes-dashboard-5c5cfc8747-c2nth" [50d55ecc-0034-4c61-99f6-b09ededac5fa] Pending / Ready:ContainersNotReady (containers with unready status: [kubernetes-dashboard]) / ContainersReady:ContainersNotReady (containers with unready status: [kubernetes-dashboard])
E0731 11:19:00.019906   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/old-k8s-version-070357/client.crt: no such file or directory
helpers_test.go:344: "kubernetes-dashboard-5c5cfc8747-c2nth" [50d55ecc-0034-4c61-99f6-b09ededac5fa] Running
start_stop_delete_test.go:274: (dbg) TestStartStop/group/embed-certs/serial/UserAppExistsAfterStop: k8s-app=kubernetes-dashboard healthy within 14.058300389s
--- PASS: TestStartStop/group/embed-certs/serial/UserAppExistsAfterStop (14.06s)

TestStartStop/group/default-k8s-diff-port/serial/AddonExistsAfterStop (5.08s)

=== RUN   TestStartStop/group/default-k8s-diff-port/serial/AddonExistsAfterStop
start_stop_delete_test.go:287: (dbg) TestStartStop/group/default-k8s-diff-port/serial/AddonExistsAfterStop: waiting 9m0s for pods matching "k8s-app=kubernetes-dashboard" in namespace "kubernetes-dashboard" ...
helpers_test.go:344: "kubernetes-dashboard-5c5cfc8747-fhhld" [b27ccce2-5859-45f7-ae3f-4ed23b562a3b] Running
start_stop_delete_test.go:287: (dbg) TestStartStop/group/default-k8s-diff-port/serial/AddonExistsAfterStop: k8s-app=kubernetes-dashboard healthy within 5.009622346s
start_stop_delete_test.go:291: (dbg) Run:  kubectl --context default-k8s-diff-port-628397 describe deploy/dashboard-metrics-scraper -n kubernetes-dashboard
--- PASS: TestStartStop/group/default-k8s-diff-port/serial/AddonExistsAfterStop (5.08s)

TestStartStop/group/embed-certs/serial/AddonExistsAfterStop (5.09s)

=== RUN   TestStartStop/group/embed-certs/serial/AddonExistsAfterStop
start_stop_delete_test.go:287: (dbg) TestStartStop/group/embed-certs/serial/AddonExistsAfterStop: waiting 9m0s for pods matching "k8s-app=kubernetes-dashboard" in namespace "kubernetes-dashboard" ...
helpers_test.go:344: "kubernetes-dashboard-5c5cfc8747-c2nth" [50d55ecc-0034-4c61-99f6-b09ededac5fa] Running
E0731 11:19:12.424207   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/kindnet-577222/client.crt: no such file or directory
start_stop_delete_test.go:287: (dbg) TestStartStop/group/embed-certs/serial/AddonExistsAfterStop: k8s-app=kubernetes-dashboard healthy within 5.009235271s
start_stop_delete_test.go:291: (dbg) Run:  kubectl --context embed-certs-746333 describe deploy/dashboard-metrics-scraper -n kubernetes-dashboard
--- PASS: TestStartStop/group/embed-certs/serial/AddonExistsAfterStop (5.09s)

TestStartStop/group/default-k8s-diff-port/serial/VerifyKubernetesImages (0.29s)

=== RUN   TestStartStop/group/default-k8s-diff-port/serial/VerifyKubernetesImages
start_stop_delete_test.go:304: (dbg) Run:  out/minikube-linux-amd64 ssh -p default-k8s-diff-port-628397 "sudo crictl images -o json"
start_stop_delete_test.go:304: Found non-minikube image: kindest/kindnetd:v20230511-dc714da8
start_stop_delete_test.go:304: Found non-minikube image: gcr.io/k8s-minikube/busybox:1.28.4-glibc
--- PASS: TestStartStop/group/default-k8s-diff-port/serial/VerifyKubernetesImages (0.29s)

TestStartStop/group/embed-certs/serial/VerifyKubernetesImages (0.3s)

=== RUN   TestStartStop/group/embed-certs/serial/VerifyKubernetesImages
start_stop_delete_test.go:304: (dbg) Run:  out/minikube-linux-amd64 ssh -p embed-certs-746333 "sudo crictl images -o json"
start_stop_delete_test.go:304: Found non-minikube image: kindest/kindnetd:v20230511-dc714da8
start_stop_delete_test.go:304: Found non-minikube image: gcr.io/k8s-minikube/busybox:1.28.4-glibc
--- PASS: TestStartStop/group/embed-certs/serial/VerifyKubernetesImages (0.30s)

TestStartStop/group/default-k8s-diff-port/serial/Pause (2.67s)

=== RUN   TestStartStop/group/default-k8s-diff-port/serial/Pause
start_stop_delete_test.go:311: (dbg) Run:  out/minikube-linux-amd64 pause -p default-k8s-diff-port-628397 --alsologtostderr -v=1
start_stop_delete_test.go:311: (dbg) Run:  out/minikube-linux-amd64 status --format={{.APIServer}} -p default-k8s-diff-port-628397 -n default-k8s-diff-port-628397
start_stop_delete_test.go:311: (dbg) Non-zero exit: out/minikube-linux-amd64 status --format={{.APIServer}} -p default-k8s-diff-port-628397 -n default-k8s-diff-port-628397: exit status 2 (291.106094ms)

                                                
                                                
-- stdout --
	Paused

                                                
                                                
-- /stdout --
start_stop_delete_test.go:311: status error: exit status 2 (may be ok)
start_stop_delete_test.go:311: (dbg) Run:  out/minikube-linux-amd64 status --format={{.Kubelet}} -p default-k8s-diff-port-628397 -n default-k8s-diff-port-628397
start_stop_delete_test.go:311: (dbg) Non-zero exit: out/minikube-linux-amd64 status --format={{.Kubelet}} -p default-k8s-diff-port-628397 -n default-k8s-diff-port-628397: exit status 2 (296.581233ms)

                                                
                                                
-- stdout --
	Stopped

                                                
                                                
-- /stdout --
start_stop_delete_test.go:311: status error: exit status 2 (may be ok)
start_stop_delete_test.go:311: (dbg) Run:  out/minikube-linux-amd64 unpause -p default-k8s-diff-port-628397 --alsologtostderr -v=1
start_stop_delete_test.go:311: (dbg) Run:  out/minikube-linux-amd64 status --format={{.APIServer}} -p default-k8s-diff-port-628397 -n default-k8s-diff-port-628397
start_stop_delete_test.go:311: (dbg) Run:  out/minikube-linux-amd64 status --format={{.Kubelet}} -p default-k8s-diff-port-628397 -n default-k8s-diff-port-628397
--- PASS: TestStartStop/group/default-k8s-diff-port/serial/Pause (2.67s)

TestStartStop/group/embed-certs/serial/Pause (2.63s)

=== RUN   TestStartStop/group/embed-certs/serial/Pause
start_stop_delete_test.go:311: (dbg) Run:  out/minikube-linux-amd64 pause -p embed-certs-746333 --alsologtostderr -v=1
start_stop_delete_test.go:311: (dbg) Run:  out/minikube-linux-amd64 status --format={{.APIServer}} -p embed-certs-746333 -n embed-certs-746333
start_stop_delete_test.go:311: (dbg) Non-zero exit: out/minikube-linux-amd64 status --format={{.APIServer}} -p embed-certs-746333 -n embed-certs-746333: exit status 2 (280.423022ms)

                                                
                                                
-- stdout --
	Paused

                                                
                                                
-- /stdout --
start_stop_delete_test.go:311: status error: exit status 2 (may be ok)
start_stop_delete_test.go:311: (dbg) Run:  out/minikube-linux-amd64 status --format={{.Kubelet}} -p embed-certs-746333 -n embed-certs-746333
start_stop_delete_test.go:311: (dbg) Non-zero exit: out/minikube-linux-amd64 status --format={{.Kubelet}} -p embed-certs-746333 -n embed-certs-746333: exit status 2 (292.12663ms)

                                                
                                                
-- stdout --
	Stopped

                                                
                                                
-- /stdout --
start_stop_delete_test.go:311: status error: exit status 2 (may be ok)
start_stop_delete_test.go:311: (dbg) Run:  out/minikube-linux-amd64 unpause -p embed-certs-746333 --alsologtostderr -v=1
start_stop_delete_test.go:311: (dbg) Run:  out/minikube-linux-amd64 status --format={{.APIServer}} -p embed-certs-746333 -n embed-certs-746333
start_stop_delete_test.go:311: (dbg) Run:  out/minikube-linux-amd64 status --format={{.Kubelet}} -p embed-certs-746333 -n embed-certs-746333
--- PASS: TestStartStop/group/embed-certs/serial/Pause (2.63s)

Test skip (24/304)

TestDownloadOnly/v1.16.0/cached-images (0s)

=== RUN   TestDownloadOnly/v1.16.0/cached-images
aaa_download_only_test.go:117: Preload exists, images won't be cached
--- SKIP: TestDownloadOnly/v1.16.0/cached-images (0.00s)

TestDownloadOnly/v1.16.0/binaries (0s)

=== RUN   TestDownloadOnly/v1.16.0/binaries
aaa_download_only_test.go:136: Preload exists, binaries are present within.
--- SKIP: TestDownloadOnly/v1.16.0/binaries (0.00s)

TestDownloadOnly/v1.16.0/kubectl (0s)

=== RUN   TestDownloadOnly/v1.16.0/kubectl
aaa_download_only_test.go:152: Test for darwin and windows
--- SKIP: TestDownloadOnly/v1.16.0/kubectl (0.00s)

TestDownloadOnly/v1.27.3/cached-images (0s)

=== RUN   TestDownloadOnly/v1.27.3/cached-images
aaa_download_only_test.go:117: Preload exists, images won't be cached
--- SKIP: TestDownloadOnly/v1.27.3/cached-images (0.00s)

TestDownloadOnly/v1.27.3/binaries (0s)

=== RUN   TestDownloadOnly/v1.27.3/binaries
aaa_download_only_test.go:136: Preload exists, binaries are present within.
--- SKIP: TestDownloadOnly/v1.27.3/binaries (0.00s)

                                                
                                    
x
+
TestDownloadOnly/v1.27.3/kubectl (0s)

                                                
                                                
=== RUN   TestDownloadOnly/v1.27.3/kubectl
aaa_download_only_test.go:152: Test for darwin and windows
--- SKIP: TestDownloadOnly/v1.27.3/kubectl (0.00s)

                                                
                                    
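All six DownloadOnly skips above come from the same guard: when a preloaded tarball already exists for the Kubernetes version under test, per-image and per-binary caching is never exercised. A rough Go sketch of that guard pattern; preloadExists and the cache layout here are assumptions for illustration, not the suite's actual code.

package integration

import (
	"os"
	"path/filepath"
	"testing"
)

// preloadExists is a hypothetical stand-in: it only checks whether a
// preload tarball for the given Kubernetes version is already sitting
// in the local minikube cache (directory layout assumed, not verified).
func preloadExists(k8sVersion string) bool {
	home, err := os.UserHomeDir()
	if err != nil {
		return false
	}
	tarball := filepath.Join(home, ".minikube", "cache", "preloaded-tarball",
		"preloaded-images-k8s-"+k8sVersion+".tar.lz4")
	_, err = os.Stat(tarball)
	return err == nil
}

// testCachedImages mirrors the skip pattern that produced the entries above.
func testCachedImages(t *testing.T, k8sVersion string) {
	if preloadExists(k8sVersion) {
		t.Skip("Preload exists, images won't be cached")
	}
	// ... would verify the per-image cache here ...
}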
TestAddons/parallel/Olm (0s)

=== RUN   TestAddons/parallel/Olm
=== PAUSE TestAddons/parallel/Olm

=== CONT  TestAddons/parallel/Olm
addons_test.go:474: Skipping OLM addon test until https://github.com/operator-framework/operator-lifecycle-manager/issues/2534 is resolved
--- SKIP: TestAddons/parallel/Olm (0.00s)

TestDockerFlags (0s)

=== RUN   TestDockerFlags
docker_test.go:41: skipping: only runs with docker container runtime, currently testing crio
--- SKIP: TestDockerFlags (0.00s)

TestDockerEnvContainerd (0s)

=== RUN   TestDockerEnvContainerd
docker_test.go:170: running with crio true linux amd64
docker_test.go:172: skipping: TestDockerEnvContainerd can only be run with the containerd runtime on Docker driver
--- SKIP: TestDockerEnvContainerd (0.00s)

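The DockerFlags and DockerEnvContainerd skips above (and several below) are gated on the runtime/OS tuple logged at docker_test.go:170. A hedged sketch of that style of guard; containerRuntime is a stand-in for however the suite actually records the runtime under test.

package integration

import (
	"runtime"
	"testing"
)

// containerRuntime is a hypothetical stand-in for the suite's flag that
// records which container runtime this run is exercising ("crio" here).
var containerRuntime = "crio"

// requireRuntime skips the test unless it is running against the named
// container runtime, mirroring the skip messages in this report.
func requireRuntime(t *testing.T, want string) {
	t.Helper()
	if containerRuntime != want {
		t.Skipf("skipping: only runs with %s container runtime, currently testing %s",
			want, containerRuntime)
	}
}

// requireGOOS skips unless the test is on the named operating system,
// the pattern behind the darwin- and windows-only skips below.
func requireGOOS(t *testing.T, want string) {
	t.Helper()
	if runtime.GOOS != want {
		t.Skipf("test only runs on %s", want)
	}
}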
TestHyperKitDriverInstallOrUpdate (0s)

=== RUN   TestHyperKitDriverInstallOrUpdate
driver_install_or_update_test.go:105: Skip if not darwin.
--- SKIP: TestHyperKitDriverInstallOrUpdate (0.00s)

TestHyperkitDriverSkipUpgrade (0s)

=== RUN   TestHyperkitDriverSkipUpgrade
driver_install_or_update_test.go:169: Skip if not darwin.
--- SKIP: TestHyperkitDriverSkipUpgrade (0.00s)

TestFunctional/parallel/DockerEnv (0s)

=== RUN   TestFunctional/parallel/DockerEnv
=== PAUSE TestFunctional/parallel/DockerEnv

=== CONT  TestFunctional/parallel/DockerEnv
functional_test.go:459: only validate docker env with docker container runtime, currently testing crio
--- SKIP: TestFunctional/parallel/DockerEnv (0.00s)

TestFunctional/parallel/PodmanEnv (0s)

=== RUN   TestFunctional/parallel/PodmanEnv
=== PAUSE TestFunctional/parallel/PodmanEnv

=== CONT  TestFunctional/parallel/PodmanEnv
functional_test.go:546: only validate podman env with docker container runtime, currently testing crio
--- SKIP: TestFunctional/parallel/PodmanEnv (0.00s)

TestFunctional/parallel/TunnelCmd/serial/DNSResolutionByDig (0s)

=== RUN   TestFunctional/parallel/TunnelCmd/serial/DNSResolutionByDig
functional_test_tunnel_test.go:99: DNS forwarding is only supported for Hyperkit on Darwin, skipping test DNS forwarding
--- SKIP: TestFunctional/parallel/TunnelCmd/serial/DNSResolutionByDig (0.00s)

TestFunctional/parallel/TunnelCmd/serial/DNSResolutionByDscacheutil (0s)

=== RUN   TestFunctional/parallel/TunnelCmd/serial/DNSResolutionByDscacheutil
functional_test_tunnel_test.go:99: DNS forwarding is only supported for Hyperkit on Darwin, skipping test DNS forwarding
--- SKIP: TestFunctional/parallel/TunnelCmd/serial/DNSResolutionByDscacheutil (0.00s)

TestFunctional/parallel/TunnelCmd/serial/AccessThroughDNS (0s)

=== RUN   TestFunctional/parallel/TunnelCmd/serial/AccessThroughDNS
functional_test_tunnel_test.go:99: DNS forwarding is only supported for Hyperkit on Darwin, skipping test DNS forwarding
--- SKIP: TestFunctional/parallel/TunnelCmd/serial/AccessThroughDNS (0.00s)

TestGvisorAddon (0s)

=== RUN   TestGvisorAddon
gvisor_addon_test.go:34: skipping test because --gvisor=false
--- SKIP: TestGvisorAddon (0.00s)

TestImageBuild (0s)

=== RUN   TestImageBuild
image_test.go:33: 
--- SKIP: TestImageBuild (0.00s)

TestChangeNoneUser (0s)

=== RUN   TestChangeNoneUser
none_test.go:38: Test requires none driver and SUDO_USER env to not be empty
--- SKIP: TestChangeNoneUser (0.00s)

TestScheduledStopWindows (0s)

=== RUN   TestScheduledStopWindows
scheduled_stop_test.go:42: test only runs on windows
--- SKIP: TestScheduledStopWindows (0.00s)

TestSkaffold (0s)

=== RUN   TestSkaffold
skaffold_test.go:45: skaffold requires docker-env, currently testing crio container runtime
--- SKIP: TestSkaffold (0.00s)

TestNetworkPlugins/group/kubenet (3.5s)

=== RUN   TestNetworkPlugins/group/kubenet
net_test.go:93: Skipping the test as crio container runtimes requires CNI
panic.go:522: 
----------------------- debugLogs start: kubenet-577222 [pass: true] --------------------------------
>>> netcat: nslookup kubernetes.default:
Error in configuration: context was not found for specified context: kubenet-577222

>>> netcat: nslookup debug kubernetes.default a-records:
Error in configuration: context was not found for specified context: kubenet-577222

>>> netcat: dig search kubernetes.default:
Error in configuration: context was not found for specified context: kubenet-577222

>>> netcat: dig @10.96.0.10 kubernetes.default.svc.cluster.local udp/53:
Error in configuration: context was not found for specified context: kubenet-577222

>>> netcat: dig @10.96.0.10 kubernetes.default.svc.cluster.local tcp/53:
Error in configuration: context was not found for specified context: kubenet-577222

>>> netcat: nc 10.96.0.10 udp/53:
Error in configuration: context was not found for specified context: kubenet-577222

>>> netcat: nc 10.96.0.10 tcp/53:
Error in configuration: context was not found for specified context: kubenet-577222

>>> netcat: /etc/nsswitch.conf:
Error in configuration: context was not found for specified context: kubenet-577222

>>> netcat: /etc/hosts:
Error in configuration: context was not found for specified context: kubenet-577222

>>> netcat: /etc/resolv.conf:
Error in configuration: context was not found for specified context: kubenet-577222

>>> host: /etc/nsswitch.conf:
* Profile "kubenet-577222" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p kubenet-577222"

>>> host: /etc/hosts:
* Profile "kubenet-577222" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p kubenet-577222"

>>> host: /etc/resolv.conf:
* Profile "kubenet-577222" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p kubenet-577222"

>>> k8s: nodes, services, endpoints, daemon sets, deployments and pods, :
Error in configuration: context was not found for specified context: kubenet-577222

>>> host: crictl pods:
* Profile "kubenet-577222" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p kubenet-577222"

>>> host: crictl containers:
* Profile "kubenet-577222" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p kubenet-577222"

>>> k8s: describe netcat deployment:
error: context "kubenet-577222" does not exist

>>> k8s: describe netcat pod(s):
error: context "kubenet-577222" does not exist

>>> k8s: netcat logs:
error: context "kubenet-577222" does not exist

>>> k8s: describe coredns deployment:
error: context "kubenet-577222" does not exist

>>> k8s: describe coredns pods:
error: context "kubenet-577222" does not exist

>>> k8s: coredns logs:
error: context "kubenet-577222" does not exist

>>> k8s: describe api server pod(s):
error: context "kubenet-577222" does not exist

>>> k8s: api server logs:
error: context "kubenet-577222" does not exist

>>> host: /etc/cni:
* Profile "kubenet-577222" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p kubenet-577222"

>>> host: ip a s:
* Profile "kubenet-577222" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p kubenet-577222"

>>> host: ip r s:
* Profile "kubenet-577222" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p kubenet-577222"

>>> host: iptables-save:
* Profile "kubenet-577222" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p kubenet-577222"

>>> host: iptables table nat:
* Profile "kubenet-577222" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p kubenet-577222"

>>> k8s: describe kube-proxy daemon set:
error: context "kubenet-577222" does not exist

>>> k8s: describe kube-proxy pod(s):
error: context "kubenet-577222" does not exist

>>> k8s: kube-proxy logs:
error: context "kubenet-577222" does not exist

>>> host: kubelet daemon status:
* Profile "kubenet-577222" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p kubenet-577222"

>>> host: kubelet daemon config:
* Profile "kubenet-577222" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p kubenet-577222"

>>> k8s: kubelet logs:
* Profile "kubenet-577222" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p kubenet-577222"

>>> host: /etc/kubernetes/kubelet.conf:
* Profile "kubenet-577222" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p kubenet-577222"

>>> host: /var/lib/kubelet/config.yaml:
* Profile "kubenet-577222" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p kubenet-577222"

>>> k8s: kubectl config:
apiVersion: v1
clusters: null
contexts: null
current-context: ""
kind: Config
preferences: {}
users: null

>>> k8s: cms:
Error in configuration: context was not found for specified context: kubenet-577222

>>> host: docker daemon status:
* Profile "kubenet-577222" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p kubenet-577222"

>>> host: docker daemon config:
* Profile "kubenet-577222" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p kubenet-577222"

>>> host: /etc/docker/daemon.json:
* Profile "kubenet-577222" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p kubenet-577222"

>>> host: docker system info:
* Profile "kubenet-577222" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p kubenet-577222"

>>> host: cri-docker daemon status:
* Profile "kubenet-577222" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p kubenet-577222"

>>> host: cri-docker daemon config:
* Profile "kubenet-577222" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p kubenet-577222"

>>> host: /etc/systemd/system/cri-docker.service.d/10-cni.conf:
* Profile "kubenet-577222" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p kubenet-577222"

>>> host: /usr/lib/systemd/system/cri-docker.service:
* Profile "kubenet-577222" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p kubenet-577222"

>>> host: cri-dockerd version:
* Profile "kubenet-577222" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p kubenet-577222"

>>> host: containerd daemon status:
* Profile "kubenet-577222" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p kubenet-577222"

>>> host: containerd daemon config:
* Profile "kubenet-577222" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p kubenet-577222"

>>> host: /lib/systemd/system/containerd.service:
* Profile "kubenet-577222" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p kubenet-577222"

>>> host: /etc/containerd/config.toml:
* Profile "kubenet-577222" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p kubenet-577222"

>>> host: containerd config dump:
* Profile "kubenet-577222" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p kubenet-577222"

>>> host: crio daemon status:
* Profile "kubenet-577222" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p kubenet-577222"

>>> host: crio daemon config:
* Profile "kubenet-577222" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p kubenet-577222"

>>> host: /etc/crio:
* Profile "kubenet-577222" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p kubenet-577222"

>>> host: crio config:
* Profile "kubenet-577222" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p kubenet-577222"

----------------------- debugLogs end: kubenet-577222 [took: 3.298264927s] --------------------------------
helpers_test.go:175: Cleaning up "kubenet-577222" profile ...
helpers_test.go:178: (dbg) Run:  out/minikube-linux-amd64 delete -p kubenet-577222
E0731 11:03:35.368719   12537 cert_rotation.go:168] key failed with : open /home/jenkins/minikube-integration/16969-5799/.minikube/profiles/functional-683521/client.crt: no such file or directory
--- SKIP: TestNetworkPlugins/group/kubenet (3.50s)

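Each probe in the debugLogs dump above is a shell-out against either the profile's kubectl context or the minikube host, which is why a never-created profile degrades every probe into the "context was not found" / "Profile ... not found" lines seen here instead of aborting the dump. A rough Go sketch of that collection loop, assuming kubectl and minikube on PATH; the probe set and exec details are illustrative, not the harness's real code.

package integration

import (
	"fmt"
	"os/exec"
)

// probe captures combined output from one diagnostic command; failures
// are reported inline rather than aborting, so a deleted profile still
// yields a complete (all-errors) debugLogs section like the one above.
func probe(header, name string, args ...string) {
	fmt.Printf(">>> %s:\n", header)
	out, err := exec.Command(name, args...).CombinedOutput()
	fmt.Print(string(out))
	if err != nil && len(out) == 0 {
		fmt.Println(err)
	}
	fmt.Println()
}

// collectDebugLogs runs the in-cluster probes via the profile's kubectl
// context and the host probes via `minikube ssh` (illustrative subset).
func collectDebugLogs(profile string) {
	probe("netcat: nslookup kubernetes.default",
		"kubectl", "--context", profile, "exec", "deploy/netcat", "--",
		"nslookup", "kubernetes.default")
	probe("host: crio config",
		"minikube", "ssh", "-p", profile, "sudo crio config")
	// ... and so on for each entry in the dump above ...
}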
TestNetworkPlugins/group/cilium (3.51s)

=== RUN   TestNetworkPlugins/group/cilium
net_test.go:102: Skipping the test as it's interfering with other tests and is outdated
panic.go:522: 
----------------------- debugLogs start: cilium-577222 [pass: true] --------------------------------
>>> netcat: nslookup kubernetes.default:
Error in configuration: context was not found for specified context: cilium-577222

>>> netcat: nslookup debug kubernetes.default a-records:
Error in configuration: context was not found for specified context: cilium-577222

>>> netcat: dig search kubernetes.default:
Error in configuration: context was not found for specified context: cilium-577222

>>> netcat: dig @10.96.0.10 kubernetes.default.svc.cluster.local udp/53:
Error in configuration: context was not found for specified context: cilium-577222

>>> netcat: dig @10.96.0.10 kubernetes.default.svc.cluster.local tcp/53:
Error in configuration: context was not found for specified context: cilium-577222

>>> netcat: nc 10.96.0.10 udp/53:
Error in configuration: context was not found for specified context: cilium-577222

>>> netcat: nc 10.96.0.10 tcp/53:
Error in configuration: context was not found for specified context: cilium-577222

>>> netcat: /etc/nsswitch.conf:
Error in configuration: context was not found for specified context: cilium-577222

>>> netcat: /etc/hosts:
Error in configuration: context was not found for specified context: cilium-577222

>>> netcat: /etc/resolv.conf:
Error in configuration: context was not found for specified context: cilium-577222

>>> host: /etc/nsswitch.conf:
* Profile "cilium-577222" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-577222"

>>> host: /etc/hosts:
* Profile "cilium-577222" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-577222"

>>> host: /etc/resolv.conf:
* Profile "cilium-577222" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-577222"

>>> k8s: nodes, services, endpoints, daemon sets, deployments and pods, :
Error in configuration: context was not found for specified context: cilium-577222

>>> host: crictl pods:
* Profile "cilium-577222" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-577222"

>>> host: crictl containers:
* Profile "cilium-577222" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-577222"

>>> k8s: describe netcat deployment:
error: context "cilium-577222" does not exist

>>> k8s: describe netcat pod(s):
error: context "cilium-577222" does not exist

>>> k8s: netcat logs:
error: context "cilium-577222" does not exist

>>> k8s: describe coredns deployment:
error: context "cilium-577222" does not exist

>>> k8s: describe coredns pods:
error: context "cilium-577222" does not exist

>>> k8s: coredns logs:
error: context "cilium-577222" does not exist

>>> k8s: describe api server pod(s):
error: context "cilium-577222" does not exist

>>> k8s: api server logs:
error: context "cilium-577222" does not exist

>>> host: /etc/cni:
* Profile "cilium-577222" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-577222"

>>> host: ip a s:
* Profile "cilium-577222" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-577222"

>>> host: ip r s:
* Profile "cilium-577222" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-577222"

>>> host: iptables-save:
* Profile "cilium-577222" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-577222"

>>> host: iptables table nat:
* Profile "cilium-577222" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-577222"

>>> k8s: describe cilium daemon set:
Error in configuration: context was not found for specified context: cilium-577222

>>> k8s: describe cilium daemon set pod(s):
Error in configuration: context was not found for specified context: cilium-577222

>>> k8s: cilium daemon set container(s) logs (current):
error: context "cilium-577222" does not exist

>>> k8s: cilium daemon set container(s) logs (previous):
error: context "cilium-577222" does not exist

>>> k8s: describe cilium deployment:
Error in configuration: context was not found for specified context: cilium-577222

>>> k8s: describe cilium deployment pod(s):
Error in configuration: context was not found for specified context: cilium-577222

>>> k8s: cilium deployment container(s) logs (current):
error: context "cilium-577222" does not exist

>>> k8s: cilium deployment container(s) logs (previous):
error: context "cilium-577222" does not exist

>>> k8s: describe kube-proxy daemon set:
error: context "cilium-577222" does not exist

>>> k8s: describe kube-proxy pod(s):
error: context "cilium-577222" does not exist

>>> k8s: kube-proxy logs:
error: context "cilium-577222" does not exist

>>> host: kubelet daemon status:
* Profile "cilium-577222" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-577222"

>>> host: kubelet daemon config:
* Profile "cilium-577222" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-577222"

>>> k8s: kubelet logs:
* Profile "cilium-577222" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-577222"

>>> host: /etc/kubernetes/kubelet.conf:
* Profile "cilium-577222" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-577222"

>>> host: /var/lib/kubelet/config.yaml:
* Profile "cilium-577222" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-577222"

>>> k8s: kubectl config:
apiVersion: v1
clusters: null
contexts: null
current-context: ""
kind: Config
preferences: {}
users: null

>>> k8s: cms:
Error in configuration: context was not found for specified context: cilium-577222

>>> host: docker daemon status:
* Profile "cilium-577222" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-577222"

>>> host: docker daemon config:
* Profile "cilium-577222" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-577222"

>>> host: /etc/docker/daemon.json:
* Profile "cilium-577222" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-577222"

>>> host: docker system info:
* Profile "cilium-577222" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-577222"

>>> host: cri-docker daemon status:
* Profile "cilium-577222" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-577222"

>>> host: cri-docker daemon config:
* Profile "cilium-577222" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-577222"

>>> host: /etc/systemd/system/cri-docker.service.d/10-cni.conf:
* Profile "cilium-577222" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-577222"

>>> host: /usr/lib/systemd/system/cri-docker.service:
* Profile "cilium-577222" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-577222"

>>> host: cri-dockerd version:
* Profile "cilium-577222" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-577222"

>>> host: containerd daemon status:
* Profile "cilium-577222" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-577222"

>>> host: containerd daemon config:
* Profile "cilium-577222" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-577222"

>>> host: /lib/systemd/system/containerd.service:
* Profile "cilium-577222" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-577222"

>>> host: /etc/containerd/config.toml:
* Profile "cilium-577222" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-577222"

>>> host: containerd config dump:
* Profile "cilium-577222" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-577222"

>>> host: crio daemon status:
* Profile "cilium-577222" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-577222"

>>> host: crio daemon config:
* Profile "cilium-577222" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-577222"

>>> host: /etc/crio:
* Profile "cilium-577222" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-577222"

>>> host: crio config:
* Profile "cilium-577222" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-577222"

----------------------- debugLogs end: cilium-577222 [took: 3.379675958s] --------------------------------
helpers_test.go:175: Cleaning up "cilium-577222" profile ...
helpers_test.go:178: (dbg) Run:  out/minikube-linux-amd64 delete -p cilium-577222
--- SKIP: TestNetworkPlugins/group/cilium (3.51s)

TestStartStop/group/disable-driver-mounts (0.43s)

=== RUN   TestStartStop/group/disable-driver-mounts
=== PAUSE TestStartStop/group/disable-driver-mounts

=== CONT  TestStartStop/group/disable-driver-mounts
start_stop_delete_test.go:103: skipping TestStartStop/group/disable-driver-mounts - only runs on virtualbox
helpers_test.go:175: Cleaning up "disable-driver-mounts-512057" profile ...
helpers_test.go:178: (dbg) Run:  out/minikube-linux-amd64 delete -p disable-driver-mounts-512057
--- SKIP: TestStartStop/group/disable-driver-mounts (0.43s)