=== RUN TestFunctional/serial/ComponentHealth
functional_test.go:805: (dbg) Run: kubectl --context functional-20220511225632-7294 get po -l tier=control-plane -n kube-system -o=json
functional_test.go:820: etcd phase: Running
functional_test.go:830: etcd status: Ready
functional_test.go:820: kube-apiserver phase: Running
functional_test.go:828: kube-apiserver is not Ready: {Phase:Running Conditions:[{Type:Initialized Status:True} {Type:Ready Status:False} {Type:ContainersReady Status:False} {Type:PodScheduled Status:True}] Message: Reason: HostIP:192.168.49.2 PodIP:192.168.49.2 StartTime:2022-05-11 22:56:56 +0000 UTC ContainerStatuses:[{Name:kube-apiserver State:{Waiting:<nil> Running:0xc00000fa88 Terminated:<nil>} LastTerminationState:{Waiting:<nil> Running:<nil> Terminated:0xc0006e8310} Ready:false RestartCount:1 Image:k8s.gcr.io/kube-apiserver:v1.23.5 ImageID:docker-pullable://k8s.gcr.io/kube-apiserver@sha256:ddf5bf7196eb534271f9e5d403f4da19838d5610bb5ca191001bde5f32b5492e ContainerID:docker://640a51c74016597821125f5f85706eddec23ca6c17e7dc241da0fd1e1f46302c}]}
functional_test.go:820: kube-controller-manager phase: Running
functional_test.go:830: kube-controller-manager status: Ready
functional_test.go:820: kube-scheduler phase: Running
functional_test.go:830: kube-scheduler status: Ready
helpers_test.go:222: -----------------------post-mortem--------------------------------
helpers_test.go:230: ======> post-mortem[TestFunctional/serial/ComponentHealth]: docker inspect <======
helpers_test.go:231: (dbg) Run: docker inspect functional-20220511225632-7294
helpers_test.go:235: (dbg) docker inspect functional-20220511225632-7294:
-- stdout --
[
{
"Id": "297a9b29d75379ae614ab55c9b88719b8a750dbd7b65f91c436e1ceedcd73700",
"Created": "2022-05-11T22:56:40.619241008Z",
"Path": "/usr/local/bin/entrypoint",
"Args": [
"/sbin/init"
],
"State": {
"Status": "running",
"Running": true,
"Paused": false,
"Restarting": false,
"OOMKilled": false,
"Dead": false,
"Pid": 29659,
"ExitCode": 0,
"Error": "",
"StartedAt": "2022-05-11T22:56:40.989966832Z",
"FinishedAt": "0001-01-01T00:00:00Z"
},
"Image": "sha256:8a42e1145657f551cd435eddb43b96ab44d0facbe44106da934225366eeb7757",
"ResolvConfPath": "/var/lib/docker/containers/297a9b29d75379ae614ab55c9b88719b8a750dbd7b65f91c436e1ceedcd73700/resolv.conf",
"HostnamePath": "/var/lib/docker/containers/297a9b29d75379ae614ab55c9b88719b8a750dbd7b65f91c436e1ceedcd73700/hostname",
"HostsPath": "/var/lib/docker/containers/297a9b29d75379ae614ab55c9b88719b8a750dbd7b65f91c436e1ceedcd73700/hosts",
"LogPath": "/var/lib/docker/containers/297a9b29d75379ae614ab55c9b88719b8a750dbd7b65f91c436e1ceedcd73700/297a9b29d75379ae614ab55c9b88719b8a750dbd7b65f91c436e1ceedcd73700-json.log",
"Name": "/functional-20220511225632-7294",
"RestartCount": 0,
"Driver": "overlay2",
"Platform": "linux",
"MountLabel": "",
"ProcessLabel": "",
"AppArmorProfile": "unconfined",
"ExecIDs": null,
"HostConfig": {
"Binds": [
"/lib/modules:/lib/modules:ro",
"functional-20220511225632-7294:/var"
],
"ContainerIDFile": "",
"LogConfig": {
"Type": "json-file",
"Config": {}
},
"NetworkMode": "functional-20220511225632-7294",
"PortBindings": {
"22/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"2376/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"32443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"5000/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"8441/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
]
},
"RestartPolicy": {
"Name": "no",
"MaximumRetryCount": 0
},
"AutoRemove": false,
"VolumeDriver": "",
"VolumesFrom": null,
"CapAdd": null,
"CapDrop": null,
"CgroupnsMode": "host",
"Dns": [],
"DnsOptions": [],
"DnsSearch": [],
"ExtraHosts": null,
"GroupAdd": null,
"IpcMode": "private",
"Cgroup": "",
"Links": null,
"OomScoreAdj": 0,
"PidMode": "",
"Privileged": true,
"PublishAllPorts": false,
"ReadonlyRootfs": false,
"SecurityOpt": [
"seccomp=unconfined",
"apparmor=unconfined",
"label=disable"
],
"Tmpfs": {
"/run": "",
"/tmp": ""
},
"UTSMode": "",
"UsernsMode": "",
"ShmSize": 67108864,
"Runtime": "runc",
"ConsoleSize": [
0,
0
],
"Isolation": "",
"CpuShares": 0,
"Memory": 4194304000,
"NanoCpus": 2000000000,
"CgroupParent": "",
"BlkioWeight": 0,
"BlkioWeightDevice": [],
"BlkioDeviceReadBps": null,
"BlkioDeviceWriteBps": null,
"BlkioDeviceReadIOps": null,
"BlkioDeviceWriteIOps": null,
"CpuPeriod": 0,
"CpuQuota": 0,
"CpuRealtimePeriod": 0,
"CpuRealtimeRuntime": 0,
"CpusetCpus": "",
"CpusetMems": "",
"Devices": [],
"DeviceCgroupRules": null,
"DeviceRequests": null,
"KernelMemory": 0,
"KernelMemoryTCP": 0,
"MemoryReservation": 0,
"MemorySwap": 8388608000,
"MemorySwappiness": null,
"OomKillDisable": false,
"PidsLimit": null,
"Ulimits": null,
"CpuCount": 0,
"CpuPercent": 0,
"IOMaximumIOps": 0,
"IOMaximumBandwidth": 0,
"MaskedPaths": null,
"ReadonlyPaths": null
},
"GraphDriver": {
"Data": {
"LowerDir": "/var/lib/docker/overlay2/483b0956a4e375533cceb20533b79b8ea13606751224172f6843ef594e2d9e57-init/diff:/var/lib/docker/overlay2/481ef5eb4df205b13d647c107a3101bb0cfb2ac6238024ebbdc415acba840ac3/diff:/var/lib/docker/overlay2/44ef5ffd67acceb963fbf4cdcde72e27eaf67db91720a5686c47a2ae719809f9/diff:/var/lib/docker/overlay2/7ff54885d7e73b28c072dd4c473d278ed1466486f2f98179ee07e6c6422805c9/diff:/var/lib/docker/overlay2/d0b295cb8ada7da56d6549b14ca3e7b6a9e2afc0be8e503095107cac74d2a3f7/diff:/var/lib/docker/overlay2/3fdb692340656f907b8a43439a8391a2e69c4a45237c37e1d60c8ab8b18134de/diff:/var/lib/docker/overlay2/ce96c5d9236d7dbf050f4a26d46beafb112fcdb1b99fd0e59aab0bf3b193fb31/diff:/var/lib/docker/overlay2/362cc1c81285daac3e4db5af5bf8bbb2629e6523f1eb8fc17820bea7a6d9baf6/diff:/var/lib/docker/overlay2/aeb2974007b88ff3d614b4625092ea83c3c6078ba4a81d6d9f963be18e81fe69/diff:/var/lib/docker/overlay2/618b7d0e6c402c44813c15391c811c50273ecca1c7afa805dc1154ac15783fd8/diff:/var/lib/docker/overlay2/518382
718f741ff88f2a15b4742b5269f766985211c081e1294953249c9d2f18/diff:/var/lib/docker/overlay2/29c1818c997ca6b4d1669cb9fbf0b9e9952cb5a4f75318dc83610d05108109e7/diff:/var/lib/docker/overlay2/4ca08ab854a5bc4ea21b75f275f1abdadb03b79d8d316cb22c34eebb9d7db763/diff:/var/lib/docker/overlay2/d01a62458c8e9ffd29f4eb55dea1bf7e3b9f40b2cf97aac0dc26e6905158a6e1/diff:/var/lib/docker/overlay2/df8cbcf60376f50c16dc844babd34f14f0025fb70e372d63939b7843aaaf573a/diff:/var/lib/docker/overlay2/9841b2c5577feffb0829804aae929612848d09bff6890f646fef522fff253805/diff:/var/lib/docker/overlay2/dd179b3df1d8d5cb134817000dbac174dda79dcba018a0e1463092ab3bee5917/diff:/var/lib/docker/overlay2/ab000a8fb4aab002b6741795a30b9f4a2f4a9991a55186a5c33224b5c645dbd9/diff:/var/lib/docker/overlay2/3c9e5cab81e8274fae5913e603b8af09571cdeb02c5901d3a05c3266295c1a5f/diff:/var/lib/docker/overlay2/2f020f41c690b4ba78947fd0a89c7cd02e66039c58d08826e064bf3c73c1a235/diff:/var/lib/docker/overlay2/6b6ece698eef5a9aff115bcb30a9dd6a7c45a281e1782b655b5ca871e91cc39b/diff:/var/lib/d
ocker/overlay2/d9cff6a065f8a5c1ebddb19d216253876ced33e502d362c8283655902a4e6a18/diff:/var/lib/docker/overlay2/55476b4128c7c3982f852fd6806bb6fbb16f54fbd0be9d96233867c13fc6e4af/diff:/var/lib/docker/overlay2/e6a57691d6921e1675f93cee749cc18b4b353f7adbd14d05af0da48cf32cddba/diff:/var/lib/docker/overlay2/0c19aee5e4c0dfebe55be9d7eb972a0dcc84ce9a59283396e5d0213269b405e1/diff:/var/lib/docker/overlay2/5f1b35290c6d86412de46be9df212ef0c94759a5eaa6519e24944f9d040cb6d5/diff:/var/lib/docker/overlay2/a2c96b37966fd7143034837aa05c57d00661838f1f42c9572899bd94d5bfec2d/diff:/var/lib/docker/overlay2/83aeec0e301d1fcfc1f247abd5e9d59c1cce47d9836550601333c482862eb3c4/diff:/var/lib/docker/overlay2/e12c190d34c775f1c32c2b64baa66a78fbdc83dbfa90ce6d7bd58c09ded96d69/diff:/var/lib/docker/overlay2/3983af7d86faf879b24e2a4906a9946e109a79baef58e4c208852845cce2a82a/diff:/var/lib/docker/overlay2/d11872013c6cbd6629a95f12c5e9d792b8c8f3a2d554398703c9d79881201733/diff:/var/lib/docker/overlay2/8764f20fe82fa0293ca9d0a908b3a74b268bc0eb064fa5db25d594c9099
dc7a9/diff:/var/lib/docker/overlay2/b998d9c59e72d7a6a39120be33a9c3929dcec3a21ce94525c7eb0026a20866ba/diff:/var/lib/docker/overlay2/ca0bfa3e2e36eeb161f4e2d4af49743810843aec10f16499ff3965530201431a/diff:/var/lib/docker/overlay2/7fba6225316b01ff4b801ac40682a3793ee00e3bdfd842aab2846f7c617f7e25/diff:/var/lib/docker/overlay2/82bcd40ee054fc1e5a22d92cb6bc25ef0b1aa8db3ed611fc2d061d487821d4f2/diff:/var/lib/docker/overlay2/a6344600b434bec2a336e523b24640486430591b0f091ab261fb991ffad5b728/diff:/var/lib/docker/overlay2/2627822a91f93e2419dda6670baae5f2d4643ed0ff2053b2326e6ce946e4f47b/diff:/var/lib/docker/overlay2/f1a3997c73ab9f38b321d361e56d041da5d5eebf5c95a4d5193e9269be85c82c/diff:/var/lib/docker/overlay2/a9432d575a3e1768603824c7f970bdca3828d2be7a0f213f8b5cda4106c3f9cf/diff:/var/lib/docker/overlay2/fa10e95b75bb4119664377fe1dbdbe3ba9905415b00e4756dc283d7fe361d3c0/diff:/var/lib/docker/overlay2/b15d5e2ea2d5aaffaeb03b448f01601b3280b5a8c365ab48d985c7dfa93570db/diff:/var/lib/docker/overlay2/708816f20892c0e4b94cbe8e80b169fff54ffd
870bcde395bd8163dd03b25d0f/diff",
"MergedDir": "/var/lib/docker/overlay2/483b0956a4e375533cceb20533b79b8ea13606751224172f6843ef594e2d9e57/merged",
"UpperDir": "/var/lib/docker/overlay2/483b0956a4e375533cceb20533b79b8ea13606751224172f6843ef594e2d9e57/diff",
"WorkDir": "/var/lib/docker/overlay2/483b0956a4e375533cceb20533b79b8ea13606751224172f6843ef594e2d9e57/work"
},
"Name": "overlay2"
},
"Mounts": [
{
"Type": "bind",
"Source": "/lib/modules",
"Destination": "/lib/modules",
"Mode": "ro",
"RW": false,
"Propagation": "rprivate"
},
{
"Type": "volume",
"Name": "functional-20220511225632-7294",
"Source": "/var/lib/docker/volumes/functional-20220511225632-7294/_data",
"Destination": "/var",
"Driver": "local",
"Mode": "z",
"RW": true,
"Propagation": ""
}
],
"Config": {
"Hostname": "functional-20220511225632-7294",
"Domainname": "",
"User": "root",
"AttachStdin": false,
"AttachStdout": false,
"AttachStderr": false,
"ExposedPorts": {
"22/tcp": {},
"2376/tcp": {},
"32443/tcp": {},
"5000/tcp": {},
"8441/tcp": {}
},
"Tty": true,
"OpenStdin": false,
"StdinOnce": false,
"Env": [
"container=docker",
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
],
"Cmd": null,
"Image": "gcr.io/k8s-minikube/kicbase-builds:v0.0.30-1652251400-14138@sha256:8c847a4aa2afc5a7fc659f9731046bf9cc7e788283deecc83c8633014fb0828a",
"Volumes": null,
"WorkingDir": "",
"Entrypoint": [
"/usr/local/bin/entrypoint",
"/sbin/init"
],
"OnBuild": null,
"Labels": {
"created_by.minikube.sigs.k8s.io": "true",
"mode.minikube.sigs.k8s.io": "functional-20220511225632-7294",
"name.minikube.sigs.k8s.io": "functional-20220511225632-7294",
"role.minikube.sigs.k8s.io": ""
},
"StopSignal": "SIGRTMIN+3"
},
"NetworkSettings": {
"Bridge": "",
"SandboxID": "f5692a00c47bb6e269a72d36be2abfd0bb10f0fff3ae9930094e35c3b20de923",
"HairpinMode": false,
"LinkLocalIPv6Address": "",
"LinkLocalIPv6PrefixLen": 0,
"Ports": {
"22/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "49167"
}
],
"2376/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "49166"
}
],
"32443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "49163"
}
],
"5000/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "49165"
}
],
"8441/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "49164"
}
]
},
"SandboxKey": "/var/run/docker/netns/f5692a00c47b",
"SecondaryIPAddresses": null,
"SecondaryIPv6Addresses": null,
"EndpointID": "",
"Gateway": "",
"GlobalIPv6Address": "",
"GlobalIPv6PrefixLen": 0,
"IPAddress": "",
"IPPrefixLen": 0,
"IPv6Gateway": "",
"MacAddress": "",
"Networks": {
"functional-20220511225632-7294": {
"IPAMConfig": {
"IPv4Address": "192.168.49.2"
},
"Links": null,
"Aliases": [
"297a9b29d753",
"functional-20220511225632-7294"
],
"NetworkID": "076a6e5580142bce22350a0b3a99d49b2e6d366be2df778a62fec88cae17dac6",
"EndpointID": "98f9ea35af862e48882bd02b47885c015b7cd19f8dc83bff4751c03e2d458cc3",
"Gateway": "192.168.49.1",
"IPAddress": "192.168.49.2",
"IPPrefixLen": 24,
"IPv6Gateway": "",
"GlobalIPv6Address": "",
"GlobalIPv6PrefixLen": 0,
"MacAddress": "02:42:c0:a8:31:02",
"DriverOpts": null
}
}
}
}
]
-- /stdout --
helpers_test.go:239: (dbg) Run: out/minikube-linux-amd64 status --format={{.Host}} -p functional-20220511225632-7294 -n functional-20220511225632-7294
helpers_test.go:244: <<< TestFunctional/serial/ComponentHealth FAILED: start of post-mortem logs <<<
helpers_test.go:245: ======> post-mortem[TestFunctional/serial/ComponentHealth]: minikube logs <======
helpers_test.go:247: (dbg) Run: out/minikube-linux-amd64 -p functional-20220511225632-7294 logs -n 25
helpers_test.go:247: (dbg) Done: out/minikube-linux-amd64 -p functional-20220511225632-7294 logs -n 25: (1.330982527s)
helpers_test.go:252: TestFunctional/serial/ComponentHealth logs:
-- stdout --
*
* ==> Audit <==
* |---------|--------------------------------------------------------------------------|--------------------------------|---------|---------|---------------------|---------------------|
| Command | Args | Profile | User | Version | Start Time | End Time |
|---------|--------------------------------------------------------------------------|--------------------------------|---------|---------|---------------------|---------------------|
| pause | nospam-20220511225550-7294 | nospam-20220511225550-7294 | jenkins | v1.25.2 | 11 May 22 22:56 UTC | 11 May 22 22:56 UTC |
| | --log_dir | | | | | |
| | /tmp/nospam-20220511225550-7294 | | | | | |
| | pause | | | | | |
| unpause | nospam-20220511225550-7294 | nospam-20220511225550-7294 | jenkins | v1.25.2 | 11 May 22 22:56 UTC | 11 May 22 22:56 UTC |
| | --log_dir | | | | | |
| | /tmp/nospam-20220511225550-7294 | | | | | |
| | unpause | | | | | |
| unpause | nospam-20220511225550-7294 | nospam-20220511225550-7294 | jenkins | v1.25.2 | 11 May 22 22:56 UTC | 11 May 22 22:56 UTC |
| | --log_dir | | | | | |
| | /tmp/nospam-20220511225550-7294 | | | | | |
| | unpause | | | | | |
| unpause | nospam-20220511225550-7294 | nospam-20220511225550-7294 | jenkins | v1.25.2 | 11 May 22 22:56 UTC | 11 May 22 22:56 UTC |
| | --log_dir | | | | | |
| | /tmp/nospam-20220511225550-7294 | | | | | |
| | unpause | | | | | |
| stop | nospam-20220511225550-7294 | nospam-20220511225550-7294 | jenkins | v1.25.2 | 11 May 22 22:56 UTC | 11 May 22 22:56 UTC |
| | --log_dir | | | | | |
| | /tmp/nospam-20220511225550-7294 | | | | | |
| | stop | | | | | |
| stop | nospam-20220511225550-7294 | nospam-20220511225550-7294 | jenkins | v1.25.2 | 11 May 22 22:56 UTC | 11 May 22 22:56 UTC |
| | --log_dir | | | | | |
| | /tmp/nospam-20220511225550-7294 | | | | | |
| | stop | | | | | |
| stop | nospam-20220511225550-7294 | nospam-20220511225550-7294 | jenkins | v1.25.2 | 11 May 22 22:56 UTC | 11 May 22 22:56 UTC |
| | --log_dir | | | | | |
| | /tmp/nospam-20220511225550-7294 | | | | | |
| | stop | | | | | |
| delete | -p nospam-20220511225550-7294 | nospam-20220511225550-7294 | jenkins | v1.25.2 | 11 May 22 22:56 UTC | 11 May 22 22:56 UTC |
| start | -p | functional-20220511225632-7294 | jenkins | v1.25.2 | 11 May 22 22:56 UTC | 11 May 22 22:57 UTC |
| | functional-20220511225632-7294 | | | | | |
| | --memory=4000 | | | | | |
| | --apiserver-port=8441 | | | | | |
| | --wait=all --driver=docker | | | | | |
| | --container-runtime=docker | | | | | |
| start | -p | functional-20220511225632-7294 | jenkins | v1.25.2 | 11 May 22 22:57 UTC | 11 May 22 22:57 UTC |
| | functional-20220511225632-7294 | | | | | |
| | --alsologtostderr -v=8 | | | | | |
| cache | functional-20220511225632-7294 | functional-20220511225632-7294 | jenkins | v1.25.2 | 11 May 22 22:57 UTC | 11 May 22 22:57 UTC |
| | cache add k8s.gcr.io/pause:3.1 | | | | | |
| cache | functional-20220511225632-7294 | functional-20220511225632-7294 | jenkins | v1.25.2 | 11 May 22 22:57 UTC | 11 May 22 22:57 UTC |
| | cache add k8s.gcr.io/pause:3.3 | | | | | |
| cache | functional-20220511225632-7294 | functional-20220511225632-7294 | jenkins | v1.25.2 | 11 May 22 22:57 UTC | 11 May 22 22:57 UTC |
| | cache add | | | | | |
| | k8s.gcr.io/pause:latest | | | | | |
| cache | functional-20220511225632-7294 cache add | functional-20220511225632-7294 | jenkins | v1.25.2 | 11 May 22 22:57 UTC | 11 May 22 22:57 UTC |
| | minikube-local-cache-test:functional-20220511225632-7294 | | | | | |
| cache | functional-20220511225632-7294 cache delete | functional-20220511225632-7294 | jenkins | v1.25.2 | 11 May 22 22:57 UTC | 11 May 22 22:57 UTC |
| | minikube-local-cache-test:functional-20220511225632-7294 | | | | | |
| cache | delete k8s.gcr.io/pause:3.3 | minikube | jenkins | v1.25.2 | 11 May 22 22:57 UTC | 11 May 22 22:57 UTC |
| cache | list | minikube | jenkins | v1.25.2 | 11 May 22 22:57 UTC | 11 May 22 22:57 UTC |
| ssh | functional-20220511225632-7294 | functional-20220511225632-7294 | jenkins | v1.25.2 | 11 May 22 22:57 UTC | 11 May 22 22:57 UTC |
| | ssh sudo crictl images | | | | | |
| ssh | functional-20220511225632-7294 | functional-20220511225632-7294 | jenkins | v1.25.2 | 11 May 22 22:57 UTC | 11 May 22 22:57 UTC |
| | ssh sudo docker rmi | | | | | |
| | k8s.gcr.io/pause:latest | | | | | |
| cache | functional-20220511225632-7294 | functional-20220511225632-7294 | jenkins | v1.25.2 | 11 May 22 22:57 UTC | 11 May 22 22:57 UTC |
| | cache reload | | | | | |
| ssh | functional-20220511225632-7294 | functional-20220511225632-7294 | jenkins | v1.25.2 | 11 May 22 22:57 UTC | 11 May 22 22:57 UTC |
| | ssh sudo crictl inspecti | | | | | |
| | k8s.gcr.io/pause:latest | | | | | |
| cache | delete k8s.gcr.io/pause:3.1 | minikube | jenkins | v1.25.2 | 11 May 22 22:57 UTC | 11 May 22 22:57 UTC |
| cache | delete k8s.gcr.io/pause:latest | minikube | jenkins | v1.25.2 | 11 May 22 22:57 UTC | 11 May 22 22:57 UTC |
| kubectl | functional-20220511225632-7294 | functional-20220511225632-7294 | jenkins | v1.25.2 | 11 May 22 22:57 UTC | 11 May 22 22:57 UTC |
| | kubectl -- --context | | | | | |
| | functional-20220511225632-7294 | | | | | |
| | get pods | | | | | |
| start | -p functional-20220511225632-7294 | functional-20220511225632-7294 | jenkins | v1.25.2 | 11 May 22 22:57 UTC | 11 May 22 22:57 UTC |
| | --extra-config=apiserver.enable-admission-plugins=NamespaceAutoProvision | | | | | |
| | --wait=all | | | | | |
|---------|--------------------------------------------------------------------------|--------------------------------|---------|---------|---------------------|---------------------|
*
* ==> Last Start <==
* Log file created at: 2022/05/11 22:57:26
Running on machine: ubuntu-20-agent-14
Binary: Built with gc go1.18.1 for linux/amd64
Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg
I0511 22:57:26.299711 35457 out.go:296] Setting OutFile to fd 1 ...
I0511 22:57:26.299843 35457 out.go:343] TERM=,COLORTERM=, which probably does not support color
I0511 22:57:26.299846 35457 out.go:309] Setting ErrFile to fd 2...
I0511 22:57:26.299850 35457 out.go:343] TERM=,COLORTERM=, which probably does not support color
I0511 22:57:26.299951 35457 root.go:322] Updating PATH: /home/jenkins/minikube-integration/linux-amd64-docker-docker-13639-3547-60328d4d40a11ac7c18c6243f597bcfbb3050148/.minikube/bin
I0511 22:57:26.300218 35457 out.go:303] Setting JSON to false
I0511 22:57:26.301414 35457 start.go:115] hostinfo: {"hostname":"ubuntu-20-agent-14","uptime":2388,"bootTime":1652307458,"procs":529,"os":"linux","platform":"ubuntu","platformFamily":"debian","platformVersion":"20.04","kernelVersion":"5.13.0-1025-gcp","kernelArch":"x86_64","virtualizationSystem":"kvm","virtualizationRole":"guest","hostId":"591c9f12-2938-3743-e2bf-c56a050d43d1"}
I0511 22:57:26.301485 35457 start.go:125] virtualization: kvm guest
I0511 22:57:26.304447 35457 out.go:177] * [functional-20220511225632-7294] minikube v1.25.2 on Ubuntu 20.04 (kvm/amd64)
I0511 22:57:26.306068 35457 notify.go:193] Checking for updates...
I0511 22:57:26.307958 35457 out.go:177] - MINIKUBE_LOCATION=13639
I0511 22:57:26.309833 35457 out.go:177] - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
I0511 22:57:26.311746 35457 out.go:177] - KUBECONFIG=/home/jenkins/minikube-integration/linux-amd64-docker-docker-13639-3547-60328d4d40a11ac7c18c6243f597bcfbb3050148/kubeconfig
I0511 22:57:26.313435 35457 out.go:177] - MINIKUBE_HOME=/home/jenkins/minikube-integration/linux-amd64-docker-docker-13639-3547-60328d4d40a11ac7c18c6243f597bcfbb3050148/.minikube
I0511 22:57:26.315074 35457 out.go:177] - MINIKUBE_BIN=out/minikube-linux-amd64
I0511 22:57:26.317023 35457 config.go:178] Loaded profile config "functional-20220511225632-7294": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.23.5
I0511 22:57:26.317070 35457 driver.go:358] Setting default libvirt URI to qemu:///system
I0511 22:57:26.357468 35457 docker.go:137] docker version: linux-20.10.15
I0511 22:57:26.357567 35457 cli_runner.go:164] Run: docker system info --format "{{json .}}"
I0511 22:57:26.460382 35457 info.go:265] docker info: {ID:TS6T:UINC:MIYS:RZPA:KS6T:4JQK:7JHN:D6RA:LDP2:MHAE:G32M:C5NQ Containers:1 ContainersRunning:1 ContainersPaused:0 ContainersStopped:0 Images:63 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local logentries splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:true KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:true BridgeNfIP6Tables:true Debug:false NFd:34 OomKillDisable:true NGoroutines:39 SystemTime:2022-05-11 22:57:26.386005201 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.13.0-1025-gcp OperatingSystem:Ubuntu 20.04.4 LTS OSType:linux Architecture:x86_64 IndexSer
verAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:8 MemTotal:33662795776 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ubuntu-20-agent-14 Labels:[] ExperimentalBuild:false ServerVersion:20.10.15 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:212e8b6fa2f44b9c21b2798135fc6fb7c53efc16 Expected:212e8b6fa2f44b9c21b2798135fc6fb7c53efc16} RuncCommit:{ID:v1.1.1-0-g52de29d Expected:v1.1.1-0-g52de29d} InitCommit:{ID:de40ad0 Expected:de40ad0} SecurityOptions:[name=apparmor name=seccomp,profile=default] ProductLicense: Warnings:<nil> ServerErrors:[] Clie
ntInfo:{Debug:false Plugins:[map[Experimental:true Name:app Path:/usr/libexec/docker/cli-plugins/docker-app SchemaVersion:0.1.0 ShortDescription:Docker App Vendor:Docker Inc. Version:v0.9.1-beta3] map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.8.2-docker] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.5.0] map[Name:scan Path:/usr/libexec/docker/cli-plugins/docker-scan SchemaVersion:0.1.0 ShortDescription:Docker Scan Vendor:Docker Inc. Version:v0.17.0]] Warnings:<nil>}}
I0511 22:57:26.460485 35457 docker.go:254] overlay module found
I0511 22:57:26.462947 35457 out.go:177] * Using the docker driver based on existing profile
I0511 22:57:26.464441 35457 start.go:284] selected driver: docker
I0511 22:57:26.464447 35457 start.go:801] validating driver "docker" against &{Name:functional-20220511225632-7294 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.30-1652251400-14138@sha256:8c847a4aa2afc5a7fc659f9731046bf9cc7e788283deecc83c8633014fb0828a Memory:4000 CPUs:2 DiskSize:20000 VMDriver: Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:0 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.23.5 ClusterName:functional-20220511225632-7294 Namespace:de
fault APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin: FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI: NodeIP: NodePort:8441 NodeName:} Nodes:[{Name: IP:192.168.49.2 Port:8441 KubernetesVersion:v1.23.5 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[ambassador:false auto-pause:false csi-hostpath-driver:false dashboard:false default-storageclass:true efk:false freshpod:false gcp-auth:false gvisor:false helm-tiller:false ingress:false ingress-dns:false istio:false istio-provisioner:false kong:false kubevirt:false logviewer:false metallb:false metrics-server:false nvidia-driver-installer:false nvidia-gpu-device-plugin:false olm:false pod-security-policy:false portainer:false registry:false registry-aliases:false registry-creds:false storage-provisioner:true storage-provision
er-gluster:false volumesnapshots:false] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false}
I0511 22:57:26.464557 35457 start.go:812] status for docker: {Installed:true Healthy:true Running:false NeedsImprovement:false Error:<nil> Reason: Fix: Doc: Version:}
I0511 22:57:26.464721 35457 cli_runner.go:164] Run: docker system info --format "{{json .}}"
I0511 22:57:26.563830 35457 info.go:265] docker info: {ID:TS6T:UINC:MIYS:RZPA:KS6T:4JQK:7JHN:D6RA:LDP2:MHAE:G32M:C5NQ Containers:1 ContainersRunning:1 ContainersPaused:0 ContainersStopped:0 Images:63 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local logentries splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:true KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:true BridgeNfIP6Tables:true Debug:false NFd:34 OomKillDisable:true NGoroutines:39 SystemTime:2022-05-11 22:57:26.493941054 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.13.0-1025-gcp OperatingSystem:Ubuntu 20.04.4 LTS OSType:linux Architecture:x86_64 IndexSer
verAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:8 MemTotal:33662795776 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ubuntu-20-agent-14 Labels:[] ExperimentalBuild:false ServerVersion:20.10.15 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:212e8b6fa2f44b9c21b2798135fc6fb7c53efc16 Expected:212e8b6fa2f44b9c21b2798135fc6fb7c53efc16} RuncCommit:{ID:v1.1.1-0-g52de29d Expected:v1.1.1-0-g52de29d} InitCommit:{ID:de40ad0 Expected:de40ad0} SecurityOptions:[name=apparmor name=seccomp,profile=default] ProductLicense: Warnings:<nil> ServerErrors:[] Clie
ntInfo:{Debug:false Plugins:[map[Experimental:true Name:app Path:/usr/libexec/docker/cli-plugins/docker-app SchemaVersion:0.1.0 ShortDescription:Docker App Vendor:Docker Inc. Version:v0.9.1-beta3] map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.8.2-docker] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.5.0] map[Name:scan Path:/usr/libexec/docker/cli-plugins/docker-scan SchemaVersion:0.1.0 ShortDescription:Docker Scan Vendor:Docker Inc. Version:v0.17.0]] Warnings:<nil>}}
I0511 22:57:26.564371 35457 start_flags.go:847] Waiting for all components: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
I0511 22:57:26.564388 35457 cni.go:95] Creating CNI manager for ""
I0511 22:57:26.564394 35457 cni.go:169] CNI unnecessary in this configuration, recommending no CNI
I0511 22:57:26.564400 35457 start_flags.go:306] config:
{Name:functional-20220511225632-7294 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.30-1652251400-14138@sha256:8c847a4aa2afc5a7fc659f9731046bf9cc7e788283deecc83c8633014fb0828a Memory:4000 CPUs:2 DiskSize:20000 VMDriver: Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:0 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.23.5 ClusterName:functional-20220511225632-7294 Namespace:default APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:clust
er.local ContainerRuntime:docker CRISocket: NetworkPlugin: FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: ExtraOptions:[{Component:apiserver Key:enable-admission-plugins Value:NamespaceAutoProvision}] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI: NodeIP: NodePort:8441 NodeName:} Nodes:[{Name: IP:192.168.49.2 Port:8441 KubernetesVersion:v1.23.5 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[ambassador:false auto-pause:false csi-hostpath-driver:false dashboard:false default-storageclass:true efk:false freshpod:false gcp-auth:false gvisor:false helm-tiller:false ingress:false ingress-dns:false istio:false istio-provisioner:false kong:false kubevirt:false logviewer:false metallb:false metrics-server:false nvidia-driver-installer:false nvidia-gpu-device-plugin:false olm:false pod-security-policy:false portainer:false registry:false registry-aliases:false registry-creds:false storage-provisioner:true storage-provisione
r-gluster:false volumesnapshots:false] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false}
I0511 22:57:26.566990 35457 out.go:177] * Starting control plane node functional-20220511225632-7294 in cluster functional-20220511225632-7294
I0511 22:57:26.568711 35457 cache.go:120] Beginning downloading kic base image for docker with docker
I0511 22:57:26.570103 35457 out.go:177] * Pulling base image ...
I0511 22:57:26.571496 35457 preload.go:132] Checking if preload exists for k8s version v1.23.5 and runtime docker
I0511 22:57:26.571537 35457 preload.go:148] Found local preload: /home/jenkins/minikube-integration/linux-amd64-docker-docker-13639-3547-60328d4d40a11ac7c18c6243f597bcfbb3050148/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.23.5-docker-overlay2-amd64.tar.lz4
I0511 22:57:26.571544 35457 cache.go:57] Caching tarball of preloaded images
I0511 22:57:26.571613 35457 image.go:75] Checking for gcr.io/k8s-minikube/kicbase-builds:v0.0.30-1652251400-14138@sha256:8c847a4aa2afc5a7fc659f9731046bf9cc7e788283deecc83c8633014fb0828a in local docker daemon
I0511 22:57:26.571779 35457 preload.go:174] Found /home/jenkins/minikube-integration/linux-amd64-docker-docker-13639-3547-60328d4d40a11ac7c18c6243f597bcfbb3050148/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.23.5-docker-overlay2-amd64.tar.lz4 in cache, skipping download
I0511 22:57:26.571788 35457 cache.go:60] Finished verifying existence of preloaded tar for v1.23.5 on docker
I0511 22:57:26.571910 35457 profile.go:148] Saving config to /home/jenkins/minikube-integration/linux-amd64-docker-docker-13639-3547-60328d4d40a11ac7c18c6243f597bcfbb3050148/.minikube/profiles/functional-20220511225632-7294/config.json ...
I0511 22:57:26.615635 35457 image.go:79] Found gcr.io/k8s-minikube/kicbase-builds:v0.0.30-1652251400-14138@sha256:8c847a4aa2afc5a7fc659f9731046bf9cc7e788283deecc83c8633014fb0828a in local docker daemon, skipping pull
I0511 22:57:26.615656 35457 cache.go:141] gcr.io/k8s-minikube/kicbase-builds:v0.0.30-1652251400-14138@sha256:8c847a4aa2afc5a7fc659f9731046bf9cc7e788283deecc83c8633014fb0828a exists in daemon, skipping load
I0511 22:57:26.615674 35457 cache.go:206] Successfully downloaded all kic artifacts
I0511 22:57:26.615706 35457 start.go:352] acquiring machines lock for functional-20220511225632-7294: {Name:mk5a79e9556bd14104aeb40a2a2857a7cd7b6620 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I0511 22:57:26.615820 35457 start.go:356] acquired machines lock for "functional-20220511225632-7294" in 97.809µs
I0511 22:57:26.615837 35457 start.go:94] Skipping create...Using existing machine configuration
I0511 22:57:26.615841 35457 fix.go:55] fixHost starting:
I0511 22:57:26.616071 35457 cli_runner.go:164] Run: docker container inspect functional-20220511225632-7294 --format={{.State.Status}}
I0511 22:57:26.649570 35457 fix.go:103] recreateIfNeeded on functional-20220511225632-7294: state=Running err=<nil>
W0511 22:57:26.649589 35457 fix.go:129] unexpected machine state, will restart: <nil>
I0511 22:57:26.653290 35457 out.go:177] * Updating the running docker "functional-20220511225632-7294" container ...
I0511 22:57:26.655032 35457 machine.go:88] provisioning docker machine ...
I0511 22:57:26.655061 35457 ubuntu.go:169] provisioning hostname "functional-20220511225632-7294"
I0511 22:57:26.655106 35457 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" functional-20220511225632-7294
I0511 22:57:26.687284 35457 main.go:134] libmachine: Using SSH client type: native
I0511 22:57:26.687456 35457 main.go:134] libmachine: &{{{<nil> 0 [] [] []} docker [0x7da160] 0x7dd1c0 <nil> [] 0s} 127.0.0.1 49167 <nil> <nil>}
I0511 22:57:26.687469 35457 main.go:134] libmachine: About to run SSH command:
sudo hostname functional-20220511225632-7294 && echo "functional-20220511225632-7294" | sudo tee /etc/hostname
I0511 22:57:26.803241 35457 main.go:134] libmachine: SSH cmd err, output: <nil>: functional-20220511225632-7294
I0511 22:57:26.803316 35457 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" functional-20220511225632-7294
I0511 22:57:26.835722 35457 main.go:134] libmachine: Using SSH client type: native
I0511 22:57:26.835854 35457 main.go:134] libmachine: &{{{<nil> 0 [] [] []} docker [0x7da160] 0x7dd1c0 <nil> [] 0s} 127.0.0.1 49167 <nil> <nil>}
I0511 22:57:26.835866 35457 main.go:134] libmachine: About to run SSH command:
if ! grep -xq '.*\sfunctional-20220511225632-7294' /etc/hosts; then
if grep -xq '127.0.1.1\s.*' /etc/hosts; then
sudo sed -i 's/^127.0.1.1\s.*/127.0.1.1 functional-20220511225632-7294/g' /etc/hosts;
else
echo '127.0.1.1 functional-20220511225632-7294' | sudo tee -a /etc/hosts;
fi
fi
I0511 22:57:26.946015 35457 main.go:134] libmachine: SSH cmd err, output: <nil>:
I0511 22:57:26.946033 35457 ubuntu.go:175] set auth options {CertDir:/home/jenkins/minikube-integration/linux-amd64-docker-docker-13639-3547-60328d4d40a11ac7c18c6243f597bcfbb3050148/.minikube CaCertPath:/home/jenkins/minikube-integration/linux-amd64-docker-docker-13639-3547-60328d4d40a11ac7c18c6243f597bcfbb3050148/.minikube/certs/ca.pem CaPrivateKeyPath:/home/jenkins/minikube-integration/linux-amd64-docker-docker-13639-3547-60328d4d40a11ac7c18c6243f597bcfbb3050148/.minikube/certs/ca-key.pem CaCertRemotePath:/etc/docker/ca.pem ServerCertPath:/home/jenkins/minikube-integration/linux-amd64-docker-docker-13639-3547-60328d4d40a11ac7c18c6243f597bcfbb3050148/.minikube/machines/server.pem ServerKeyPath:/home/jenkins/minikube-integration/linux-amd64-docker-docker-13639-3547-60328d4d40a11ac7c18c6243f597bcfbb3050148/.minikube/machines/server-key.pem ClientKeyPath:/home/jenkins/minikube-integration/linux-amd64-docker-docker-13639-3547-60328d4d40a11ac7c18c6243f597bcfbb3050148/.minikube/certs/key.pem ServerCertR
emotePath:/etc/docker/server.pem ServerKeyRemotePath:/etc/docker/server-key.pem ClientCertPath:/home/jenkins/minikube-integration/linux-amd64-docker-docker-13639-3547-60328d4d40a11ac7c18c6243f597bcfbb3050148/.minikube/certs/cert.pem ServerCertSANs:[] StorePath:/home/jenkins/minikube-integration/linux-amd64-docker-docker-13639-3547-60328d4d40a11ac7c18c6243f597bcfbb3050148/.minikube}
I0511 22:57:26.946050 35457 ubuntu.go:177] setting up certificates
I0511 22:57:26.946057 35457 provision.go:83] configureAuth start
I0511 22:57:26.946096 35457 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" functional-20220511225632-7294
I0511 22:57:26.978133 35457 provision.go:138] copyHostCerts
I0511 22:57:26.978185 35457 exec_runner.go:144] found /home/jenkins/minikube-integration/linux-amd64-docker-docker-13639-3547-60328d4d40a11ac7c18c6243f597bcfbb3050148/.minikube/key.pem, removing ...
I0511 22:57:26.978197 35457 exec_runner.go:207] rm: /home/jenkins/minikube-integration/linux-amd64-docker-docker-13639-3547-60328d4d40a11ac7c18c6243f597bcfbb3050148/.minikube/key.pem
I0511 22:57:26.978260 35457 exec_runner.go:151] cp: /home/jenkins/minikube-integration/linux-amd64-docker-docker-13639-3547-60328d4d40a11ac7c18c6243f597bcfbb3050148/.minikube/certs/key.pem --> /home/jenkins/minikube-integration/linux-amd64-docker-docker-13639-3547-60328d4d40a11ac7c18c6243f597bcfbb3050148/.minikube/key.pem (1679 bytes)
I0511 22:57:26.978357 35457 exec_runner.go:144] found /home/jenkins/minikube-integration/linux-amd64-docker-docker-13639-3547-60328d4d40a11ac7c18c6243f597bcfbb3050148/.minikube/ca.pem, removing ...
I0511 22:57:26.978362 35457 exec_runner.go:207] rm: /home/jenkins/minikube-integration/linux-amd64-docker-docker-13639-3547-60328d4d40a11ac7c18c6243f597bcfbb3050148/.minikube/ca.pem
I0511 22:57:26.978387 35457 exec_runner.go:151] cp: /home/jenkins/minikube-integration/linux-amd64-docker-docker-13639-3547-60328d4d40a11ac7c18c6243f597bcfbb3050148/.minikube/certs/ca.pem --> /home/jenkins/minikube-integration/linux-amd64-docker-docker-13639-3547-60328d4d40a11ac7c18c6243f597bcfbb3050148/.minikube/ca.pem (1078 bytes)
I0511 22:57:26.978438 35457 exec_runner.go:144] found /home/jenkins/minikube-integration/linux-amd64-docker-docker-13639-3547-60328d4d40a11ac7c18c6243f597bcfbb3050148/.minikube/cert.pem, removing ...
I0511 22:57:26.978441 35457 exec_runner.go:207] rm: /home/jenkins/minikube-integration/linux-amd64-docker-docker-13639-3547-60328d4d40a11ac7c18c6243f597bcfbb3050148/.minikube/cert.pem
I0511 22:57:26.978459 35457 exec_runner.go:151] cp: /home/jenkins/minikube-integration/linux-amd64-docker-docker-13639-3547-60328d4d40a11ac7c18c6243f597bcfbb3050148/.minikube/certs/cert.pem --> /home/jenkins/minikube-integration/linux-amd64-docker-docker-13639-3547-60328d4d40a11ac7c18c6243f597bcfbb3050148/.minikube/cert.pem (1123 bytes)
I0511 22:57:26.978494 35457 provision.go:112] generating server cert: /home/jenkins/minikube-integration/linux-amd64-docker-docker-13639-3547-60328d4d40a11ac7c18c6243f597bcfbb3050148/.minikube/machines/server.pem ca-key=/home/jenkins/minikube-integration/linux-amd64-docker-docker-13639-3547-60328d4d40a11ac7c18c6243f597bcfbb3050148/.minikube/certs/ca.pem private-key=/home/jenkins/minikube-integration/linux-amd64-docker-docker-13639-3547-60328d4d40a11ac7c18c6243f597bcfbb3050148/.minikube/certs/ca-key.pem org=jenkins.functional-20220511225632-7294 san=[192.168.49.2 127.0.0.1 localhost 127.0.0.1 minikube functional-20220511225632-7294]
I0511 22:57:27.267427 35457 provision.go:172] copyRemoteCerts
I0511 22:57:27.267471 35457 ssh_runner.go:195] Run: sudo mkdir -p /etc/docker /etc/docker /etc/docker
I0511 22:57:27.267501 35457 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" functional-20220511225632-7294
I0511 22:57:27.300911 35457 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:49167 SSHKeyPath:/home/jenkins/minikube-integration/linux-amd64-docker-docker-13639-3547-60328d4d40a11ac7c18c6243f597bcfbb3050148/.minikube/machines/functional-20220511225632-7294/id_rsa Username:docker}
I0511 22:57:27.381508 35457 ssh_runner.go:362] scp /home/jenkins/minikube-integration/linux-amd64-docker-docker-13639-3547-60328d4d40a11ac7c18c6243f597bcfbb3050148/.minikube/certs/ca.pem --> /etc/docker/ca.pem (1078 bytes)
I0511 22:57:27.399033 35457 ssh_runner.go:362] scp /home/jenkins/minikube-integration/linux-amd64-docker-docker-13639-3547-60328d4d40a11ac7c18c6243f597bcfbb3050148/.minikube/machines/server.pem --> /etc/docker/server.pem (1261 bytes)
I0511 22:57:27.415773 35457 ssh_runner.go:362] scp /home/jenkins/minikube-integration/linux-amd64-docker-docker-13639-3547-60328d4d40a11ac7c18c6243f597bcfbb3050148/.minikube/machines/server-key.pem --> /etc/docker/server-key.pem (1675 bytes)
I0511 22:57:27.432458 35457 provision.go:86] duration metric: configureAuth took 486.392353ms
I0511 22:57:27.432474 35457 ubuntu.go:193] setting minikube options for container-runtime
I0511 22:57:27.432694 35457 config.go:178] Loaded profile config "functional-20220511225632-7294": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.23.5
I0511 22:57:27.432775 35457 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" functional-20220511225632-7294
I0511 22:57:27.464732 35457 main.go:134] libmachine: Using SSH client type: native
I0511 22:57:27.464868 35457 main.go:134] libmachine: &{{{<nil> 0 [] [] []} docker [0x7da160] 0x7dd1c0 <nil> [] 0s} 127.0.0.1 49167 <nil> <nil>}
I0511 22:57:27.464875 35457 main.go:134] libmachine: About to run SSH command:
df --output=fstype / | tail -n 1
I0511 22:57:27.574365 35457 main.go:134] libmachine: SSH cmd err, output: <nil>: overlay
I0511 22:57:27.574386 35457 ubuntu.go:71] root file system type: overlay
I0511 22:57:27.574530 35457 provision.go:309] Updating docker unit: /lib/systemd/system/docker.service ...
I0511 22:57:27.574575 35457 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" functional-20220511225632-7294
I0511 22:57:27.606324 35457 main.go:134] libmachine: Using SSH client type: native
I0511 22:57:27.606456 35457 main.go:134] libmachine: &{{{<nil> 0 [] [] []} docker [0x7da160] 0x7dd1c0 <nil> [] 0s} 127.0.0.1 49167 <nil> <nil>}
I0511 22:57:27.606510 35457 main.go:134] libmachine: About to run SSH command:
sudo mkdir -p /lib/systemd/system && printf %!s(MISSING) "[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
BindsTo=containerd.service
After=network-online.target firewalld.service containerd.service
Wants=network-online.target
Requires=docker.socket
StartLimitBurst=3
StartLimitIntervalSec=60
[Service]
Type=notify
Restart=on-failure
# This file is a systemd drop-in unit that inherits from the base dockerd configuration.
# The base configuration already specifies an 'ExecStart=...' command. The first directive
# here is to clear out that command inherited from the base configuration. Without this,
# the command from the base configuration and the command specified here are treated as
# a sequence of commands, which is not the desired behavior, nor is it valid -- systemd
# will catch this invalid input and refuse to start the service with an error like:
# Service has more than one ExecStart= setting, which is only allowed for Type=oneshot services.
# NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other
# container runtimes. If left unlimited, it may result in OOM issues with MySQL.
ExecStart=
ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=docker --insecure-registry 10.96.0.0/12
ExecReload=/bin/kill -s HUP \$MAINPID
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
# Uncomment TasksMax if your systemd version supports it.
# Only systemd 226 and above support this version.
TasksMax=infinity
TimeoutStartSec=0
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process
[Install]
WantedBy=multi-user.target
" | sudo tee /lib/systemd/system/docker.service.new
I0511 22:57:27.722841 35457 main.go:134] libmachine: SSH cmd err, output: <nil>: [Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
BindsTo=containerd.service
After=network-online.target firewalld.service containerd.service
Wants=network-online.target
Requires=docker.socket
StartLimitBurst=3
StartLimitIntervalSec=60
[Service]
Type=notify
Restart=on-failure
# This file is a systemd drop-in unit that inherits from the base dockerd configuration.
# The base configuration already specifies an 'ExecStart=...' command. The first directive
# here is to clear out that command inherited from the base configuration. Without this,
# the command from the base configuration and the command specified here are treated as
# a sequence of commands, which is not the desired behavior, nor is it valid -- systemd
# will catch this invalid input and refuse to start the service with an error like:
# Service has more than one ExecStart= setting, which is only allowed for Type=oneshot services.
# NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other
# container runtimes. If left unlimited, it may result in OOM issues with MySQL.
ExecStart=
ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=docker --insecure-registry 10.96.0.0/12
ExecReload=/bin/kill -s HUP $MAINPID
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
# Uncomment TasksMax if your systemd version supports it.
# Only systemd 226 and above support this version.
TasksMax=infinity
TimeoutStartSec=0
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process
[Install]
WantedBy=multi-user.target
I0511 22:57:27.722924 35457 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" functional-20220511225632-7294
I0511 22:57:27.755417 35457 main.go:134] libmachine: Using SSH client type: native
I0511 22:57:27.755545 35457 main.go:134] libmachine: &{{{<nil> 0 [] [] []} docker [0x7da160] 0x7dd1c0 <nil> [] 0s} 127.0.0.1 49167 <nil> <nil>}
I0511 22:57:27.755557 35457 main.go:134] libmachine: About to run SSH command:
sudo diff -u /lib/systemd/system/docker.service /lib/systemd/system/docker.service.new || { sudo mv /lib/systemd/system/docker.service.new /lib/systemd/system/docker.service; sudo systemctl -f daemon-reload && sudo systemctl -f enable docker && sudo systemctl -f restart docker; }
I0511 22:57:27.865804 35457 main.go:134] libmachine: SSH cmd err, output: <nil>:
I0511 22:57:27.865821 35457 machine.go:91] provisioned docker machine in 1.210780322s
I0511 22:57:27.865832 35457 start.go:306] post-start starting for "functional-20220511225632-7294" (driver="docker")
I0511 22:57:27.865837 35457 start.go:316] creating required directories: [/etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs]
I0511 22:57:27.865903 35457 ssh_runner.go:195] Run: sudo mkdir -p /etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs
I0511 22:57:27.865931 35457 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" functional-20220511225632-7294
I0511 22:57:27.897770 35457 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:49167 SSHKeyPath:/home/jenkins/minikube-integration/linux-amd64-docker-docker-13639-3547-60328d4d40a11ac7c18c6243f597bcfbb3050148/.minikube/machines/functional-20220511225632-7294/id_rsa Username:docker}
I0511 22:57:27.981589 35457 ssh_runner.go:195] Run: cat /etc/os-release
I0511 22:57:27.984481 35457 main.go:134] libmachine: Couldn't set key PRIVACY_POLICY_URL, no corresponding struct field found
I0511 22:57:27.984501 35457 main.go:134] libmachine: Couldn't set key VERSION_CODENAME, no corresponding struct field found
I0511 22:57:27.984516 35457 main.go:134] libmachine: Couldn't set key UBUNTU_CODENAME, no corresponding struct field found
I0511 22:57:27.984521 35457 info.go:137] Remote host: Ubuntu 20.04.4 LTS
I0511 22:57:27.984529 35457 filesync.go:126] Scanning /home/jenkins/minikube-integration/linux-amd64-docker-docker-13639-3547-60328d4d40a11ac7c18c6243f597bcfbb3050148/.minikube/addons for local assets ...
I0511 22:57:27.984595 35457 filesync.go:126] Scanning /home/jenkins/minikube-integration/linux-amd64-docker-docker-13639-3547-60328d4d40a11ac7c18c6243f597bcfbb3050148/.minikube/files for local assets ...
I0511 22:57:27.984675 35457 filesync.go:149] local asset: /home/jenkins/minikube-integration/linux-amd64-docker-docker-13639-3547-60328d4d40a11ac7c18c6243f597bcfbb3050148/.minikube/files/etc/ssl/certs/72942.pem -> 72942.pem in /etc/ssl/certs
I0511 22:57:27.984757 35457 filesync.go:149] local asset: /home/jenkins/minikube-integration/linux-amd64-docker-docker-13639-3547-60328d4d40a11ac7c18c6243f597bcfbb3050148/.minikube/files/etc/test/nested/copy/7294/hosts -> hosts in /etc/test/nested/copy/7294
I0511 22:57:27.984794 35457 ssh_runner.go:195] Run: sudo mkdir -p /etc/ssl/certs /etc/test/nested/copy/7294
I0511 22:57:27.991584 35457 ssh_runner.go:362] scp /home/jenkins/minikube-integration/linux-amd64-docker-docker-13639-3547-60328d4d40a11ac7c18c6243f597bcfbb3050148/.minikube/files/etc/ssl/certs/72942.pem --> /etc/ssl/certs/72942.pem (1708 bytes)
I0511 22:57:28.008816 35457 ssh_runner.go:362] scp /home/jenkins/minikube-integration/linux-amd64-docker-docker-13639-3547-60328d4d40a11ac7c18c6243f597bcfbb3050148/.minikube/files/etc/test/nested/copy/7294/hosts --> /etc/test/nested/copy/7294/hosts (40 bytes)
I0511 22:57:28.027091 35457 start.go:309] post-start completed in 161.244551ms
I0511 22:57:28.027159 35457 ssh_runner.go:195] Run: sh -c "df -h /var | awk 'NR==2{print $5}'"
I0511 22:57:28.027213 35457 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" functional-20220511225632-7294
I0511 22:57:28.062710 35457 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:49167 SSHKeyPath:/home/jenkins/minikube-integration/linux-amd64-docker-docker-13639-3547-60328d4d40a11ac7c18c6243f597bcfbb3050148/.minikube/machines/functional-20220511225632-7294/id_rsa Username:docker}
I0511 22:57:28.142816 35457 ssh_runner.go:195] Run: sh -c "df -BG /var | awk 'NR==2{print $4}'"
I0511 22:57:28.146782 35457 fix.go:57] fixHost completed within 1.530931516s
I0511 22:57:28.146802 35457 start.go:81] releasing machines lock for "functional-20220511225632-7294", held for 1.530970154s
I0511 22:57:28.146878 35457 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" functional-20220511225632-7294
I0511 22:57:28.180169 35457 ssh_runner.go:195] Run: systemctl --version
I0511 22:57:28.180205 35457 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" functional-20220511225632-7294
I0511 22:57:28.180271 35457 ssh_runner.go:195] Run: curl -sS -m 2 https://k8s.gcr.io/
I0511 22:57:28.180315 35457 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" functional-20220511225632-7294
I0511 22:57:28.213717 35457 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:49167 SSHKeyPath:/home/jenkins/minikube-integration/linux-amd64-docker-docker-13639-3547-60328d4d40a11ac7c18c6243f597bcfbb3050148/.minikube/machines/functional-20220511225632-7294/id_rsa Username:docker}
I0511 22:57:28.217187 35457 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:49167 SSHKeyPath:/home/jenkins/minikube-integration/linux-amd64-docker-docker-13639-3547-60328d4d40a11ac7c18c6243f597bcfbb3050148/.minikube/machines/functional-20220511225632-7294/id_rsa Username:docker}
I0511 22:57:28.318492 35457 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service containerd
I0511 22:57:28.327916 35457 ssh_runner.go:195] Run: sudo systemctl cat docker.service
I0511 22:57:28.337194 35457 cruntime.go:273] skipping containerd shutdown because we are bound to it
I0511 22:57:28.337236 35457 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service crio
I0511 22:57:28.346220 35457 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %!s(MISSING) "runtime-endpoint: unix:///var/run/dockershim.sock
image-endpoint: unix:///var/run/dockershim.sock
" | sudo tee /etc/crictl.yaml"
I0511 22:57:28.359402 35457 ssh_runner.go:195] Run: sudo systemctl unmask docker.service
I0511 22:57:28.456254 35457 ssh_runner.go:195] Run: sudo systemctl enable docker.socket
I0511 22:57:28.554663 35457 ssh_runner.go:195] Run: sudo systemctl cat docker.service
I0511 22:57:28.564301 35457 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0511 22:57:28.659872 35457 ssh_runner.go:195] Run: sudo systemctl start docker
I0511 22:57:28.669664 35457 ssh_runner.go:195] Run: docker version --format {{.Server.Version}}
I0511 22:57:28.708664 35457 ssh_runner.go:195] Run: docker version --format {{.Server.Version}}
I0511 22:57:28.748256 35457 out.go:204] * Preparing Kubernetes v1.23.5 on Docker 20.10.15 ...
I0511 22:57:28.748346 35457 cli_runner.go:164] Run: docker network inspect functional-20220511225632-7294 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I0511 22:57:28.779465 35457 ssh_runner.go:195] Run: grep 192.168.49.1 host.minikube.internal$ /etc/hosts
I0511 22:57:28.785079 35457 out.go:177] - apiserver.enable-admission-plugins=NamespaceAutoProvision
I0511 22:57:28.786749 35457 preload.go:132] Checking if preload exists for k8s version v1.23.5 and runtime docker
I0511 22:57:28.786796 35457 ssh_runner.go:195] Run: docker images --format {{.Repository}}:{{.Tag}}
I0511 22:57:28.818039 35457 docker.go:610] Got preloaded images: -- stdout --
minikube-local-cache-test:functional-20220511225632-7294
k8s.gcr.io/kube-apiserver:v1.23.5
k8s.gcr.io/kube-proxy:v1.23.5
k8s.gcr.io/kube-controller-manager:v1.23.5
k8s.gcr.io/kube-scheduler:v1.23.5
k8s.gcr.io/etcd:3.5.1-0
k8s.gcr.io/coredns/coredns:v1.8.6
k8s.gcr.io/pause:3.6
gcr.io/k8s-minikube/storage-provisioner:v5
k8s.gcr.io/pause:3.3
k8s.gcr.io/pause:3.1
k8s.gcr.io/pause:latest
-- /stdout --
I0511 22:57:28.818061 35457 docker.go:541] Images already preloaded, skipping extraction
I0511 22:57:28.818109 35457 ssh_runner.go:195] Run: docker images --format {{.Repository}}:{{.Tag}}
I0511 22:57:28.849860 35457 docker.go:610] Got preloaded images: -- stdout --
minikube-local-cache-test:functional-20220511225632-7294
k8s.gcr.io/kube-apiserver:v1.23.5
k8s.gcr.io/kube-proxy:v1.23.5
k8s.gcr.io/kube-controller-manager:v1.23.5
k8s.gcr.io/kube-scheduler:v1.23.5
k8s.gcr.io/etcd:3.5.1-0
k8s.gcr.io/coredns/coredns:v1.8.6
k8s.gcr.io/pause:3.6
gcr.io/k8s-minikube/storage-provisioner:v5
k8s.gcr.io/pause:3.3
k8s.gcr.io/pause:3.1
k8s.gcr.io/pause:latest
-- /stdout --
I0511 22:57:28.849877 35457 cache_images.go:84] Images are preloaded, skipping loading
I0511 22:57:28.849926 35457 ssh_runner.go:195] Run: docker info --format {{.CgroupDriver}}
I0511 22:57:28.930423 35457 extraconfig.go:124] Overwriting default enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota with user provided enable-admission-plugins=NamespaceAutoProvision for component apiserver
I0511 22:57:28.930455 35457 cni.go:95] Creating CNI manager for ""
I0511 22:57:28.930463 35457 cni.go:169] CNI unnecessary in this configuration, recommending no CNI
I0511 22:57:28.930473 35457 kubeadm.go:87] Using pod CIDR: 10.244.0.0/16
I0511 22:57:28.930485 35457 kubeadm.go:158] kubeadm options: {CertDir:/var/lib/minikube/certs ServiceCIDR:10.96.0.0/12 PodSubnet:10.244.0.0/16 AdvertiseAddress:192.168.49.2 APIServerPort:8441 KubernetesVersion:v1.23.5 EtcdDataDir:/var/lib/minikube/etcd EtcdExtraArgs:map[] ClusterName:functional-20220511225632-7294 NodeName:functional-20220511225632-7294 DNSDomain:cluster.local CRISocket:/var/run/dockershim.sock ImageRepository: ComponentOptions:[{Component:apiServer ExtraArgs:map[enable-admission-plugins:NamespaceAutoProvision] Pairs:map[certSANs:["127.0.0.1", "localhost", "192.168.49.2"]]} {Component:controllerManager ExtraArgs:map[allocate-node-cidrs:true leader-elect:false] Pairs:map[]} {Component:scheduler ExtraArgs:map[leader-elect:false] Pairs:map[]}] FeatureArgs:map[] NoTaintMaster:true NodeIP:192.168.49.2 CgroupDriver:cgroupfs ClientCAFile:/var/lib/minikube/certs/ca.crt StaticPodPath:/etc/kubernetes/manifests ControlPlaneAddress:control-plane.minikube.internal KubeProxyOptions:map[]}
I0511 22:57:28.930598 35457 kubeadm.go:162] kubeadm config:
apiVersion: kubeadm.k8s.io/v1beta3
kind: InitConfiguration
localAPIEndpoint:
advertiseAddress: 192.168.49.2
bindPort: 8441
bootstrapTokens:
- groups:
- system:bootstrappers:kubeadm:default-node-token
ttl: 24h0m0s
usages:
- signing
- authentication
nodeRegistration:
criSocket: /var/run/dockershim.sock
name: "functional-20220511225632-7294"
kubeletExtraArgs:
node-ip: 192.168.49.2
taints: []
---
apiVersion: kubeadm.k8s.io/v1beta3
kind: ClusterConfiguration
apiServer:
certSANs: ["127.0.0.1", "localhost", "192.168.49.2"]
extraArgs:
enable-admission-plugins: "NamespaceAutoProvision"
controllerManager:
extraArgs:
allocate-node-cidrs: "true"
leader-elect: "false"
scheduler:
extraArgs:
leader-elect: "false"
certificatesDir: /var/lib/minikube/certs
clusterName: mk
controlPlaneEndpoint: control-plane.minikube.internal:8441
etcd:
local:
dataDir: /var/lib/minikube/etcd
extraArgs:
proxy-refresh-interval: "70000"
kubernetesVersion: v1.23.5
networking:
dnsDomain: cluster.local
podSubnet: "10.244.0.0/16"
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
authentication:
x509:
clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: cgroupfs
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"
nodefs.inodesFree: "0%"
imagefs.available: "0%"
failSwapOn: false
staticPodPath: /etc/kubernetes/manifests
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
maxPerCore: 0
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_established"
tcpEstablishedTimeout: 0s
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_close"
tcpCloseWaitTimeout: 0s
I0511 22:57:28.930661 35457 kubeadm.go:936] kubelet [Unit]
Wants=docker.socket
[Service]
ExecStart=
ExecStart=/var/lib/minikube/binaries/v1.23.5/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --config=/var/lib/kubelet/config.yaml --container-runtime=docker --hostname-override=functional-20220511225632-7294 --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.49.2
[Install]
config:
{KubernetesVersion:v1.23.5 ClusterName:functional-20220511225632-7294 Namespace:default APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin: FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: ExtraOptions:[{Component:apiserver Key:enable-admission-plugins Value:NamespaceAutoProvision}] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI: NodeIP: NodePort:8441 NodeName:}
I0511 22:57:28.930702 35457 ssh_runner.go:195] Run: sudo ls /var/lib/minikube/binaries/v1.23.5
I0511 22:57:28.937732 35457 binaries.go:44] Found k8s binaries, skipping transfer
I0511 22:57:28.937777 35457 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/kubelet.service.d /lib/systemd/system /var/tmp/minikube
I0511 22:57:28.944630 35457 ssh_runner.go:362] scp memory --> /etc/systemd/system/kubelet.service.d/10-kubeadm.conf (356 bytes)
I0511 22:57:28.957779 35457 ssh_runner.go:362] scp memory --> /lib/systemd/system/kubelet.service (352 bytes)
I0511 22:57:28.970412 35457 ssh_runner.go:362] scp memory --> /var/tmp/minikube/kubeadm.yaml.new (1902 bytes)
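At this point the rendered kubeadm config has just been written to /var/tmp/minikube/kubeadm.yaml.new on the node. As a quick sanity check it can be read back over the same SSH path minikube uses (a sketch, reusing the profile name from this run):

  # dump the freshly rendered kubeadm config from inside the node container
  minikube -p functional-20220511225632-7294 ssh -- sudo cat /var/tmp/minikube/kubeadm.yaml.new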
I0511 22:57:28.982839 35457 ssh_runner.go:195] Run: grep 192.168.49.2 control-plane.minikube.internal$ /etc/hosts
I0511 22:57:28.985876 35457 certs.go:54] Setting up /home/jenkins/minikube-integration/linux-amd64-docker-docker-13639-3547-60328d4d40a11ac7c18c6243f597bcfbb3050148/.minikube/profiles/functional-20220511225632-7294 for IP: 192.168.49.2
I0511 22:57:28.985970 35457 certs.go:182] skipping minikubeCA CA generation: /home/jenkins/minikube-integration/linux-amd64-docker-docker-13639-3547-60328d4d40a11ac7c18c6243f597bcfbb3050148/.minikube/ca.key
I0511 22:57:28.986000 35457 certs.go:182] skipping proxyClientCA CA generation: /home/jenkins/minikube-integration/linux-amd64-docker-docker-13639-3547-60328d4d40a11ac7c18c6243f597bcfbb3050148/.minikube/proxy-client-ca.key
I0511 22:57:28.986058 35457 certs.go:298] skipping minikube-user signed cert generation: /home/jenkins/minikube-integration/linux-amd64-docker-docker-13639-3547-60328d4d40a11ac7c18c6243f597bcfbb3050148/.minikube/profiles/functional-20220511225632-7294/client.key
I0511 22:57:28.986096 35457 certs.go:298] skipping minikube signed cert generation: /home/jenkins/minikube-integration/linux-amd64-docker-docker-13639-3547-60328d4d40a11ac7c18c6243f597bcfbb3050148/.minikube/profiles/functional-20220511225632-7294/apiserver.key.dd3b5fb2
I0511 22:57:28.986161 35457 certs.go:298] skipping aggregator signed cert generation: /home/jenkins/minikube-integration/linux-amd64-docker-docker-13639-3547-60328d4d40a11ac7c18c6243f597bcfbb3050148/.minikube/profiles/functional-20220511225632-7294/proxy-client.key
I0511 22:57:28.986295 35457 certs.go:388] found cert: /home/jenkins/minikube-integration/linux-amd64-docker-docker-13639-3547-60328d4d40a11ac7c18c6243f597bcfbb3050148/.minikube/certs/home/jenkins/minikube-integration/linux-amd64-docker-docker-13639-3547-60328d4d40a11ac7c18c6243f597bcfbb3050148/.minikube/certs/7294.pem (1338 bytes)
W0511 22:57:28.986324 35457 certs.go:384] ignoring /home/jenkins/minikube-integration/linux-amd64-docker-docker-13639-3547-60328d4d40a11ac7c18c6243f597bcfbb3050148/.minikube/certs/home/jenkins/minikube-integration/linux-amd64-docker-docker-13639-3547-60328d4d40a11ac7c18c6243f597bcfbb3050148/.minikube/certs/7294_empty.pem, impossibly tiny 0 bytes
I0511 22:57:28.986332 35457 certs.go:388] found cert: /home/jenkins/minikube-integration/linux-amd64-docker-docker-13639-3547-60328d4d40a11ac7c18c6243f597bcfbb3050148/.minikube/certs/home/jenkins/minikube-integration/linux-amd64-docker-docker-13639-3547-60328d4d40a11ac7c18c6243f597bcfbb3050148/.minikube/certs/ca-key.pem (1679 bytes)
I0511 22:57:28.986354 35457 certs.go:388] found cert: /home/jenkins/minikube-integration/linux-amd64-docker-docker-13639-3547-60328d4d40a11ac7c18c6243f597bcfbb3050148/.minikube/certs/home/jenkins/minikube-integration/linux-amd64-docker-docker-13639-3547-60328d4d40a11ac7c18c6243f597bcfbb3050148/.minikube/certs/ca.pem (1078 bytes)
I0511 22:57:28.986379 35457 certs.go:388] found cert: /home/jenkins/minikube-integration/linux-amd64-docker-docker-13639-3547-60328d4d40a11ac7c18c6243f597bcfbb3050148/.minikube/certs/home/jenkins/minikube-integration/linux-amd64-docker-docker-13639-3547-60328d4d40a11ac7c18c6243f597bcfbb3050148/.minikube/certs/cert.pem (1123 bytes)
I0511 22:57:28.986397 35457 certs.go:388] found cert: /home/jenkins/minikube-integration/linux-amd64-docker-docker-13639-3547-60328d4d40a11ac7c18c6243f597bcfbb3050148/.minikube/certs/home/jenkins/minikube-integration/linux-amd64-docker-docker-13639-3547-60328d4d40a11ac7c18c6243f597bcfbb3050148/.minikube/certs/key.pem (1679 bytes)
I0511 22:57:28.986439 35457 certs.go:388] found cert: /home/jenkins/minikube-integration/linux-amd64-docker-docker-13639-3547-60328d4d40a11ac7c18c6243f597bcfbb3050148/.minikube/files/etc/ssl/certs/home/jenkins/minikube-integration/linux-amd64-docker-docker-13639-3547-60328d4d40a11ac7c18c6243f597bcfbb3050148/.minikube/files/etc/ssl/certs/72942.pem (1708 bytes)
I0511 22:57:28.987027 35457 ssh_runner.go:362] scp /home/jenkins/minikube-integration/linux-amd64-docker-docker-13639-3547-60328d4d40a11ac7c18c6243f597bcfbb3050148/.minikube/profiles/functional-20220511225632-7294/apiserver.crt --> /var/lib/minikube/certs/apiserver.crt (1399 bytes)
I0511 22:57:29.004997 35457 ssh_runner.go:362] scp /home/jenkins/minikube-integration/linux-amd64-docker-docker-13639-3547-60328d4d40a11ac7c18c6243f597bcfbb3050148/.minikube/profiles/functional-20220511225632-7294/apiserver.key --> /var/lib/minikube/certs/apiserver.key (1679 bytes)
I0511 22:57:29.022540 35457 ssh_runner.go:362] scp /home/jenkins/minikube-integration/linux-amd64-docker-docker-13639-3547-60328d4d40a11ac7c18c6243f597bcfbb3050148/.minikube/profiles/functional-20220511225632-7294/proxy-client.crt --> /var/lib/minikube/certs/proxy-client.crt (1147 bytes)
I0511 22:57:29.039827 35457 ssh_runner.go:362] scp /home/jenkins/minikube-integration/linux-amd64-docker-docker-13639-3547-60328d4d40a11ac7c18c6243f597bcfbb3050148/.minikube/profiles/functional-20220511225632-7294/proxy-client.key --> /var/lib/minikube/certs/proxy-client.key (1675 bytes)
I0511 22:57:29.057082 35457 ssh_runner.go:362] scp /home/jenkins/minikube-integration/linux-amd64-docker-docker-13639-3547-60328d4d40a11ac7c18c6243f597bcfbb3050148/.minikube/ca.crt --> /var/lib/minikube/certs/ca.crt (1111 bytes)
I0511 22:57:29.075018 35457 ssh_runner.go:362] scp /home/jenkins/minikube-integration/linux-amd64-docker-docker-13639-3547-60328d4d40a11ac7c18c6243f597bcfbb3050148/.minikube/ca.key --> /var/lib/minikube/certs/ca.key (1675 bytes)
I0511 22:57:29.092492 35457 ssh_runner.go:362] scp /home/jenkins/minikube-integration/linux-amd64-docker-docker-13639-3547-60328d4d40a11ac7c18c6243f597bcfbb3050148/.minikube/proxy-client-ca.crt --> /var/lib/minikube/certs/proxy-client-ca.crt (1119 bytes)
I0511 22:57:29.109681 35457 ssh_runner.go:362] scp /home/jenkins/minikube-integration/linux-amd64-docker-docker-13639-3547-60328d4d40a11ac7c18c6243f597bcfbb3050148/.minikube/proxy-client-ca.key --> /var/lib/minikube/certs/proxy-client-ca.key (1675 bytes)
I0511 22:57:29.127542 35457 ssh_runner.go:362] scp /home/jenkins/minikube-integration/linux-amd64-docker-docker-13639-3547-60328d4d40a11ac7c18c6243f597bcfbb3050148/.minikube/certs/7294.pem --> /usr/share/ca-certificates/7294.pem (1338 bytes)
I0511 22:57:29.144900 35457 ssh_runner.go:362] scp /home/jenkins/minikube-integration/linux-amd64-docker-docker-13639-3547-60328d4d40a11ac7c18c6243f597bcfbb3050148/.minikube/files/etc/ssl/certs/72942.pem --> /usr/share/ca-certificates/72942.pem (1708 bytes)
I0511 22:57:29.161806 35457 ssh_runner.go:362] scp /home/jenkins/minikube-integration/linux-amd64-docker-docker-13639-3547-60328d4d40a11ac7c18c6243f597bcfbb3050148/.minikube/ca.crt --> /usr/share/ca-certificates/minikubeCA.pem (1111 bytes)
I0511 22:57:29.178889 35457 ssh_runner.go:362] scp memory --> /var/lib/minikube/kubeconfig (738 bytes)
I0511 22:57:29.191516 35457 ssh_runner.go:195] Run: openssl version
I0511 22:57:29.196437 35457 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/7294.pem && ln -fs /usr/share/ca-certificates/7294.pem /etc/ssl/certs/7294.pem"
I0511 22:57:29.203994 35457 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/7294.pem
I0511 22:57:29.207040 35457 certs.go:431] hashing: -rw-r--r-- 1 root root 1338 May 11 22:56 /usr/share/ca-certificates/7294.pem
I0511 22:57:29.207076 35457 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/7294.pem
I0511 22:57:29.211935 35457 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/51391683.0 || ln -fs /etc/ssl/certs/7294.pem /etc/ssl/certs/51391683.0"
I0511 22:57:29.218717 35457 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/72942.pem && ln -fs /usr/share/ca-certificates/72942.pem /etc/ssl/certs/72942.pem"
I0511 22:57:29.225976 35457 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/72942.pem
I0511 22:57:29.229001 35457 certs.go:431] hashing: -rw-r--r-- 1 root root 1708 May 11 22:56 /usr/share/ca-certificates/72942.pem
I0511 22:57:29.229054 35457 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/72942.pem
I0511 22:57:29.233716 35457 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/3ec20f2e.0 || ln -fs /etc/ssl/certs/72942.pem /etc/ssl/certs/3ec20f2e.0"
I0511 22:57:29.240292 35457 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/minikubeCA.pem && ln -fs /usr/share/ca-certificates/minikubeCA.pem /etc/ssl/certs/minikubeCA.pem"
I0511 22:57:29.247330 35457 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/minikubeCA.pem
I0511 22:57:29.250319 35457 certs.go:431] hashing: -rw-r--r-- 1 root root 1111 May 11 22:52 /usr/share/ca-certificates/minikubeCA.pem
I0511 22:57:29.250366 35457 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem
I0511 22:57:29.255227 35457 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/b5213941.0 || ln -fs /etc/ssl/certs/minikubeCA.pem /etc/ssl/certs/b5213941.0"
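The openssl/ln pairs above follow the usual c_rehash convention: the symlink name is the certificate's subject hash plus ".0". A minimal sketch of the same step for one of the certs from this run:

  # compute the subject hash and create the hash-named symlink OpenSSL looks up
  HASH=$(openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem)
  sudo ln -fs /etc/ssl/certs/minikubeCA.pem "/etc/ssl/certs/${HASH}.0"

which is why minikubeCA.pem ends up linked as b5213941.0 above.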
I0511 22:57:29.261884 35457 kubeadm.go:391] StartCluster: {Name:functional-20220511225632-7294 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.30-1652251400-14138@sha256:8c847a4aa2afc5a7fc659f9731046bf9cc7e788283deecc83c8633014fb0828a Memory:4000 CPUs:2 DiskSize:20000 VMDriver: Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:0 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.23.5 ClusterName:functional-20220511225632-7294 Namespace:default APIServerName:
minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin: FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: ExtraOptions:[{Component:apiserver Key:enable-admission-plugins Value:NamespaceAutoProvision}] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI: NodeIP: NodePort:8441 NodeName:} Nodes:[{Name: IP:192.168.49.2 Port:8441 KubernetesVersion:v1.23.5 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[ambassador:false auto-pause:false csi-hostpath-driver:false dashboard:false default-storageclass:true efk:false freshpod:false gcp-auth:false gvisor:false helm-tiller:false ingress:false ingress-dns:false istio:false istio-provisioner:false kong:false kubevirt:false logviewer:false metallb:false metrics-server:false nvidia-driver-installer:false nvidia-gpu-device-plugin:false olm:false pod-security-policy:false portainer:false registry:false registry-aliases:false regi
stry-creds:false storage-provisioner:true storage-provisioner-gluster:false volumesnapshots:false] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false}
I0511 22:57:29.262024 35457 ssh_runner.go:195] Run: docker ps --filter status=paused --filter=name=k8s_.*_(kube-system)_ --format={{.ID}}
I0511 22:57:29.292432 35457 ssh_runner.go:195] Run: sudo ls /var/lib/kubelet/kubeadm-flags.env /var/lib/kubelet/config.yaml /var/lib/minikube/etcd
I0511 22:57:29.299532 35457 kubeadm.go:402] found existing configuration files, will attempt cluster restart
I0511 22:57:29.299546 35457 kubeadm.go:601] restartCluster start
I0511 22:57:29.299583 35457 ssh_runner.go:195] Run: sudo test -d /data/minikube
I0511 22:57:29.305635 35457 kubeadm.go:127] /data/minikube skipping compat symlinks: sudo test -d /data/minikube: Process exited with status 1
stdout:
stderr:
I0511 22:57:29.306083 35457 kubeconfig.go:92] found "functional-20220511225632-7294" server: "https://192.168.49.2:8441"
I0511 22:57:29.306928 35457 ssh_runner.go:195] Run: sudo diff -u /var/tmp/minikube/kubeadm.yaml /var/tmp/minikube/kubeadm.yaml.new
I0511 22:57:29.313424 35457 kubeadm.go:569] needs reconfigure: configs differ:
-- stdout --
--- /var/tmp/minikube/kubeadm.yaml 2022-05-11 22:56:45.043648012 +0000
+++ /var/tmp/minikube/kubeadm.yaml.new 2022-05-11 22:57:28.975794748 +0000
@@ -22,7 +22,7 @@
apiServer:
certSANs: ["127.0.0.1", "localhost", "192.168.49.2"]
extraArgs:
- enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
+ enable-admission-plugins: "NamespaceAutoProvision"
controllerManager:
extraArgs:
allocate-node-cidrs: "true"
-- /stdout --
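The restart path decides that a reconfigure is needed from this diff between the kubeadm config already on disk and the newly rendered one. The same check can be reproduced by hand (a sketch, reusing the profile name from this run):

  # compare the on-disk kubeadm config with the newly rendered one
  minikube -p functional-20220511225632-7294 ssh -- \
    sudo diff -u /var/tmp/minikube/kubeadm.yaml /var/tmp/minikube/kubeadm.yaml.new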
I0511 22:57:29.313434 35457 kubeadm.go:1067] stopping kube-system containers ...
I0511 22:57:29.313473 35457 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_.*_(kube-system)_ --format={{.ID}}
I0511 22:57:29.346035 35457 docker.go:442] Stopping containers: [6d0ac8abd5b0 34aff32b4412 f70fad5bd078 78cac41764a3 d142d56ab3a5 96f3c0503a19 1221efbd7e06 ad029eb198e2 eb08f8db1d6e c1806e43aa64 5960bc7b5187 49e2085959b2 df57c21cb948 215089bd9ae9 16e87e1858eb]
I0511 22:57:29.346092 35457 ssh_runner.go:195] Run: docker stop 6d0ac8abd5b0 34aff32b4412 f70fad5bd078 78cac41764a3 d142d56ab3a5 96f3c0503a19 1221efbd7e06 ad029eb198e2 eb08f8db1d6e c1806e43aa64 5960bc7b5187 49e2085959b2 df57c21cb948 215089bd9ae9 16e87e1858eb
I0511 22:57:34.598708 35457 ssh_runner.go:235] Completed: docker stop 6d0ac8abd5b0 34aff32b4412 f70fad5bd078 78cac41764a3 d142d56ab3a5 96f3c0503a19 1221efbd7e06 ad029eb198e2 eb08f8db1d6e c1806e43aa64 5960bc7b5187 49e2085959b2 df57c21cb948 215089bd9ae9 16e87e1858eb: (5.252582863s)
I0511 22:57:34.598756 35457 ssh_runner.go:195] Run: sudo systemctl stop kubelet
I0511 22:57:34.685307 35457 ssh_runner.go:195] Run: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf
I0511 22:57:34.692420 35457 kubeadm.go:155] found existing configuration files:
-rw------- 1 root root 5643 May 11 22:56 /etc/kubernetes/admin.conf
-rw------- 1 root root 5656 May 11 22:56 /etc/kubernetes/controller-manager.conf
-rw------- 1 root root 2059 May 11 22:56 /etc/kubernetes/kubelet.conf
-rw------- 1 root root 5604 May 11 22:56 /etc/kubernetes/scheduler.conf
I0511 22:57:34.692468 35457 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8441 /etc/kubernetes/admin.conf
I0511 22:57:34.699365 35457 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8441 /etc/kubernetes/kubelet.conf
I0511 22:57:34.706220 35457 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8441 /etc/kubernetes/controller-manager.conf
I0511 22:57:34.712599 35457 kubeadm.go:166] "https://control-plane.minikube.internal:8441" may not be in /etc/kubernetes/controller-manager.conf - will remove: sudo grep https://control-plane.minikube.internal:8441 /etc/kubernetes/controller-manager.conf: Process exited with status 1
stdout:
stderr:
I0511 22:57:34.712647 35457 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/controller-manager.conf
I0511 22:57:34.719093 35457 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8441 /etc/kubernetes/scheduler.conf
I0511 22:57:34.725477 35457 kubeadm.go:166] "https://control-plane.minikube.internal:8441" may not be in /etc/kubernetes/scheduler.conf - will remove: sudo grep https://control-plane.minikube.internal:8441 /etc/kubernetes/scheduler.conf: Process exited with status 1
stdout:
stderr:
I0511 22:57:34.725516 35457 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/scheduler.conf
I0511 22:57:34.731720 35457 ssh_runner.go:195] Run: sudo cp /var/tmp/minikube/kubeadm.yaml.new /var/tmp/minikube/kubeadm.yaml
I0511 22:57:34.738234 35457 kubeadm.go:678] reconfiguring cluster from /var/tmp/minikube/kubeadm.yaml
I0511 22:57:34.738247 35457 ssh_runner.go:195] Run: /bin/bash -c "sudo env PATH="/var/lib/minikube/binaries/v1.23.5:$PATH" kubeadm init phase certs all --config /var/tmp/minikube/kubeadm.yaml"
I0511 22:57:34.781129 35457 ssh_runner.go:195] Run: /bin/bash -c "sudo env PATH="/var/lib/minikube/binaries/v1.23.5:$PATH" kubeadm init phase kubeconfig all --config /var/tmp/minikube/kubeadm.yaml"
I0511 22:57:35.404939 35457 ssh_runner.go:195] Run: /bin/bash -c "sudo env PATH="/var/lib/minikube/binaries/v1.23.5:$PATH" kubeadm init phase kubelet-start --config /var/tmp/minikube/kubeadm.yaml"
I0511 22:57:35.552229 35457 ssh_runner.go:195] Run: /bin/bash -c "sudo env PATH="/var/lib/minikube/binaries/v1.23.5:$PATH" kubeadm init phase control-plane all --config /var/tmp/minikube/kubeadm.yaml"
I0511 22:57:35.601895 35457 ssh_runner.go:195] Run: /bin/bash -c "sudo env PATH="/var/lib/minikube/binaries/v1.23.5:$PATH" kubeadm init phase etcd local --config /var/tmp/minikube/kubeadm.yaml"
I0511 22:57:35.679789 35457 api_server.go:51] waiting for apiserver process to appear ...
I0511 22:57:35.679833 35457 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I0511 22:57:35.689889 35457 api_server.go:71] duration metric: took 10.100742ms to wait for apiserver process to appear ...
I0511 22:57:35.689909 35457 api_server.go:87] waiting for apiserver healthz status ...
I0511 22:57:35.689917 35457 api_server.go:240] Checking apiserver healthz at https://192.168.49.2:8441/healthz ...
I0511 22:57:35.696026 35457 api_server.go:266] https://192.168.49.2:8441/healthz returned 200:
ok
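The healthz probe above goes straight to https://192.168.49.2:8441/healthz. To repeat it outside the test harness, either of the following works (a sketch, using the context name and endpoint from this run):

  # via the kubeconfig context created by minikube
  kubectl --context functional-20220511225632-7294 get --raw /healthz
  # or directly against the exposed apiserver port, skipping cert verification
  curl -k https://192.168.49.2:8441/healthz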
I0511 22:57:35.702712 35457 api_server.go:140] control plane version: v1.23.5
I0511 22:57:35.702727 35457 api_server.go:130] duration metric: took 12.814549ms to wait for apiserver health ...
I0511 22:57:35.702735 35457 cni.go:95] Creating CNI manager for ""
I0511 22:57:35.702741 35457 cni.go:169] CNI unnecessary in this configuration, recommending no CNI
I0511 22:57:35.702746 35457 system_pods.go:43] waiting for kube-system pods to appear ...
I0511 22:57:35.711211 35457 system_pods.go:59] 7 kube-system pods found
I0511 22:57:35.711232 35457 system_pods.go:61] "coredns-64897985d-rp5lc" [88948fa9-8a33-48b9-b0c6-4e9a46669f71] Running
I0511 22:57:35.711241 35457 system_pods.go:61] "etcd-functional-20220511225632-7294" [f312edb5-af11-4192-85ec-7695ee2b2a25] Running
I0511 22:57:35.711247 35457 system_pods.go:61] "kube-apiserver-functional-20220511225632-7294" [a4f35d6f-dcbc-443e-8e7e-1fc37b28b6b5] Running
I0511 22:57:35.711253 35457 system_pods.go:61] "kube-controller-manager-functional-20220511225632-7294" [61c4db7c-be56-4869-8785-302ce3fce852] Running
I0511 22:57:35.711259 35457 system_pods.go:61] "kube-proxy-dvl88" [2a0a0c62-17ad-4a54-a1c2-c7afd88c9c38] Running
I0511 22:57:35.711271 35457 system_pods.go:61] "kube-scheduler-functional-20220511225632-7294" [e0571f38-423a-457a-aefb-b2857ed93938] Running / Ready:ContainersNotReady (containers with unready status: [kube-scheduler]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-scheduler])
I0511 22:57:35.711280 35457 system_pods.go:61] "storage-provisioner" [13d6b36a-da63-427d-9a0c-67cc25dc9131] Running / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I0511 22:57:35.711285 35457 system_pods.go:74] duration metric: took 8.534921ms to wait for pod list to return data ...
I0511 22:57:35.711294 35457 node_conditions.go:102] verifying NodePressure condition ...
I0511 22:57:35.714732 35457 node_conditions.go:122] node storage ephemeral capacity is 304695084Ki
I0511 22:57:35.714750 35457 node_conditions.go:123] node cpu capacity is 8
I0511 22:57:35.714763 35457 node_conditions.go:105] duration metric: took 3.464684ms to run NodePressure ...
I0511 22:57:35.714783 35457 ssh_runner.go:195] Run: /bin/bash -c "sudo env PATH="/var/lib/minikube/binaries/v1.23.5:$PATH" kubeadm init phase addon all --config /var/tmp/minikube/kubeadm.yaml"
I0511 22:57:36.258598 35457 kubeadm.go:737] waiting for restarted kubelet to initialise ...
I0511 22:57:36.264138 35457 kubeadm.go:752] kubelet initialised
I0511 22:57:36.264152 35457 kubeadm.go:753] duration metric: took 5.535509ms waiting for restarted kubelet to initialise ...
I0511 22:57:36.264159 35457 pod_ready.go:35] extra waiting up to 4m0s for all system-critical pods including labels [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] to be "Ready" ...
I0511 22:57:36.269834 35457 pod_ready.go:78] waiting up to 4m0s for pod "coredns-64897985d-rp5lc" in "kube-system" namespace to be "Ready" ...
I0511 22:57:36.275460 35457 pod_ready.go:97] node "functional-20220511225632-7294" hosting pod "coredns-64897985d-rp5lc" in "kube-system" namespace is currently not "Ready" (skipping!): node "functional-20220511225632-7294" has status "Ready":"False"
I0511 22:57:36.275473 35457 pod_ready.go:81] duration metric: took 5.626441ms waiting for pod "coredns-64897985d-rp5lc" in "kube-system" namespace to be "Ready" ...
E0511 22:57:36.275483 35457 pod_ready.go:66] WaitExtra: waitPodCondition: node "functional-20220511225632-7294" hosting pod "coredns-64897985d-rp5lc" in "kube-system" namespace is currently not "Ready" (skipping!): node "functional-20220511225632-7294" has status "Ready":"False"
I0511 22:57:36.275508 35457 pod_ready.go:78] waiting up to 4m0s for pod "etcd-functional-20220511225632-7294" in "kube-system" namespace to be "Ready" ...
I0511 22:57:36.280386 35457 pod_ready.go:97] node "functional-20220511225632-7294" hosting pod "etcd-functional-20220511225632-7294" in "kube-system" namespace is currently not "Ready" (skipping!): node "functional-20220511225632-7294" has status "Ready":"False"
I0511 22:57:36.280398 35457 pod_ready.go:81] duration metric: took 4.881858ms waiting for pod "etcd-functional-20220511225632-7294" in "kube-system" namespace to be "Ready" ...
E0511 22:57:36.280408 35457 pod_ready.go:66] WaitExtra: waitPodCondition: node "functional-20220511225632-7294" hosting pod "etcd-functional-20220511225632-7294" in "kube-system" namespace is currently not "Ready" (skipping!): node "functional-20220511225632-7294" has status "Ready":"False"
I0511 22:57:36.280439 35457 pod_ready.go:78] waiting up to 4m0s for pod "kube-apiserver-functional-20220511225632-7294" in "kube-system" namespace to be "Ready" ...
I0511 22:57:36.284840 35457 pod_ready.go:97] node "functional-20220511225632-7294" hosting pod "kube-apiserver-functional-20220511225632-7294" in "kube-system" namespace is currently not "Ready" (skipping!): node "functional-20220511225632-7294" has status "Ready":"False"
I0511 22:57:36.284852 35457 pod_ready.go:81] duration metric: took 4.405653ms waiting for pod "kube-apiserver-functional-20220511225632-7294" in "kube-system" namespace to be "Ready" ...
E0511 22:57:36.284859 35457 pod_ready.go:66] WaitExtra: waitPodCondition: node "functional-20220511225632-7294" hosting pod "kube-apiserver-functional-20220511225632-7294" in "kube-system" namespace is currently not "Ready" (skipping!): node "functional-20220511225632-7294" has status "Ready":"False"
I0511 22:57:36.284880 35457 pod_ready.go:78] waiting up to 4m0s for pod "kube-controller-manager-functional-20220511225632-7294" in "kube-system" namespace to be "Ready" ...
I0511 22:57:36.289473 35457 pod_ready.go:97] node "functional-20220511225632-7294" hosting pod "kube-controller-manager-functional-20220511225632-7294" in "kube-system" namespace is currently not "Ready" (skipping!): node "functional-20220511225632-7294" has status "Ready":"False"
I0511 22:57:36.289489 35457 pod_ready.go:81] duration metric: took 4.600784ms waiting for pod "kube-controller-manager-functional-20220511225632-7294" in "kube-system" namespace to be "Ready" ...
E0511 22:57:36.289499 35457 pod_ready.go:66] WaitExtra: waitPodCondition: node "functional-20220511225632-7294" hosting pod "kube-controller-manager-functional-20220511225632-7294" in "kube-system" namespace is currently not "Ready" (skipping!): node "functional-20220511225632-7294" has status "Ready":"False"
I0511 22:57:36.289524 35457 pod_ready.go:78] waiting up to 4m0s for pod "kube-proxy-dvl88" in "kube-system" namespace to be "Ready" ...
I0511 22:57:36.661705 35457 pod_ready.go:92] pod "kube-proxy-dvl88" in "kube-system" namespace has status "Ready":"True"
I0511 22:57:36.661715 35457 pod_ready.go:81] duration metric: took 372.183894ms waiting for pod "kube-proxy-dvl88" in "kube-system" namespace to be "Ready" ...
I0511 22:57:36.661728 35457 pod_ready.go:78] waiting up to 4m0s for pod "kube-scheduler-functional-20220511225632-7294" in "kube-system" namespace to be "Ready" ...
I0511 22:57:38.063249 35457 pod_ready.go:97] error getting pod "kube-scheduler-functional-20220511225632-7294" in "kube-system" namespace (skipping!): Get "https://192.168.49.2:8441/api/v1/namespaces/kube-system/pods/kube-scheduler-functional-20220511225632-7294": dial tcp 192.168.49.2:8441: connect: connection refused
I0511 22:57:38.063275 35457 pod_ready.go:81] duration metric: took 1.40153873s waiting for pod "kube-scheduler-functional-20220511225632-7294" in "kube-system" namespace to be "Ready" ...
E0511 22:57:38.063287 35457 pod_ready.go:66] WaitExtra: waitPodCondition: error getting pod "kube-scheduler-functional-20220511225632-7294" in "kube-system" namespace (skipping!): Get "https://192.168.49.2:8441/api/v1/namespaces/kube-system/pods/kube-scheduler-functional-20220511225632-7294": dial tcp 192.168.49.2:8441: connect: connection refused
I0511 22:57:38.063307 35457 pod_ready.go:38] duration metric: took 1.799139276s for extra waiting for all system-critical and pods with labels [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] to be "Ready" ...
I0511 22:57:38.063321 35457 ssh_runner.go:195] Run: /bin/bash -c "cat /proc/$(pgrep kube-apiserver)/oom_adj"
W0511 22:57:38.071117 35457 kubeadm.go:761] unable to adjust resource limits: oom_adj check cmd /bin/bash -c "cat /proc/$(pgrep kube-apiserver)/oom_adj". : /bin/bash -c "cat /proc/$(pgrep kube-apiserver)/oom_adj": Process exited with status 1
stdout:
stderr:
cat: /proc//oom_adj: No such file or directory
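The oom_adj warning above is logged at W level and is not fatal to the restart: pgrep found no running kube-apiserver at that instant (the container was mid-restart), so the path expanded to /proc//oom_adj. A sketch of the same check once the apiserver is back up, using the profile name from this run:

  # read the apiserver's OOM adjustment inside the node container
  minikube -p functional-20220511225632-7294 ssh -- \
    'cat /proc/$(pgrep -xn kube-apiserver)/oom_adj'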
I0511 22:57:38.071133 35457 kubeadm.go:605] restartCluster took 8.771583459s
I0511 22:57:38.071139 35457 kubeadm.go:393] StartCluster complete in 8.809261538s
I0511 22:57:38.071156 35457 settings.go:142] acquiring lock: {Name:mk1287875a6024bfdfd8882975fa4d7c31d85e31 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0511 22:57:38.071259 35457 settings.go:150] Updating kubeconfig: /home/jenkins/minikube-integration/linux-amd64-docker-docker-13639-3547-60328d4d40a11ac7c18c6243f597bcfbb3050148/kubeconfig
I0511 22:57:38.071834 35457 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/linux-amd64-docker-docker-13639-3547-60328d4d40a11ac7c18c6243f597bcfbb3050148/kubeconfig: {Name:mka611e3c6ccae6ff6a6751a4f0fde8a6d2789a9 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
W0511 22:57:38.073027 35457 kapi.go:226] failed getting deployment scale, will retry: Get "https://192.168.49.2:8441/apis/apps/v1/namespaces/kube-system/deployments/coredns/scale": dial tcp 192.168.49.2:8441: connect: connection refused
I0511 22:57:40.309257 35457 kapi.go:244] deployment "coredns" in namespace "kube-system" and context "functional-20220511225632-7294" rescaled to 1
I0511 22:57:40.309329 35457 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.23.5/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml"
I0511 22:57:40.309440 35457 start.go:208] Will wait 6m0s for node &{Name: IP:192.168.49.2 Port:8441 KubernetesVersion:v1.23.5 ContainerRuntime:docker ControlPlane:true Worker:true}
I0511 22:57:40.309708 35457 config.go:178] Loaded profile config "functional-20220511225632-7294": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.23.5
I0511 22:57:40.312831 35457 out.go:177] * Verifying Kubernetes components...
I0511 22:57:40.309821 35457 addons.go:415] enableAddons start: toEnable=map[ambassador:false auto-pause:false csi-hostpath-driver:false dashboard:false default-storageclass:true efk:false freshpod:false gcp-auth:false gvisor:false helm-tiller:false ingress:false ingress-dns:false istio:false istio-provisioner:false kong:false kubevirt:false logviewer:false metallb:false metrics-server:false nvidia-driver-installer:false nvidia-gpu-device-plugin:false olm:false pod-security-policy:false portainer:false registry:false registry-aliases:false registry-creds:false storage-provisioner:true storage-provisioner-gluster:false volumesnapshots:false], additional=[]
I0511 22:57:40.314249 35457 addons.go:65] Setting storage-provisioner=true in profile "functional-20220511225632-7294"
I0511 22:57:40.314262 35457 addons.go:153] Setting addon storage-provisioner=true in "functional-20220511225632-7294"
W0511 22:57:40.314267 35457 addons.go:165] addon storage-provisioner should already be in state true
I0511 22:57:40.314265 35457 addons.go:65] Setting default-storageclass=true in profile "functional-20220511225632-7294"
I0511 22:57:40.314278 35457 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet
I0511 22:57:40.314283 35457 addons_storage_classes.go:33] enableOrDisableStorageClasses default-storageclass=true on "functional-20220511225632-7294"
I0511 22:57:40.314303 35457 host.go:66] Checking if "functional-20220511225632-7294" exists ...
I0511 22:57:40.314550 35457 cli_runner.go:164] Run: docker container inspect functional-20220511225632-7294 --format={{.State.Status}}
I0511 22:57:40.314647 35457 cli_runner.go:164] Run: docker container inspect functional-20220511225632-7294 --format={{.State.Status}}
I0511 22:57:40.349957 35457 out.go:177] - Using image gcr.io/k8s-minikube/storage-provisioner:v5
I0511 22:57:40.351574 35457 addons.go:348] installing /etc/kubernetes/addons/storage-provisioner.yaml
I0511 22:57:40.351582 35457 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/storage-provisioner.yaml (2676 bytes)
I0511 22:57:40.351624 35457 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" functional-20220511225632-7294
I0511 22:57:40.399936 35457 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:49167 SSHKeyPath:/home/jenkins/minikube-integration/linux-amd64-docker-docker-13639-3547-60328d4d40a11ac7c18c6243f597bcfbb3050148/.minikube/machines/functional-20220511225632-7294/id_rsa Username:docker}
I0511 22:57:40.468752 35457 addons.go:153] Setting addon default-storageclass=true in "functional-20220511225632-7294"
W0511 22:57:40.468770 35457 addons.go:165] addon default-storageclass should already be in state true
I0511 22:57:40.468802 35457 host.go:66] Checking if "functional-20220511225632-7294" exists ...
I0511 22:57:40.469194 35457 cli_runner.go:164] Run: docker container inspect functional-20220511225632-7294 --format={{.State.Status}}
I0511 22:57:40.506296 35457 addons.go:348] installing /etc/kubernetes/addons/storageclass.yaml
I0511 22:57:40.506307 35457 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/storageclass.yaml (271 bytes)
I0511 22:57:40.506349 35457 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" functional-20220511225632-7294
I0511 22:57:40.538750 35457 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:49167 SSHKeyPath:/home/jenkins/minikube-integration/linux-amd64-docker-docker-13639-3547-60328d4d40a11ac7c18c6243f597bcfbb3050148/.minikube/machines/functional-20220511225632-7294/id_rsa Username:docker}
I0511 22:57:40.588255 35457 start.go:795] CoreDNS already contains "host.minikube.internal" host record, skipping...
I0511 22:57:40.588252 35457 node_ready.go:35] waiting up to 6m0s for node "functional-20220511225632-7294" to be "Ready" ...
I0511 22:57:40.590936 35457 node_ready.go:49] node "functional-20220511225632-7294" has status "Ready":"True"
I0511 22:57:40.590945 35457 node_ready.go:38] duration metric: took 2.673051ms waiting for node "functional-20220511225632-7294" to be "Ready" ...
I0511 22:57:40.590954 35457 pod_ready.go:35] extra waiting up to 6m0s for all system-critical pods including labels [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] to be "Ready" ...
I0511 22:57:40.597044 35457 pod_ready.go:78] waiting up to 6m0s for pod "coredns-64897985d-rp5lc" in "kube-system" namespace to be "Ready" ...
I0511 22:57:40.601060 35457 pod_ready.go:92] pod "coredns-64897985d-rp5lc" in "kube-system" namespace has status "Ready":"True"
I0511 22:57:40.601067 35457 pod_ready.go:81] duration metric: took 4.00719ms waiting for pod "coredns-64897985d-rp5lc" in "kube-system" namespace to be "Ready" ...
I0511 22:57:40.601074 35457 pod_ready.go:78] waiting up to 6m0s for pod "etcd-functional-20220511225632-7294" in "kube-system" namespace to be "Ready" ...
I0511 22:57:40.604731 35457 pod_ready.go:92] pod "etcd-functional-20220511225632-7294" in "kube-system" namespace has status "Ready":"True"
I0511 22:57:40.604741 35457 pod_ready.go:81] duration metric: took 3.660833ms waiting for pod "etcd-functional-20220511225632-7294" in "kube-system" namespace to be "Ready" ...
I0511 22:57:40.604751 35457 pod_ready.go:78] waiting up to 6m0s for pod "kube-controller-manager-functional-20220511225632-7294" in "kube-system" namespace to be "Ready" ...
I0511 22:57:40.608733 35457 pod_ready.go:92] pod "kube-controller-manager-functional-20220511225632-7294" in "kube-system" namespace has status "Ready":"True"
I0511 22:57:40.608742 35457 pod_ready.go:81] duration metric: took 3.985273ms waiting for pod "kube-controller-manager-functional-20220511225632-7294" in "kube-system" namespace to be "Ready" ...
I0511 22:57:40.608749 35457 pod_ready.go:78] waiting up to 6m0s for pod "kube-proxy-dvl88" in "kube-system" namespace to be "Ready" ...
I0511 22:57:40.656204 35457 pod_ready.go:92] pod "kube-proxy-dvl88" in "kube-system" namespace has status "Ready":"True"
I0511 22:57:40.656215 35457 pod_ready.go:81] duration metric: took 47.460341ms waiting for pod "kube-proxy-dvl88" in "kube-system" namespace to be "Ready" ...
I0511 22:57:40.656226 35457 pod_ready.go:78] waiting up to 6m0s for pod "kube-scheduler-functional-20220511225632-7294" in "kube-system" namespace to be "Ready" ...
I0511 22:57:40.668168 35457 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.23.5/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml
I0511 22:57:40.668425 35457 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.23.5/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml
I0511 22:57:41.383828 35457 out.go:177] * Enabled addons: default-storageclass, storage-provisioner
I0511 22:57:41.385658 35457 addons.go:417] enableAddons completed in 1.075835131s
I0511 22:57:42.997546 35457 pod_ready.go:102] pod "kube-scheduler-functional-20220511225632-7294" in "kube-system" namespace has status "Ready":"False"
I0511 22:57:45.496973 35457 pod_ready.go:102] pod "kube-scheduler-functional-20220511225632-7294" in "kube-system" namespace has status "Ready":"False"
I0511 22:57:47.997491 35457 pod_ready.go:102] pod "kube-scheduler-functional-20220511225632-7294" in "kube-system" namespace has status "Ready":"False"
I0511 22:57:49.996931 35457 pod_ready.go:92] pod "kube-scheduler-functional-20220511225632-7294" in "kube-system" namespace has status "Ready":"True"
I0511 22:57:49.996949 35457 pod_ready.go:81] duration metric: took 9.34071682s waiting for pod "kube-scheduler-functional-20220511225632-7294" in "kube-system" namespace to be "Ready" ...
I0511 22:57:49.996958 35457 pod_ready.go:38] duration metric: took 9.405994611s for extra waiting for all system-critical and pods with labels [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] to be "Ready" ...
I0511 22:57:49.996976 35457 api_server.go:51] waiting for apiserver process to appear ...
I0511 22:57:49.997025 35457 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I0511 22:57:50.006522 35457 api_server.go:71] duration metric: took 9.697053726s to wait for apiserver process to appear ...
I0511 22:57:50.006537 35457 api_server.go:87] waiting for apiserver healthz status ...
I0511 22:57:50.006545 35457 api_server.go:240] Checking apiserver healthz at https://192.168.49.2:8441/healthz ...
I0511 22:57:50.011202 35457 api_server.go:266] https://192.168.49.2:8441/healthz returned 200:
ok
I0511 22:57:50.012030 35457 api_server.go:140] control plane version: v1.23.5
I0511 22:57:50.012041 35457 api_server.go:130] duration metric: took 5.499631ms to wait for apiserver health ...
I0511 22:57:50.012048 35457 system_pods.go:43] waiting for kube-system pods to appear ...
I0511 22:57:50.017642 35457 system_pods.go:59] 7 kube-system pods found
I0511 22:57:50.017659 35457 system_pods.go:61] "coredns-64897985d-rp5lc" [88948fa9-8a33-48b9-b0c6-4e9a46669f71] Running
I0511 22:57:50.017666 35457 system_pods.go:61] "etcd-functional-20220511225632-7294" [f312edb5-af11-4192-85ec-7695ee2b2a25] Running
I0511 22:57:50.017677 35457 system_pods.go:61] "kube-apiserver-functional-20220511225632-7294" [94691691-fe44-4ba7-9eb3-9e2887c77fb3] Running / Ready:ContainersNotReady (containers with unready status: [kube-apiserver]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-apiserver])
I0511 22:57:50.017697 35457 system_pods.go:61] "kube-controller-manager-functional-20220511225632-7294" [61c4db7c-be56-4869-8785-302ce3fce852] Running
I0511 22:57:50.017705 35457 system_pods.go:61] "kube-proxy-dvl88" [2a0a0c62-17ad-4a54-a1c2-c7afd88c9c38] Running
I0511 22:57:50.017711 35457 system_pods.go:61] "kube-scheduler-functional-20220511225632-7294" [e0571f38-423a-457a-aefb-b2857ed93938] Running
I0511 22:57:50.017717 35457 system_pods.go:61] "storage-provisioner" [13d6b36a-da63-427d-9a0c-67cc25dc9131] Running
I0511 22:57:50.017722 35457 system_pods.go:74] duration metric: took 5.669565ms to wait for pod list to return data ...
I0511 22:57:50.017730 35457 default_sa.go:34] waiting for default service account to be created ...
I0511 22:57:50.020156 35457 default_sa.go:45] found service account: "default"
I0511 22:57:50.020168 35457 default_sa.go:55] duration metric: took 2.4325ms for default service account to be created ...
I0511 22:57:50.020175 35457 system_pods.go:116] waiting for k8s-apps to be running ...
I0511 22:57:50.025108 35457 system_pods.go:86] 7 kube-system pods found
I0511 22:57:50.025121 35457 system_pods.go:89] "coredns-64897985d-rp5lc" [88948fa9-8a33-48b9-b0c6-4e9a46669f71] Running
I0511 22:57:50.025126 35457 system_pods.go:89] "etcd-functional-20220511225632-7294" [f312edb5-af11-4192-85ec-7695ee2b2a25] Running
I0511 22:57:50.025132 35457 system_pods.go:89] "kube-apiserver-functional-20220511225632-7294" [94691691-fe44-4ba7-9eb3-9e2887c77fb3] Running / Ready:ContainersNotReady (containers with unready status: [kube-apiserver]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-apiserver])
I0511 22:57:50.025137 35457 system_pods.go:89] "kube-controller-manager-functional-20220511225632-7294" [61c4db7c-be56-4869-8785-302ce3fce852] Running
I0511 22:57:50.025141 35457 system_pods.go:89] "kube-proxy-dvl88" [2a0a0c62-17ad-4a54-a1c2-c7afd88c9c38] Running
I0511 22:57:50.025145 35457 system_pods.go:89] "kube-scheduler-functional-20220511225632-7294" [e0571f38-423a-457a-aefb-b2857ed93938] Running
I0511 22:57:50.025148 35457 system_pods.go:89] "storage-provisioner" [13d6b36a-da63-427d-9a0c-67cc25dc9131] Running
I0511 22:57:50.025153 35457 system_pods.go:126] duration metric: took 4.974177ms to wait for k8s-apps to be running ...
I0511 22:57:50.025158 35457 system_svc.go:44] waiting for kubelet service to be running ....
I0511 22:57:50.025195 35457 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet
I0511 22:57:50.035429 35457 system_svc.go:56] duration metric: took 10.2619ms WaitForService to wait for kubelet.
I0511 22:57:50.035446 35457 kubeadm.go:548] duration metric: took 9.725983142s to wait for : map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] ...
I0511 22:57:50.035467 35457 node_conditions.go:102] verifying NodePressure condition ...
I0511 22:57:50.038272 35457 node_conditions.go:122] node storage ephemeral capacity is 304695084Ki
I0511 22:57:50.038283 35457 node_conditions.go:123] node cpu capacity is 8
I0511 22:57:50.038292 35457 node_conditions.go:105] duration metric: took 2.82232ms to run NodePressure ...
I0511 22:57:50.038300 35457 start.go:213] waiting for startup goroutines ...
I0511 22:57:50.077195 35457 start.go:499] kubectl: 1.24.0, cluster: 1.23.5 (minor skew: 1)
I0511 22:57:50.079412 35457 out.go:177] * Done! kubectl is now configured to use "functional-20220511225632-7294" cluster and "default" namespace by default
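Everything from "==> Docker <==" onward appears to be the standard minikube diagnostic dump collected after the failure. The same bundle can be regenerated for this profile (a sketch):

  # re-collect the Docker, container status, describe-nodes, etc. sections below
  minikube -p functional-20220511225632-7294 logs --file=diagnostics.txt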
*
* ==> Docker <==
* -- Logs begin at Wed 2022-05-11 22:56:41 UTC, end at Wed 2022-05-11 22:57:51 UTC. --
May 11 22:56:43 functional-20220511225632-7294 dockerd[492]: time="2022-05-11T22:56:43.267823223Z" level=info msg="Docker daemon" commit=4433bf6 graphdriver(s)=overlay2 version=20.10.15
May 11 22:56:43 functional-20220511225632-7294 dockerd[492]: time="2022-05-11T22:56:43.267884476Z" level=info msg="Daemon has completed initialization"
May 11 22:56:43 functional-20220511225632-7294 systemd[1]: Started Docker Application Container Engine.
May 11 22:56:43 functional-20220511225632-7294 dockerd[492]: time="2022-05-11T22:56:43.285562381Z" level=info msg="API listen on [::]:2376"
May 11 22:56:43 functional-20220511225632-7294 dockerd[492]: time="2022-05-11T22:56:43.288863281Z" level=info msg="API listen on /var/run/docker.sock"
May 11 22:57:16 functional-20220511225632-7294 dockerd[492]: time="2022-05-11T22:57:16.648989102Z" level=info msg="ignoring event" container=a352b344b03e0847dcc7d67fe332bfd0bee4c4e9778b986e381a1e1ef8447235 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
May 11 22:57:16 functional-20220511225632-7294 dockerd[492]: time="2022-05-11T22:57:16.705127003Z" level=info msg="ignoring event" container=96f3c0503a198e6ba5bfec831fea4b37abfe00b89b7035fc53f2b80793d1fb5c module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
May 11 22:57:29 functional-20220511225632-7294 dockerd[492]: time="2022-05-11T22:57:29.403756376Z" level=info msg="ignoring event" container=16e87e1858eb49349405b242b8fa1d021f4b37a7b72658ac4af2496a2f28a964 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
May 11 22:57:29 functional-20220511225632-7294 dockerd[492]: time="2022-05-11T22:57:29.569327395Z" level=info msg="ignoring event" container=34aff32b4412f2530e54bdebed564f919ccfb71156444d777a5e89115d5c1878 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
May 11 22:57:29 functional-20220511225632-7294 dockerd[492]: time="2022-05-11T22:57:29.570173329Z" level=info msg="ignoring event" container=1221efbd7e06205471df7e2a383d7d79fef058b46d33a14e5d2d34cfa1d6e077 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
May 11 22:57:29 functional-20220511225632-7294 dockerd[492]: time="2022-05-11T22:57:29.578243227Z" level=info msg="ignoring event" container=78cac41764a33fd0c605b41880fb3cee305fee97b787c7259d30cbda3d7483b9 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
May 11 22:57:29 functional-20220511225632-7294 dockerd[492]: time="2022-05-11T22:57:29.578616150Z" level=info msg="ignoring event" container=6d0ac8abd5b03a3205a1521cb68618a95b744f73ebc5b7c7adb45da3fc3e82cf module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
May 11 22:57:29 functional-20220511225632-7294 dockerd[492]: time="2022-05-11T22:57:29.579312284Z" level=info msg="ignoring event" container=215089bd9ae9aae649433e1fbab73912baae419bfd4d753707845504f3b98843 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
May 11 22:57:29 functional-20220511225632-7294 dockerd[492]: time="2022-05-11T22:57:29.581375152Z" level=info msg="ignoring event" container=d142d56ab3a59e9bc5ffa4bac88a96a743d009c19c1d036affbcf987de718b53 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
May 11 22:57:29 functional-20220511225632-7294 dockerd[492]: time="2022-05-11T22:57:29.656873858Z" level=info msg="ignoring event" container=49e2085959b29b1ea1e31a45ccecb0da571df49c811e4d2aabf115912c00cc89 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
May 11 22:57:29 functional-20220511225632-7294 dockerd[492]: time="2022-05-11T22:57:29.657294850Z" level=info msg="ignoring event" container=5960bc7b51877b0a163536ae35120f477a43cbd368b4f366695bcdfb55621d4d module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
May 11 22:57:29 functional-20220511225632-7294 dockerd[492]: time="2022-05-11T22:57:29.657338127Z" level=info msg="ignoring event" container=df57c21cb948c97c4113c0f5044c0414122d9e80e695a3ca6171e6f0c9289167 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
May 11 22:57:29 functional-20220511225632-7294 dockerd[492]: time="2022-05-11T22:57:29.657361943Z" level=info msg="ignoring event" container=ad029eb198e2339bcbbcdef49480b3a0be286031c9f34931dacae1e809941ac1 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
May 11 22:57:29 functional-20220511225632-7294 dockerd[492]: time="2022-05-11T22:57:29.979002596Z" level=info msg="ignoring event" container=c1806e43aa640eaf138d5f05d3744a10e684dc88df1bb600c82cc16fe608c887 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
May 11 22:57:30 functional-20220511225632-7294 dockerd[492]: time="2022-05-11T22:57:30.056241537Z" level=info msg="ignoring event" container=eb08f8db1d6e27e36a666d8405b52ad35d7e7adaef728db64bd3e79de223bb55 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
May 11 22:57:30 functional-20220511225632-7294 dockerd[492]: time="2022-05-11T22:57:30.773013963Z" level=info msg="ignoring event" container=371f9ddf15672c187f5cb7d898615742ac9ecc66ab0e259c357ef2f14c2fdb9e module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
May 11 22:57:34 functional-20220511225632-7294 dockerd[492]: time="2022-05-11T22:57:34.575406995Z" level=info msg="ignoring event" container=f70fad5bd078cf07f7e7ed8154916362deba1c8b5983fc855cd901d1464cf436 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
May 11 22:57:37 functional-20220511225632-7294 dockerd[492]: time="2022-05-11T22:57:37.284040875Z" level=info msg="ignoring event" container=a5c57b0b7b34c206536ae62d8e231382b1cc2342e88a753b577c373d438d932c module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
May 11 22:57:38 functional-20220511225632-7294 dockerd[492]: time="2022-05-11T22:57:38.009231466Z" level=info msg="ignoring event" container=067bc9c4a9736cec4cf56addc23c1078a5ac3746f2688acc12b69209072c0250 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
May 11 22:57:38 functional-20220511225632-7294 dockerd[492]: time="2022-05-11T22:57:38.068700597Z" level=info msg="ignoring event" container=0c161d1f0fc54e20a77d39b045aad7b0cc1f936dd47835514ec3de43d767b404 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
*
* ==> container status <==
* CONTAINER IMAGE CREATED STATE NAME ATTEMPT POD ID
90b845a534869 6e38f40d628db 9 seconds ago Running storage-provisioner 2 d5147b97b930d
625a39b81866c a4ca41631cc7a 9 seconds ago Running coredns 1 7d04e67054299
640a51c740165 3fc1d62d65872 13 seconds ago Running kube-apiserver 1 1bc5da56420aa
a5c57b0b7b34c 3fc1d62d65872 14 seconds ago Exited kube-apiserver 0 1bc5da56420aa
371f9ddf15672 6e38f40d628db 21 seconds ago Exited storage-provisioner 1 d5147b97b930d
52e87935cb774 884d49d6d8c9f 21 seconds ago Running kube-scheduler 1 1540a8f1f51f7
eeedd61097726 25f8c7f3da61c 21 seconds ago Running etcd 1 5e995562ac047
56d11119fb846 b0c9e5e4dbb14 21 seconds ago Running kube-controller-manager 1 7479e200b6258
5b836b9c5be8b 3c53fa8541f95 21 seconds ago Running kube-proxy 1 44718ebc0a76f
f70fad5bd078c a4ca41631cc7a 41 seconds ago Exited coredns 0 d142d56ab3a59
78cac41764a33 3c53fa8541f95 42 seconds ago Exited kube-proxy 0 1221efbd7e062
ad029eb198e23 b0c9e5e4dbb14 About a minute ago Exited kube-controller-manager 0 49e2085959b29
c1806e43aa640 884d49d6d8c9f About a minute ago Exited kube-scheduler 0 215089bd9ae9a
5960bc7b51877 25f8c7f3da61c About a minute ago Exited etcd 0 df57c21cb948c
*
* ==> coredns [625a39b81866] <==
* .:53
[INFO] plugin/reload: Running configuration MD5 = cec3c60eb1cc4909fd4579a8d79ea031
CoreDNS-1.8.6
linux/amd64, go1.17.1, 13a9191
*
* ==> coredns [f70fad5bd078] <==
* .:53
[INFO] plugin/reload: Running configuration MD5 = db32ca3650231d74073ff4cf814959a7
CoreDNS-1.8.6
linux/amd64, go1.17.1, 13a9191
[INFO] SIGTERM: Shutting down servers then terminating
[INFO] plugin/health: Going into lameduck mode for 5s
*
* ==> describe nodes <==
* Name: functional-20220511225632-7294
Roles: control-plane,master
Labels: beta.kubernetes.io/arch=amd64
beta.kubernetes.io/os=linux
kubernetes.io/arch=amd64
kubernetes.io/hostname=functional-20220511225632-7294
kubernetes.io/os=linux
minikube.k8s.io/commit=50a7977b568d2ad3e04003527a57f4502d6177a0
minikube.k8s.io/name=functional-20220511225632-7294
minikube.k8s.io/primary=true
minikube.k8s.io/updated_at=2022_05_11T22_56_55_0700
minikube.k8s.io/version=v1.25.2
node-role.kubernetes.io/control-plane=
node-role.kubernetes.io/master=
node.kubernetes.io/exclude-from-external-load-balancers=
Annotations: kubeadm.alpha.kubernetes.io/cri-socket: /var/run/dockershim.sock
node.alpha.kubernetes.io/ttl: 0
volumes.kubernetes.io/controller-managed-attach-detach: true
CreationTimestamp: Wed, 11 May 2022 22:56:52 +0000
Taints: <none>
Unschedulable: false
Lease:
HolderIdentity: functional-20220511225632-7294
AcquireTime: <unset>
RenewTime: Wed, 11 May 2022 22:57:45 +0000
Conditions:
Type Status LastHeartbeatTime LastTransitionTime Reason Message
---- ------ ----------------- ------------------ ------ -------
MemoryPressure False Wed, 11 May 2022 22:57:36 +0000 Wed, 11 May 2022 22:56:50 +0000 KubeletHasSufficientMemory kubelet has sufficient memory available
DiskPressure False Wed, 11 May 2022 22:57:36 +0000 Wed, 11 May 2022 22:56:50 +0000 KubeletHasNoDiskPressure kubelet has no disk pressure
PIDPressure False Wed, 11 May 2022 22:57:36 +0000 Wed, 11 May 2022 22:56:50 +0000 KubeletHasSufficientPID kubelet has sufficient PID available
Ready True Wed, 11 May 2022 22:57:36 +0000 Wed, 11 May 2022 22:57:36 +0000 KubeletReady kubelet is posting ready status
Addresses:
InternalIP: 192.168.49.2
Hostname: functional-20220511225632-7294
Capacity:
cpu: 8
ephemeral-storage: 304695084Ki
hugepages-1Gi: 0
hugepages-2Mi: 0
memory: 32873824Ki
pods: 110
Allocatable:
cpu: 8
ephemeral-storage: 304695084Ki
hugepages-1Gi: 0
hugepages-2Mi: 0
memory: 32873824Ki
pods: 110
System Info:
Machine ID: 8556a0a9a0e64ba4b825f672d2dce0b9
System UUID: 5880ed1f-c668-484b-8a7c-800dbd789255
Boot ID: 606a2383-21e3-4a1f-9ace-302a4c5cda25
Kernel Version: 5.13.0-1025-gcp
OS Image: Ubuntu 20.04.4 LTS
Operating System: linux
Architecture: amd64
Container Runtime Version: docker://20.10.15
Kubelet Version: v1.23.5
Kube-Proxy Version: v1.23.5
PodCIDR: 10.244.0.0/24
PodCIDRs: 10.244.0.0/24
Non-terminated Pods: (7 in total)
Namespace Name CPU Requests CPU Limits Memory Requests Memory Limits Age
--------- ---- ------------ ---------- --------------- ------------- ---
kube-system coredns-64897985d-rp5lc 100m (1%) 0 (0%) 70Mi (0%) 170Mi (0%) 43s
kube-system etcd-functional-20220511225632-7294 100m (1%) 0 (0%) 100Mi (0%) 0 (0%) 56s
kube-system kube-apiserver-functional-20220511225632-7294 250m (3%) 0 (0%) 0 (0%) 0 (0%) 10s
kube-system kube-controller-manager-functional-20220511225632-7294 200m (2%) 0 (0%) 0 (0%) 0 (0%) 58s
kube-system kube-proxy-dvl88 0 (0%) 0 (0%) 0 (0%) 0 (0%) 43s
kube-system kube-scheduler-functional-20220511225632-7294 100m (1%) 0 (0%) 0 (0%) 0 (0%) 56s
kube-system storage-provisioner 0 (0%) 0 (0%) 0 (0%) 0 (0%) 40s
Allocated resources:
(Total limits may be over 100 percent, i.e., overcommitted.)
Resource Requests Limits
-------- -------- ------
cpu 750m (9%) 0 (0%)
memory 170Mi (0%) 170Mi (0%)
ephemeral-storage 0 (0%) 0 (0%)
hugepages-1Gi 0 (0%) 0 (0%)
hugepages-2Mi 0 (0%) 0 (0%)
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal Starting 15s kube-proxy
Normal Starting 41s kube-proxy
Normal NodeHasSufficientMemory 56s kubelet Node functional-20220511225632-7294 status is now: NodeHasSufficientMemory
Normal NodeHasNoDiskPressure 56s kubelet Node functional-20220511225632-7294 status is now: NodeHasNoDiskPressure
Normal NodeHasSufficientPID 56s kubelet Node functional-20220511225632-7294 status is now: NodeHasSufficientPID
Normal NodeAllocatableEnforced 56s kubelet Updated Node Allocatable limit across pods
Normal Starting 56s kubelet Starting kubelet.
Normal NodeReady 45s kubelet Node functional-20220511225632-7294 status is now: NodeReady
Normal Starting 16s kubelet Starting kubelet.
Normal NodeHasNoDiskPressure 16s kubelet Node functional-20220511225632-7294 status is now: NodeHasNoDiskPressure
Normal NodeHasSufficientPID 16s kubelet Node functional-20220511225632-7294 status is now: NodeHasSufficientPID
Normal NodeNotReady 16s kubelet Node functional-20220511225632-7294 status is now: NodeNotReady
Normal NodeHasSufficientMemory 16s kubelet Node functional-20220511225632-7294 status is now: NodeHasSufficientMemory
Normal NodeAllocatableEnforced 15s kubelet Updated Node Allocatable limit across pods
Normal NodeReady 15s kubelet Node functional-20220511225632-7294 status is now: NodeReady
*
* ==> dmesg <==
* [May11 22:17] #2
[ +0.001730] #3
[ +0.000877] #4
[ +0.003053] MDS CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/mds.html for more details.
[ +0.001948] TAA CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/tsx_async_abort.html for more details.
[ +0.001470] #5
[ +0.000769] #6
[ +0.003206] #7
[ +0.050833] acpi PNP0A03:00: fail to add MMCONFIG information, can't access extended PCI configuration space under this bridge.
[ +0.564823] i8042: Warning: Keylock active
[ +0.010210] platform eisa.0: EISA: Cannot allocate resource for mainboard
[ +0.000726] platform eisa.0: Cannot allocate resource for EISA slot 1
[ +0.000650] platform eisa.0: Cannot allocate resource for EISA slot 2
[ +0.000650] platform eisa.0: Cannot allocate resource for EISA slot 3
[ +0.000722] platform eisa.0: Cannot allocate resource for EISA slot 4
[ +0.000628] platform eisa.0: Cannot allocate resource for EISA slot 5
[ +0.000639] platform eisa.0: Cannot allocate resource for EISA slot 6
[ +0.000704] platform eisa.0: Cannot allocate resource for EISA slot 7
[ +0.000680] platform eisa.0: Cannot allocate resource for EISA slot 8
[ +7.967240] kauditd_printk_skb: 32 callbacks suppressed
*
* ==> etcd [5960bc7b5187] <==
* {"level":"info","ts":"2022-05-11T22:56:50.579Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"aec36adc501070cc became pre-candidate at term 1"}
{"level":"info","ts":"2022-05-11T22:56:50.579Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"aec36adc501070cc received MsgPreVoteResp from aec36adc501070cc at term 1"}
{"level":"info","ts":"2022-05-11T22:56:50.579Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"aec36adc501070cc became candidate at term 2"}
{"level":"info","ts":"2022-05-11T22:56:50.579Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"aec36adc501070cc received MsgVoteResp from aec36adc501070cc at term 2"}
{"level":"info","ts":"2022-05-11T22:56:50.579Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"aec36adc501070cc became leader at term 2"}
{"level":"info","ts":"2022-05-11T22:56:50.579Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"raft.node: aec36adc501070cc elected leader aec36adc501070cc at term 2"}
{"level":"info","ts":"2022-05-11T22:56:50.579Z","caller":"etcdserver/server.go:2027","msg":"published local member to cluster through raft","local-member-id":"aec36adc501070cc","local-member-attributes":"{Name:functional-20220511225632-7294 ClientURLs:[https://192.168.49.2:2379]}","request-path":"/0/members/aec36adc501070cc/attributes","cluster-id":"fa54960ea34d58be","publish-timeout":"7s"}
{"level":"info","ts":"2022-05-11T22:56:50.579Z","caller":"embed/serve.go:98","msg":"ready to serve client requests"}
{"level":"info","ts":"2022-05-11T22:56:50.579Z","caller":"embed/serve.go:98","msg":"ready to serve client requests"}
{"level":"info","ts":"2022-05-11T22:56:50.579Z","caller":"etcdmain/main.go:47","msg":"notifying init daemon"}
{"level":"info","ts":"2022-05-11T22:56:50.579Z","caller":"etcdserver/server.go:2476","msg":"setting up initial cluster version using v2 API","cluster-version":"3.5"}
{"level":"info","ts":"2022-05-11T22:56:50.579Z","caller":"etcdmain/main.go:53","msg":"successfully notified init daemon"}
{"level":"info","ts":"2022-05-11T22:56:50.580Z","caller":"membership/cluster.go:584","msg":"set initial cluster version","cluster-id":"fa54960ea34d58be","local-member-id":"aec36adc501070cc","cluster-version":"3.5"}
{"level":"info","ts":"2022-05-11T22:56:50.580Z","caller":"api/capability.go:75","msg":"enabled capabilities for version","cluster-version":"3.5"}
{"level":"info","ts":"2022-05-11T22:56:50.580Z","caller":"etcdserver/server.go:2500","msg":"cluster version is updated","cluster-version":"3.5"}
{"level":"info","ts":"2022-05-11T22:56:50.580Z","caller":"embed/serve.go:188","msg":"serving client traffic securely","address":"127.0.0.1:2379"}
{"level":"info","ts":"2022-05-11T22:56:50.581Z","caller":"embed/serve.go:188","msg":"serving client traffic securely","address":"192.168.49.2:2379"}
{"level":"info","ts":"2022-05-11T22:57:29.463Z","caller":"osutil/interrupt_unix.go:64","msg":"received signal; shutting down","signal":"terminated"}
{"level":"info","ts":"2022-05-11T22:57:29.463Z","caller":"embed/etcd.go:367","msg":"closing etcd server","name":"functional-20220511225632-7294","data-dir":"/var/lib/minikube/etcd","advertise-peer-urls":["https://192.168.49.2:2380"],"advertise-client-urls":["https://192.168.49.2:2379"]}
WARNING: 2022/05/11 22:57:29 [core] grpc: addrConn.createTransport failed to connect to {192.168.49.2:2379 192.168.49.2:2379 <nil> 0 <nil>}. Err: connection error: desc = "transport: Error while dialing dial tcp 192.168.49.2:2379: connect: connection refused". Reconnecting...
WARNING: 2022/05/11 22:57:29 [core] grpc: addrConn.createTransport failed to connect to {127.0.0.1:2379 127.0.0.1:2379 <nil> 0 <nil>}. Err: connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting...
{"level":"info","ts":"2022-05-11T22:57:29.474Z","caller":"etcdserver/server.go:1438","msg":"skipped leadership transfer for single voting member cluster","local-member-id":"aec36adc501070cc","current-leader-member-id":"aec36adc501070cc"}
{"level":"info","ts":"2022-05-11T22:57:29.476Z","caller":"embed/etcd.go:562","msg":"stopping serving peer traffic","address":"192.168.49.2:2380"}
{"level":"info","ts":"2022-05-11T22:57:29.477Z","caller":"embed/etcd.go:567","msg":"stopped serving peer traffic","address":"192.168.49.2:2380"}
{"level":"info","ts":"2022-05-11T22:57:29.477Z","caller":"embed/etcd.go:369","msg":"closed etcd server","name":"functional-20220511225632-7294","data-dir":"/var/lib/minikube/etcd","advertise-peer-urls":["https://192.168.49.2:2380"],"advertise-client-urls":["https://192.168.49.2:2379"]}
*
* ==> etcd [eeedd6109772] <==
* {"level":"info","ts":"2022-05-11T22:57:30.871Z","caller":"etcdserver/server.go:843","msg":"starting etcd server","local-member-id":"aec36adc501070cc","local-server-version":"3.5.1","cluster-version":"to_be_decided"}
{"level":"info","ts":"2022-05-11T22:57:30.873Z","caller":"etcdserver/server.go:744","msg":"starting initial election tick advance","election-ticks":10}
{"level":"info","ts":"2022-05-11T22:57:30.873Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"aec36adc501070cc switched to configuration voters=(12593026477526642892)"}
{"level":"info","ts":"2022-05-11T22:57:30.873Z","caller":"membership/cluster.go:421","msg":"added member","cluster-id":"fa54960ea34d58be","local-member-id":"aec36adc501070cc","added-peer-id":"aec36adc501070cc","added-peer-peer-urls":["https://192.168.49.2:2380"]}
{"level":"info","ts":"2022-05-11T22:57:30.873Z","caller":"membership/cluster.go:584","msg":"set initial cluster version","cluster-id":"fa54960ea34d58be","local-member-id":"aec36adc501070cc","cluster-version":"3.5"}
{"level":"info","ts":"2022-05-11T22:57:30.873Z","caller":"api/capability.go:75","msg":"enabled capabilities for version","cluster-version":"3.5"}
{"level":"info","ts":"2022-05-11T22:57:30.875Z","caller":"embed/etcd.go:687","msg":"starting with client TLS","tls-info":"cert = /var/lib/minikube/certs/etcd/server.crt, key = /var/lib/minikube/certs/etcd/server.key, client-cert=, client-key=, trusted-ca = /var/lib/minikube/certs/etcd/ca.crt, client-cert-auth = true, crl-file = ","cipher-suites":[]}
{"level":"info","ts":"2022-05-11T22:57:30.876Z","caller":"embed/etcd.go:276","msg":"now serving peer/client/metrics","local-member-id":"aec36adc501070cc","initial-advertise-peer-urls":["https://192.168.49.2:2380"],"listen-peer-urls":["https://192.168.49.2:2380"],"advertise-client-urls":["https://192.168.49.2:2379"],"listen-client-urls":["https://127.0.0.1:2379","https://192.168.49.2:2379"],"listen-metrics-urls":["http://127.0.0.1:2381"]}
{"level":"info","ts":"2022-05-11T22:57:30.876Z","caller":"embed/etcd.go:762","msg":"serving metrics","address":"http://127.0.0.1:2381"}
{"level":"info","ts":"2022-05-11T22:57:30.876Z","caller":"embed/etcd.go:580","msg":"serving peer traffic","address":"192.168.49.2:2380"}
{"level":"info","ts":"2022-05-11T22:57:30.876Z","caller":"embed/etcd.go:552","msg":"cmux::serve","address":"192.168.49.2:2380"}
{"level":"info","ts":"2022-05-11T22:57:32.164Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"aec36adc501070cc is starting a new election at term 2"}
{"level":"info","ts":"2022-05-11T22:57:32.164Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"aec36adc501070cc became pre-candidate at term 2"}
{"level":"info","ts":"2022-05-11T22:57:32.164Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"aec36adc501070cc received MsgPreVoteResp from aec36adc501070cc at term 2"}
{"level":"info","ts":"2022-05-11T22:57:32.164Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"aec36adc501070cc became candidate at term 3"}
{"level":"info","ts":"2022-05-11T22:57:32.164Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"aec36adc501070cc received MsgVoteResp from aec36adc501070cc at term 3"}
{"level":"info","ts":"2022-05-11T22:57:32.164Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"aec36adc501070cc became leader at term 3"}
{"level":"info","ts":"2022-05-11T22:57:32.164Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"raft.node: aec36adc501070cc elected leader aec36adc501070cc at term 3"}
{"level":"info","ts":"2022-05-11T22:57:32.165Z","caller":"etcdserver/server.go:2027","msg":"published local member to cluster through raft","local-member-id":"aec36adc501070cc","local-member-attributes":"{Name:functional-20220511225632-7294 ClientURLs:[https://192.168.49.2:2379]}","request-path":"/0/members/aec36adc501070cc/attributes","cluster-id":"fa54960ea34d58be","publish-timeout":"7s"}
{"level":"info","ts":"2022-05-11T22:57:32.165Z","caller":"embed/serve.go:98","msg":"ready to serve client requests"}
{"level":"info","ts":"2022-05-11T22:57:32.165Z","caller":"embed/serve.go:98","msg":"ready to serve client requests"}
{"level":"info","ts":"2022-05-11T22:57:32.166Z","caller":"etcdmain/main.go:47","msg":"notifying init daemon"}
{"level":"info","ts":"2022-05-11T22:57:32.166Z","caller":"etcdmain/main.go:53","msg":"successfully notified init daemon"}
{"level":"info","ts":"2022-05-11T22:57:32.168Z","caller":"embed/serve.go:188","msg":"serving client traffic securely","address":"192.168.49.2:2379"}
{"level":"info","ts":"2022-05-11T22:57:32.168Z","caller":"embed/serve.go:188","msg":"serving client traffic securely","address":"127.0.0.1:2379"}
*
* ==> kernel <==
* 22:57:51 up 40 min, 0 users, load average: 1.39, 1.00, 0.46
Linux functional-20220511225632-7294 5.13.0-1025-gcp #30~20.04.1-Ubuntu SMP Tue Apr 26 03:01:25 UTC 2022 x86_64 x86_64 x86_64 GNU/Linux
PRETTY_NAME="Ubuntu 20.04.4 LTS"
*
* ==> kube-apiserver [640a51c74016] <==
* I0511 22:57:40.279462 1 naming_controller.go:291] Starting NamingConditionController
I0511 22:57:40.288433 1 apiservice_controller.go:97] Starting APIServiceRegistrationController
I0511 22:57:40.288460 1 cache.go:32] Waiting for caches to sync for APIServiceRegistrationController controller
I0511 22:57:40.288978 1 autoregister_controller.go:141] Starting autoregister controller
I0511 22:57:40.288998 1 cache.go:32] Waiting for caches to sync for autoregister controller
I0511 22:57:40.289022 1 cluster_authentication_trust_controller.go:440] Starting cluster_authentication_trust_controller controller
I0511 22:57:40.289032 1 shared_informer.go:240] Waiting for caches to sync for cluster_authentication_trust_controller
I0511 22:57:40.289078 1 dynamic_cafile_content.go:156] "Starting controller" name="client-ca-bundle::/var/lib/minikube/certs/ca.crt"
I0511 22:57:40.289637 1 crdregistration_controller.go:111] Starting crd-autoregister controller
I0511 22:57:40.289724 1 shared_informer.go:240] Waiting for caches to sync for crd-autoregister
I0511 22:57:40.290534 1 dynamic_cafile_content.go:156] "Starting controller" name="request-header::/var/lib/minikube/certs/front-proxy-ca.crt"
I0511 22:57:40.455099 1 shared_informer.go:247] Caches are synced for cluster_authentication_trust_controller
I0511 22:57:40.455166 1 cache.go:39] Caches are synced for APIServiceRegistrationController controller
I0511 22:57:40.456401 1 cache.go:39] Caches are synced for autoregister controller
I0511 22:57:40.456709 1 shared_informer.go:247] Caches are synced for crd-autoregister
E0511 22:57:40.456733 1 controller.go:157] Error removing old endpoints from kubernetes service: no master IPs were listed in storage, refusing to erase all endpoints for the kubernetes service
I0511 22:57:40.476887 1 apf_controller.go:322] Running API Priority and Fairness config worker
I0511 22:57:40.477110 1 shared_informer.go:247] Caches are synced for node_authorizer
I0511 22:57:40.477113 1 cache.go:39] Caches are synced for AvailableConditionController controller
I0511 22:57:41.276757 1 controller.go:132] OpenAPI AggregationController: action for item : Nothing (removed from the queue).
I0511 22:57:41.276796 1 controller.go:132] OpenAPI AggregationController: action for item k8s_internal_local_delegation_chain_0000000000: Nothing (removed from the queue).
I0511 22:57:41.280537 1 storage_scheduling.go:109] all system priority classes are created successfully or already exist.
I0511 22:57:45.996121 1 controller.go:611] quota admission added evaluator for: leases.coordination.k8s.io
I0511 22:57:46.681702 1 controller.go:611] quota admission added evaluator for: endpoints
I0511 22:57:46.727937 1 controller.go:611] quota admission added evaluator for: endpointslices.discovery.k8s.io
*
* ==> kube-apiserver [a5c57b0b7b34] <==
* I0511 22:57:37.264082 1 server.go:565] external host was not specified, using 192.168.49.2
I0511 22:57:37.264761 1 server.go:172] Version: v1.23.5
E0511 22:57:37.265072 1 run.go:74] "command failed" err="failed to create listener: failed to listen on 0.0.0.0:8441: listen tcp 0.0.0.0:8441: bind: address already in use"
*
* ==> kube-controller-manager [56d11119fb84] <==
* I0511 22:57:46.595569 1 shared_informer.go:247] Caches are synced for PVC protection
I0511 22:57:46.597755 1 shared_informer.go:247] Caches are synced for ephemeral
I0511 22:57:46.607109 1 shared_informer.go:247] Caches are synced for daemon sets
I0511 22:57:46.625289 1 shared_informer.go:247] Caches are synced for ReplicationController
I0511 22:57:46.630990 1 shared_informer.go:247] Caches are synced for resource quota
I0511 22:57:46.632226 1 shared_informer.go:247] Caches are synced for resource quota
I0511 22:57:46.634352 1 shared_informer.go:247] Caches are synced for job
I0511 22:57:46.656770 1 shared_informer.go:247] Caches are synced for disruption
I0511 22:57:46.656801 1 disruption.go:371] Sending events to api server.
I0511 22:57:46.661016 1 shared_informer.go:247] Caches are synced for persistent volume
I0511 22:57:46.663236 1 shared_informer.go:247] Caches are synced for stateful set
I0511 22:57:46.664353 1 shared_informer.go:247] Caches are synced for HPA
I0511 22:57:46.666486 1 shared_informer.go:247] Caches are synced for deployment
I0511 22:57:46.669328 1 shared_informer.go:247] Caches are synced for ReplicaSet
I0511 22:57:46.670540 1 shared_informer.go:247] Caches are synced for endpoint
I0511 22:57:46.672680 1 shared_informer.go:247] Caches are synced for taint
I0511 22:57:46.672723 1 taint_manager.go:187] "Starting NoExecuteTaintManager"
I0511 22:57:46.672751 1 node_lifecycle_controller.go:1397] Initializing eviction metric for zone:
W0511 22:57:46.672830 1 node_lifecycle_controller.go:1012] Missing timestamp for Node functional-20220511225632-7294. Assuming now as a timestamp.
I0511 22:57:46.672867 1 node_lifecycle_controller.go:1213] Controller detected that zone is now in state Normal.
I0511 22:57:46.673158 1 event.go:294] "Event occurred" object="functional-20220511225632-7294" kind="Node" apiVersion="v1" type="Normal" reason="RegisteredNode" message="Node functional-20220511225632-7294 event: Registered Node functional-20220511225632-7294 in Controller"
I0511 22:57:46.676698 1 shared_informer.go:247] Caches are synced for endpoint_slice
I0511 22:57:47.045609 1 shared_informer.go:247] Caches are synced for garbage collector
I0511 22:57:47.104526 1 shared_informer.go:247] Caches are synced for garbage collector
I0511 22:57:47.104559 1 garbagecollector.go:155] Garbage collector: all resource monitors have synced. Proceeding to collect garbage
*
* ==> kube-controller-manager [ad029eb198e2] <==
* I0511 22:57:08.855901 1 shared_informer.go:247] Caches are synced for taint
I0511 22:57:08.855992 1 node_lifecycle_controller.go:1397] Initializing eviction metric for zone:
I0511 22:57:08.856012 1 shared_informer.go:247] Caches are synced for certificate-csrsigning-kube-apiserver-client
W0511 22:57:08.856056 1 node_lifecycle_controller.go:1012] Missing timestamp for Node functional-20220511225632-7294. Assuming now as a timestamp.
I0511 22:57:08.856090 1 node_lifecycle_controller.go:1213] Controller detected that zone is now in state Normal.
I0511 22:57:08.856000 1 shared_informer.go:247] Caches are synced for certificate-csrsigning-legacy-unknown
I0511 22:57:08.856156 1 shared_informer.go:247] Caches are synced for service account
I0511 22:57:08.856201 1 taint_manager.go:187] "Starting NoExecuteTaintManager"
I0511 22:57:08.856210 1 shared_informer.go:247] Caches are synced for expand
I0511 22:57:08.856291 1 event.go:294] "Event occurred" object="functional-20220511225632-7294" kind="Node" apiVersion="v1" type="Normal" reason="RegisteredNode" message="Node functional-20220511225632-7294 event: Registered Node functional-20220511225632-7294 in Controller"
I0511 22:57:08.868144 1 event.go:294] "Event occurred" object="kube-system/coredns" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled up replica set coredns-64897985d to 2"
I0511 22:57:08.875474 1 range_allocator.go:374] Set node functional-20220511225632-7294 PodCIDR to [10.244.0.0/24]
I0511 22:57:08.958191 1 event.go:294] "Event occurred" object="kube-system/coredns-64897985d" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: coredns-64897985d-nsjmf"
I0511 22:57:08.961935 1 event.go:294] "Event occurred" object="kube-system/kube-proxy" kind="DaemonSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: kube-proxy-dvl88"
I0511 22:57:08.970391 1 event.go:294] "Event occurred" object="kube-system/coredns-64897985d" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: coredns-64897985d-rp5lc"
I0511 22:57:08.971981 1 shared_informer.go:247] Caches are synced for cronjob
I0511 22:57:09.055366 1 shared_informer.go:247] Caches are synced for TTL after finished
I0511 22:57:09.055828 1 shared_informer.go:247] Caches are synced for job
I0511 22:57:09.082936 1 shared_informer.go:247] Caches are synced for resource quota
I0511 22:57:09.145249 1 shared_informer.go:247] Caches are synced for resource quota
I0511 22:57:09.190561 1 event.go:294] "Event occurred" object="kube-system/coredns" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled down replica set coredns-64897985d to 1"
I0511 22:57:09.194319 1 event.go:294] "Event occurred" object="kube-system/coredns-64897985d" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulDelete" message="Deleted pod: coredns-64897985d-nsjmf"
I0511 22:57:09.465114 1 shared_informer.go:247] Caches are synced for garbage collector
I0511 22:57:09.522683 1 shared_informer.go:247] Caches are synced for garbage collector
I0511 22:57:09.522706 1 garbagecollector.go:155] Garbage collector: all resource monitors have synced. Proceeding to collect garbage
*
* ==> kube-proxy [5b836b9c5be8] <==
* E0511 22:57:30.962881 1 node.go:152] Failed to retrieve node info: Get "https://control-plane.minikube.internal:8441/api/v1/nodes/functional-20220511225632-7294": dial tcp 192.168.49.2:8441: connect: connection refused
E0511 22:57:33.865458 1 node.go:152] Failed to retrieve node info: nodes "functional-20220511225632-7294" is forbidden: User "system:serviceaccount:kube-system:kube-proxy" cannot get resource "nodes" in API group "" at the cluster scope: RBAC: [clusterrole.rbac.authorization.k8s.io "system:service-account-issuer-discovery" not found, clusterrole.rbac.authorization.k8s.io "system:public-info-viewer" not found, clusterrole.rbac.authorization.k8s.io "system:basic-user" not found, clusterrole.rbac.authorization.k8s.io "system:node-proxier" not found, clusterrole.rbac.authorization.k8s.io "system:discovery" not found]
I0511 22:57:36.158518 1 node.go:163] Successfully retrieved node IP: 192.168.49.2
I0511 22:57:36.158562 1 server_others.go:138] "Detected node IP" address="192.168.49.2"
I0511 22:57:36.158609 1 server_others.go:561] "Unknown proxy mode, assuming iptables proxy" proxyMode=""
I0511 22:57:36.259821 1 server_others.go:206] "Using iptables Proxier"
I0511 22:57:36.259871 1 server_others.go:213] "kube-proxy running in dual-stack mode" ipFamily=IPv4
I0511 22:57:36.259884 1 server_others.go:214] "Creating dualStackProxier for iptables"
I0511 22:57:36.259912 1 server_others.go:491] "Detect-local-mode set to ClusterCIDR, but no IPv6 cluster CIDR defined, , defaulting to no-op detect-local for IPv6"
I0511 22:57:36.260327 1 server.go:656] "Version info" version="v1.23.5"
I0511 22:57:36.261382 1 config.go:317] "Starting service config controller"
I0511 22:57:36.261424 1 shared_informer.go:240] Waiting for caches to sync for service config
I0511 22:57:36.261426 1 config.go:226] "Starting endpoint slice config controller"
I0511 22:57:36.261446 1 shared_informer.go:240] Waiting for caches to sync for endpoint slice config
I0511 22:57:36.361598 1 shared_informer.go:247] Caches are synced for service config
I0511 22:57:36.361608 1 shared_informer.go:247] Caches are synced for endpoint slice config
*
* ==> kube-proxy [78cac41764a3] <==
* I0511 22:57:09.575527 1 node.go:163] Successfully retrieved node IP: 192.168.49.2
I0511 22:57:09.575643 1 server_others.go:138] "Detected node IP" address="192.168.49.2"
I0511 22:57:09.575678 1 server_others.go:561] "Unknown proxy mode, assuming iptables proxy" proxyMode=""
I0511 22:57:09.596944 1 server_others.go:206] "Using iptables Proxier"
I0511 22:57:09.596989 1 server_others.go:213] "kube-proxy running in dual-stack mode" ipFamily=IPv4
I0511 22:57:09.597001 1 server_others.go:214] "Creating dualStackProxier for iptables"
I0511 22:57:09.597026 1 server_others.go:491] "Detect-local-mode set to ClusterCIDR, but no IPv6 cluster CIDR defined, , defaulting to no-op detect-local for IPv6"
I0511 22:57:09.597395 1 server.go:656] "Version info" version="v1.23.5"
I0511 22:57:09.597899 1 config.go:317] "Starting service config controller"
I0511 22:57:09.597917 1 config.go:226] "Starting endpoint slice config controller"
I0511 22:57:09.597918 1 shared_informer.go:240] Waiting for caches to sync for service config
I0511 22:57:09.597927 1 shared_informer.go:240] Waiting for caches to sync for endpoint slice config
I0511 22:57:09.698604 1 shared_informer.go:247] Caches are synced for endpoint slice config
I0511 22:57:09.699044 1 shared_informer.go:247] Caches are synced for service config
*
* ==> kube-scheduler [52e87935cb77] <==
* I0511 22:57:33.865912 1 shared_informer.go:240] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file
I0511 22:57:33.865961 1 tlsconfig.go:240] "Starting DynamicServingCertificateController"
E0511 22:57:33.875332 1 plugin.go:138] "getting namespace, assuming empty set of namespace labels" err="namespace \"kube-system\" not found" namespace="kube-system"
E0511 22:57:33.875438 1 plugin.go:138] "getting namespace, assuming empty set of namespace labels" err="namespace \"kube-system\" not found" namespace="kube-system"
E0511 22:57:33.875539 1 plugin.go:138] "getting namespace, assuming empty set of namespace labels" err="namespace \"kube-system\" not found" namespace="kube-system"
E0511 22:57:33.875620 1 plugin.go:138] "getting namespace, assuming empty set of namespace labels" err="namespace \"kube-system\" not found" namespace="kube-system"
E0511 22:57:33.875644 1 plugin.go:138] "getting namespace, assuming empty set of namespace labels" err="namespace \"kube-system\" not found" namespace="kube-system"
E0511 22:57:33.875669 1 plugin.go:138] "getting namespace, assuming empty set of namespace labels" err="namespace \"kube-system\" not found" namespace="kube-system"
E0511 22:57:33.875711 1 plugin.go:138] "getting namespace, assuming empty set of namespace labels" err="namespace \"kube-system\" not found" namespace="kube-system"
I0511 22:57:33.967048 1 shared_informer.go:247] Caches are synced for client-ca::kube-system::extension-apiserver-authentication::client-ca-file
E0511 22:57:40.455197 1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.StatefulSet: unknown (get statefulsets.apps)
E0511 22:57:40.455356 1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.Namespace: unknown (get namespaces)
E0511 22:57:40.455421 1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.Service: unknown (get services)
E0511 22:57:40.455495 1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.StorageClass: unknown (get storageclasses.storage.k8s.io)
E0511 22:57:40.455616 1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.ReplicaSet: unknown (get replicasets.apps)
E0511 22:57:40.455741 1 reflector.go:138] k8s.io/apiserver/pkg/server/dynamiccertificates/configmap_cafile_content.go:205: Failed to watch *v1.ConfigMap: unknown (get configmaps)
E0511 22:57:40.456169 1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.PersistentVolume: unknown (get persistentvolumes)
E0511 22:57:40.456188 1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.PersistentVolumeClaim: unknown (get persistentvolumeclaims)
E0511 22:57:40.456204 1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.Node: unknown (get nodes)
E0511 22:57:40.456273 1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.CSINode: unknown (get csinodes.storage.k8s.io)
E0511 22:57:40.456308 1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.CSIDriver: unknown (get csidrivers.storage.k8s.io)
E0511 22:57:40.456329 1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.ReplicationController: unknown (get replicationcontrollers)
E0511 22:57:40.456366 1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.PodDisruptionBudget: unknown (get poddisruptionbudgets.policy)
E0511 22:57:40.456461 1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1beta1.CSIStorageCapacity: unknown (get csistoragecapacities.storage.k8s.io)
E0511 22:57:40.459030 1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.Pod: unknown (get pods)
*
* ==> kube-scheduler [c1806e43aa64] <==
* E0511 22:56:52.558344 1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.Namespace: failed to list *v1.Namespace: namespaces is forbidden: User "system:kube-scheduler" cannot list resource "namespaces" in API group "" at the cluster scope
W0511 22:56:52.557457 1 reflector.go:324] k8s.io/client-go/informers/factory.go:134: failed to list *v1.CSIDriver: csidrivers.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csidrivers" in API group "storage.k8s.io" at the cluster scope
E0511 22:56:52.558362 1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: csidrivers.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csidrivers" in API group "storage.k8s.io" at the cluster scope
W0511 22:56:52.557939 1 reflector.go:324] k8s.io/client-go/informers/factory.go:134: failed to list *v1.Service: services is forbidden: User "system:kube-scheduler" cannot list resource "services" in API group "" at the cluster scope
E0511 22:56:52.558389 1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.Service: failed to list *v1.Service: services is forbidden: User "system:kube-scheduler" cannot list resource "services" in API group "" at the cluster scope
W0511 22:56:53.306785 1 reflector.go:324] k8s.io/client-go/informers/factory.go:134: failed to list *v1.CSINode: csinodes.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csinodes" in API group "storage.k8s.io" at the cluster scope
E0511 22:56:53.306827 1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.CSINode: failed to list *v1.CSINode: csinodes.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csinodes" in API group "storage.k8s.io" at the cluster scope
W0511 22:56:53.395711 1 reflector.go:324] k8s.io/client-go/informers/factory.go:134: failed to list *v1.Namespace: namespaces is forbidden: User "system:kube-scheduler" cannot list resource "namespaces" in API group "" at the cluster scope
E0511 22:56:53.395742 1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.Namespace: failed to list *v1.Namespace: namespaces is forbidden: User "system:kube-scheduler" cannot list resource "namespaces" in API group "" at the cluster scope
W0511 22:56:53.412891 1 reflector.go:324] k8s.io/client-go/informers/factory.go:134: failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope
E0511 22:56:53.412917 1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.StorageClass: failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope
W0511 22:56:53.424199 1 reflector.go:324] k8s.io/client-go/informers/factory.go:134: failed to list *v1.Service: services is forbidden: User "system:kube-scheduler" cannot list resource "services" in API group "" at the cluster scope
E0511 22:56:53.424238 1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.Service: failed to list *v1.Service: services is forbidden: User "system:kube-scheduler" cannot list resource "services" in API group "" at the cluster scope
W0511 22:56:53.465695 1 reflector.go:324] k8s.io/apiserver/pkg/server/dynamiccertificates/configmap_cafile_content.go:205: failed to list *v1.ConfigMap: configmaps "extension-apiserver-authentication" is forbidden: User "system:kube-scheduler" cannot list resource "configmaps" in API group "" in the namespace "kube-system"
E0511 22:56:53.465734 1 reflector.go:138] k8s.io/apiserver/pkg/server/dynamiccertificates/configmap_cafile_content.go:205: Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps "extension-apiserver-authentication" is forbidden: User "system:kube-scheduler" cannot list resource "configmaps" in API group "" in the namespace "kube-system"
W0511 22:56:53.493541 1 reflector.go:324] k8s.io/client-go/informers/factory.go:134: failed to list *v1.Pod: pods is forbidden: User "system:kube-scheduler" cannot list resource "pods" in API group "" at the cluster scope
E0511 22:56:53.493576 1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.Pod: failed to list *v1.Pod: pods is forbidden: User "system:kube-scheduler" cannot list resource "pods" in API group "" at the cluster scope
W0511 22:56:53.509965 1 reflector.go:324] k8s.io/client-go/informers/factory.go:134: failed to list *v1.StatefulSet: statefulsets.apps is forbidden: User "system:kube-scheduler" cannot list resource "statefulsets" in API group "apps" at the cluster scope
E0511 22:56:53.509994 1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.StatefulSet: failed to list *v1.StatefulSet: statefulsets.apps is forbidden: User "system:kube-scheduler" cannot list resource "statefulsets" in API group "apps" at the cluster scope
W0511 22:56:53.592646 1 reflector.go:324] k8s.io/client-go/informers/factory.go:134: failed to list *v1.ReplicationController: replicationcontrollers is forbidden: User "system:kube-scheduler" cannot list resource "replicationcontrollers" in API group "" at the cluster scope
E0511 22:56:53.592683 1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.ReplicationController: failed to list *v1.ReplicationController: replicationcontrollers is forbidden: User "system:kube-scheduler" cannot list resource "replicationcontrollers" in API group "" at the cluster scope
I0511 22:56:56.180600 1 shared_informer.go:247] Caches are synced for client-ca::kube-system::extension-apiserver-authentication::client-ca-file
I0511 22:57:29.558782 1 secure_serving.go:311] Stopped listening on 127.0.0.1:10259
I0511 22:57:29.559000 1 tlsconfig.go:255] "Shutting down DynamicServingCertificateController"
I0511 22:57:29.559066 1 configmap_cafile_content.go:222] "Shutting down controller" name="client-ca::kube-system::extension-apiserver-authentication::client-ca-file"
*
* ==> kubelet <==
* -- Logs begin at Wed 2022-05-11 22:56:41 UTC, end at Wed 2022-05-11 22:57:51 UTC. --
May 11 22:57:38 functional-20220511225632-7294 kubelet[5718]: I0511 22:57:38.347540 5718 scope.go:110] "RemoveContainer" containerID="eb08f8db1d6e27e36a666d8405b52ad35d7e7adaef728db64bd3e79de223bb55"
May 11 22:57:38 functional-20220511225632-7294 kubelet[5718]: E0511 22:57:38.348156 5718 remote_runtime.go:572] "ContainerStatus from runtime service failed" err="rpc error: code = Unknown desc = Error: No such container: eb08f8db1d6e27e36a666d8405b52ad35d7e7adaef728db64bd3e79de223bb55" containerID="eb08f8db1d6e27e36a666d8405b52ad35d7e7adaef728db64bd3e79de223bb55"
May 11 22:57:38 functional-20220511225632-7294 kubelet[5718]: I0511 22:57:38.348193 5718 pod_container_deletor.go:52] "DeleteContainer returned error" containerID={Type:docker ID:eb08f8db1d6e27e36a666d8405b52ad35d7e7adaef728db64bd3e79de223bb55} err="failed to get container status \"eb08f8db1d6e27e36a666d8405b52ad35d7e7adaef728db64bd3e79de223bb55\": rpc error: code = Unknown desc = Error: No such container: eb08f8db1d6e27e36a666d8405b52ad35d7e7adaef728db64bd3e79de223bb55"
May 11 22:57:38 functional-20220511225632-7294 kubelet[5718]: I0511 22:57:38.361394 5718 request.go:665] Waited for 1.052001229s due to client-side throttling, not priority and fairness, request: POST:https://control-plane.minikube.internal:8441/api/v1/namespaces/kube-system/pods
May 11 22:57:39 functional-20220511225632-7294 kubelet[5718]: E0511 22:57:39.980132 5718 remote_runtime.go:479] "StopContainer from runtime service failed" err="rpc error: code = Unknown desc = Error response from daemon: No such container: 0c161d1f0fc54e20a77d39b045aad7b0cc1f936dd47835514ec3de43d767b404" containerID="0c161d1f0fc54e20a77d39b045aad7b0cc1f936dd47835514ec3de43d767b404"
May 11 22:57:39 functional-20220511225632-7294 kubelet[5718]: E0511 22:57:39.980196 5718 kuberuntime_container.go:728] "Container termination failed with gracePeriod" err="rpc error: code = Unknown desc = Error response from daemon: No such container: 0c161d1f0fc54e20a77d39b045aad7b0cc1f936dd47835514ec3de43d767b404" pod="kube-system/kube-apiserver-functional-20220511225632-7294" podUID=ac7dfe7f1749461f5e23fe13af8b8122 containerName="kube-apiserver" containerID="docker://0c161d1f0fc54e20a77d39b045aad7b0cc1f936dd47835514ec3de43d767b404" gracePeriod=1
May 11 22:57:39 functional-20220511225632-7294 kubelet[5718]: E0511 22:57:39.980217 5718 kuberuntime_container.go:753] "Kill container failed" err="rpc error: code = Unknown desc = Error response from daemon: No such container: 0c161d1f0fc54e20a77d39b045aad7b0cc1f936dd47835514ec3de43d767b404" pod="kube-system/kube-apiserver-functional-20220511225632-7294" podUID=ac7dfe7f1749461f5e23fe13af8b8122 containerName="kube-apiserver" containerID={Type:docker ID:0c161d1f0fc54e20a77d39b045aad7b0cc1f936dd47835514ec3de43d767b404}
May 11 22:57:39 functional-20220511225632-7294 kubelet[5718]: E0511 22:57:39.981591 5718 kubelet.go:1777] failed to "KillContainer" for "kube-apiserver" with KillContainerError: "rpc error: code = Unknown desc = Error response from daemon: No such container: 0c161d1f0fc54e20a77d39b045aad7b0cc1f936dd47835514ec3de43d767b404"
May 11 22:57:39 functional-20220511225632-7294 kubelet[5718]: E0511 22:57:39.981647 5718 pod_workers.go:949] "Error syncing pod, skipping" err="failed to \"KillContainer\" for \"kube-apiserver\" with KillContainerError: \"rpc error: code = Unknown desc = Error response from daemon: No such container: 0c161d1f0fc54e20a77d39b045aad7b0cc1f936dd47835514ec3de43d767b404\"" pod="kube-system/kube-apiserver-functional-20220511225632-7294" podUID=ac7dfe7f1749461f5e23fe13af8b8122
May 11 22:57:39 functional-20220511225632-7294 kubelet[5718]: I0511 22:57:39.982696 5718 kubelet_volumes.go:160] "Cleaned up orphaned pod volumes dir" podUID=ac7dfe7f1749461f5e23fe13af8b8122 path="/var/lib/kubelet/pods/ac7dfe7f1749461f5e23fe13af8b8122/volumes"
May 11 22:57:40 functional-20220511225632-7294 kubelet[5718]: E0511 22:57:40.311599 5718 projected.go:199] Error preparing data for projected volume kube-api-access-5xj69 for pod kube-system/storage-provisioner: failed to fetch token: serviceaccounts "storage-provisioner" is forbidden: User "system:node:functional-20220511225632-7294" cannot create resource "serviceaccounts/token" in API group "" in the namespace "kube-system": no relationship found between node 'functional-20220511225632-7294' and this object
May 11 22:57:40 functional-20220511225632-7294 kubelet[5718]: E0511 22:57:40.311711 5718 nestedpendingoperations.go:335] Operation for "{volumeName:kubernetes.io/projected/13d6b36a-da63-427d-9a0c-67cc25dc9131-kube-api-access-5xj69 podName:13d6b36a-da63-427d-9a0c-67cc25dc9131 nodeName:}" failed. No retries permitted until 2022-05-11 22:57:40.811678818 +0000 UTC m=+5.258951565 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-5xj69" (UniqueName: "kubernetes.io/projected/13d6b36a-da63-427d-9a0c-67cc25dc9131-kube-api-access-5xj69") pod "storage-provisioner" (UID: "13d6b36a-da63-427d-9a0c-67cc25dc9131") : failed to fetch token: serviceaccounts "storage-provisioner" is forbidden: User "system:node:functional-20220511225632-7294" cannot create resource "serviceaccounts/token" in API group "" in the namespace "kube-system": no relationship found between node 'functional-20220511225632-7294' and this object
May 11 22:57:40 functional-20220511225632-7294 kubelet[5718]: E0511 22:57:40.311808 5718 projected.go:199] Error preparing data for projected volume kube-api-access-fpw8c for pod kube-system/coredns-64897985d-rp5lc: failed to fetch token: serviceaccounts "coredns" is forbidden: User "system:node:functional-20220511225632-7294" cannot create resource "serviceaccounts/token" in API group "" in the namespace "kube-system": no relationship found between node 'functional-20220511225632-7294' and this object
May 11 22:57:40 functional-20220511225632-7294 kubelet[5718]: E0511 22:57:40.311872 5718 nestedpendingoperations.go:335] Operation for "{volumeName:kubernetes.io/projected/88948fa9-8a33-48b9-b0c6-4e9a46669f71-kube-api-access-fpw8c podName:88948fa9-8a33-48b9-b0c6-4e9a46669f71 nodeName:}" failed. No retries permitted until 2022-05-11 22:57:40.811853171 +0000 UTC m=+5.259125917 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-fpw8c" (UniqueName: "kubernetes.io/projected/88948fa9-8a33-48b9-b0c6-4e9a46669f71-kube-api-access-fpw8c") pod "coredns-64897985d-rp5lc" (UID: "88948fa9-8a33-48b9-b0c6-4e9a46669f71") : failed to fetch token: serviceaccounts "coredns" is forbidden: User "system:node:functional-20220511225632-7294" cannot create resource "serviceaccounts/token" in API group "" in the namespace "kube-system": no relationship found between node 'functional-20220511225632-7294' and this object
May 11 22:57:40 functional-20220511225632-7294 kubelet[5718]: E0511 22:57:40.311926 5718 projected.go:199] Error preparing data for projected volume kube-api-access-wjcgq for pod kube-system/kube-proxy-dvl88: failed to fetch token: serviceaccounts "kube-proxy" is forbidden: User "system:node:functional-20220511225632-7294" cannot create resource "serviceaccounts/token" in API group "" in the namespace "kube-system": no relationship found between node 'functional-20220511225632-7294' and this object
May 11 22:57:40 functional-20220511225632-7294 kubelet[5718]: E0511 22:57:40.311962 5718 nestedpendingoperations.go:335] Operation for "{volumeName:kubernetes.io/projected/2a0a0c62-17ad-4a54-a1c2-c7afd88c9c38-kube-api-access-wjcgq podName:2a0a0c62-17ad-4a54-a1c2-c7afd88c9c38 nodeName:}" failed. No retries permitted until 2022-05-11 22:57:40.811951047 +0000 UTC m=+5.259223787 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-wjcgq" (UniqueName: "kubernetes.io/projected/2a0a0c62-17ad-4a54-a1c2-c7afd88c9c38-kube-api-access-wjcgq") pod "kube-proxy-dvl88" (UID: "2a0a0c62-17ad-4a54-a1c2-c7afd88c9c38") : failed to fetch token: serviceaccounts "kube-proxy" is forbidden: User "system:node:functional-20220511225632-7294" cannot create resource "serviceaccounts/token" in API group "" in the namespace "kube-system": no relationship found between node 'functional-20220511225632-7294' and this object
May 11 22:57:40 functional-20220511225632-7294 kubelet[5718]: I0511 22:57:40.467768 5718 kubelet.go:1698] "Deleted mirror pod because it is outdated" pod="kube-system/kube-apiserver-functional-20220511225632-7294"
May 11 22:57:40 functional-20220511225632-7294 kubelet[5718]: E0511 22:57:40.470681 5718 kubelet.go:1711] "Failed creating a mirror pod for" err="pods \"kube-controller-manager-functional-20220511225632-7294\" already exists" pod="kube-system/kube-controller-manager-functional-20220511225632-7294"
May 11 22:57:40 functional-20220511225632-7294 kubelet[5718]: E0511 22:57:40.472636 5718 kubelet.go:1711] "Failed creating a mirror pod for" err="pods \"kube-scheduler-functional-20220511225632-7294\" already exists" pod="kube-system/kube-scheduler-functional-20220511225632-7294"
May 11 22:57:40 functional-20220511225632-7294 kubelet[5718]: E0511 22:57:40.472662 5718 kubelet.go:1711] "Failed creating a mirror pod for" err="pods \"etcd-functional-20220511225632-7294\" already exists" pod="kube-system/etcd-functional-20220511225632-7294"
May 11 22:57:42 functional-20220511225632-7294 kubelet[5718]: I0511 22:57:42.265494 5718 scope.go:110] "RemoveContainer" containerID="371f9ddf15672c187f5cb7d898615742ac9ecc66ab0e259c357ef2f14c2fdb9e"
May 11 22:57:42 functional-20220511225632-7294 kubelet[5718]: I0511 22:57:42.391918 5718 kubelet.go:1693] "Trying to delete pod" pod="kube-system/kube-apiserver-functional-20220511225632-7294" podUID=a4f35d6f-dcbc-443e-8e7e-1fc37b28b6b5
May 11 22:57:42 functional-20220511225632-7294 kubelet[5718]: I0511 22:57:42.837241 5718 docker_sandbox.go:402] "Failed to read pod IP from plugin/docker" err="Couldn't find network status for kube-system/coredns-64897985d-rp5lc through plugin: invalid network status for"
May 11 22:57:43 functional-20220511225632-7294 kubelet[5718]: I0511 22:57:43.399555 5718 docker_sandbox.go:402] "Failed to read pod IP from plugin/docker" err="Couldn't find network status for kube-system/coredns-64897985d-rp5lc through plugin: invalid network status for"
May 11 22:57:44 functional-20220511225632-7294 kubelet[5718]: I0511 22:57:44.414070 5718 prober_manager.go:255] "Failed to trigger a manual run" probe="Readiness"
*
* ==> storage-provisioner [371f9ddf1567] <==
* I0511 22:57:30.676978 1 storage_provisioner.go:116] Initializing the minikube storage provisioner...
F0511 22:57:30.680422 1 main.go:39] error getting server version: Get "https://10.96.0.1:443/version?timeout=32s": dial tcp 10.96.0.1:443: connect: connection refused
*
* ==> storage-provisioner [90b845a53486] <==
* I0511 22:57:42.368341 1 storage_provisioner.go:116] Initializing the minikube storage provisioner...
I0511 22:57:42.375088 1 storage_provisioner.go:141] Storage provisioner initialized, now starting service!
I0511 22:57:42.375126 1 leaderelection.go:243] attempting to acquire leader lease kube-system/k8s.io-minikube-hostpath...
-- /stdout --
helpers_test.go:254: (dbg) Run: out/minikube-linux-amd64 status --format={{.APIServer}} -p functional-20220511225632-7294 -n functional-20220511225632-7294
helpers_test.go:261: (dbg) Run: kubectl --context functional-20220511225632-7294 get po -o=jsonpath={.items[*].metadata.name} -A --field-selector=status.phase!=Running
helpers_test.go:270: non-running pods:
helpers_test.go:272: ======> post-mortem[TestFunctional/serial/ComponentHealth]: describe non-running pods <======
helpers_test.go:275: (dbg) Run: kubectl --context functional-20220511225632-7294 describe pod
helpers_test.go:275: (dbg) Non-zero exit: kubectl --context functional-20220511225632-7294 describe pod : exit status 1 (41.905518ms)
** stderr **
error: resource name may not be empty
** /stderr **
helpers_test.go:277: kubectl --context functional-20220511225632-7294 describe pod : exit status 1
--- FAIL: TestFunctional/serial/ComponentHealth (2.34s)