=== RUN TestSkaffold
skaffold_test.go:53: (dbg) Run: /tmp/skaffold.exe615250882 version
skaffold_test.go:57: skaffold version: v1.17.2
skaffold_test.go:60: (dbg) Run: out/minikube-linux-amd64 start -p skaffold-20201211204522-6575 --memory=2600 --driver=docker
skaffold_test.go:60: (dbg) Done: out/minikube-linux-amd64 start -p skaffold-20201211204522-6575 --memory=2600 --driver=docker : (28.998777817s)
skaffold_test.go:73: copying out/minikube-linux-amd64 to /home/jenkins/workspace/docker_Linux_integration/out/minikube
skaffold_test.go:97: (dbg) Run: /tmp/skaffold.exe615250882 run --minikube-profile skaffold-20201211204522-6575 --kube-context skaffold-20201211204522-6575 --status-check=true --port-forward=false
skaffold_test.go:97: (dbg) Non-zero exit: /tmp/skaffold.exe615250882 run --minikube-profile skaffold-20201211204522-6575 --kube-context skaffold-20201211204522-6575 --status-check=true --port-forward=false: exit status 1 (9.865332466s)
-- stdout --
Generating tags...
- leeroy-web -> leeroy-web:latest
- leeroy-app -> leeroy-app:latest
Some taggers failed. Rerun with -vdebug for errors.
Checking cache...
- leeroy-web: Error checking cache.
-- /stdout --
** stderr **
failed to build: getting imageID for leeroy-web:latest: The server probably has client authentication (--tlsverify) enabled. Please check your TLS client certification settings: Get "https://192.168.59.176:2376/v1.24/images/leeroy-web:latest/json": remote error: tls: bad certificate
** /stderr **
skaffold_test.go:99: error running skaffold: exit status 1
panic.go:617: *** TestSkaffold FAILED at 2020-12-11 20:46:02.021156039 +0000 UTC m=+1090.497231383
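The "getting imageID" failure above happens while skaffold checks its build cache: the Docker client it builds (presumably from the profile's docker-env settings, given --minikube-profile) issues GET /images/leeroy-web:latest/json against tcp://192.168.59.176:2376, and the daemon aborts the TLS handshake because it does not accept the client certificate. Below is a minimal sketch of an equivalent lookup with the Docker Go SDK, assuming the standard github.com/docker/docker/client package and the DOCKER_HOST / DOCKER_TLS_VERIFY / DOCKER_CERT_PATH variables that minikube docker-env exports; it is an illustration, not skaffold's actual code.

// Minimal sketch (not skaffold's code): resolve an image ID against the
// minikube Docker daemon using the environment that "minikube docker-env"
// exports. If DOCKER_CERT_PATH points at certificates signed by a CA the
// daemon does not trust, this fails with "remote error: tls: bad certificate",
// as seen in the stderr above.
package main

import (
    "context"
    "fmt"
    "log"

    "github.com/docker/docker/client"
)

func main() {
    // client.FromEnv reads DOCKER_HOST, DOCKER_TLS_VERIFY and DOCKER_CERT_PATH.
    cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
    if err != nil {
        log.Fatalf("creating docker client: %v", err)
    }
    defer cli.Close()

    // Same request class as the failing GET /images/leeroy-web:latest/json above.
    inspect, _, err := cli.ImageInspectWithRaw(context.Background(), "leeroy-web:latest")
    if err != nil {
        log.Fatalf("getting imageID for leeroy-web:latest: %v", err)
    }
    fmt.Println("imageID:", inspect.ID)
}

Re-exporting the environment for the profile under test (eval $(out/minikube-linux-amd64 -p skaffold-20201211204522-6575 docker-env)) before such a lookup would normally let the handshake succeed; the dockerd entries in the post-mortem below suggest the client here presented a certificate signed by a different CA ("jenkins").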
helpers_test.go:216: -----------------------post-mortem--------------------------------
helpers_test.go:224: ======> post-mortem[TestSkaffold]: docker inspect <======
helpers_test.go:225: (dbg) Run: docker inspect skaffold-20201211204522-6575
helpers_test.go:229: (dbg) docker inspect skaffold-20201211204522-6575:
-- stdout --
[
{
"Id": "d3f75f60c5cf1bdc8bbdb4b8be889fbbb1544ea4be1935ee581b2c7311b69c51",
"Created": "2020-12-11T20:45:24.835895759Z",
"Path": "/usr/local/bin/entrypoint",
"Args": [
"/sbin/init"
],
"State": {
"Status": "running",
"Running": true,
"Paused": false,
"Restarting": false,
"OOMKilled": false,
"Dead": false,
"Pid": 100633,
"ExitCode": 0,
"Error": "",
"StartedAt": "2020-12-11T20:45:25.381502386Z",
"FinishedAt": "0001-01-01T00:00:00Z"
},
"Image": "sha256:06db6ca724463f987019154e0475424113315da76733d5b67f90e35719d46c4d",
"ResolvConfPath": "/var/lib/docker/containers/d3f75f60c5cf1bdc8bbdb4b8be889fbbb1544ea4be1935ee581b2c7311b69c51/resolv.conf",
"HostnamePath": "/var/lib/docker/containers/d3f75f60c5cf1bdc8bbdb4b8be889fbbb1544ea4be1935ee581b2c7311b69c51/hostname",
"HostsPath": "/var/lib/docker/containers/d3f75f60c5cf1bdc8bbdb4b8be889fbbb1544ea4be1935ee581b2c7311b69c51/hosts",
"LogPath": "/var/lib/docker/containers/d3f75f60c5cf1bdc8bbdb4b8be889fbbb1544ea4be1935ee581b2c7311b69c51/d3f75f60c5cf1bdc8bbdb4b8be889fbbb1544ea4be1935ee581b2c7311b69c51-json.log",
"Name": "/skaffold-20201211204522-6575",
"RestartCount": 0,
"Driver": "overlay2",
"Platform": "linux",
"MountLabel": "",
"ProcessLabel": "",
"AppArmorProfile": "",
"ExecIDs": null,
"HostConfig": {
"Binds": [
"skaffold-20201211204522-6575:/var",
"/lib/modules:/lib/modules:ro"
],
"ContainerIDFile": "",
"LogConfig": {
"Type": "json-file",
"Config": {}
},
"NetworkMode": "skaffold-20201211204522-6575",
"PortBindings": {
"22/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"2376/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"5000/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"8443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
]
},
"RestartPolicy": {
"Name": "no",
"MaximumRetryCount": 0
},
"AutoRemove": false,
"VolumeDriver": "",
"VolumesFrom": null,
"CapAdd": null,
"CapDrop": null,
"Capabilities": null,
"Dns": [],
"DnsOptions": [],
"DnsSearch": [],
"ExtraHosts": null,
"GroupAdd": null,
"IpcMode": "private",
"Cgroup": "",
"Links": null,
"OomScoreAdj": 0,
"PidMode": "",
"Privileged": true,
"PublishAllPorts": false,
"ReadonlyRootfs": false,
"SecurityOpt": [
"seccomp=unconfined",
"apparmor=unconfined",
"label=disable"
],
"Tmpfs": {
"/run": "",
"/tmp": ""
},
"UTSMode": "",
"UsernsMode": "",
"ShmSize": 67108864,
"Runtime": "runc",
"ConsoleSize": [
0,
0
],
"Isolation": "",
"CpuShares": 0,
"Memory": 2726297600,
"NanoCpus": 2000000000,
"CgroupParent": "",
"BlkioWeight": 0,
"BlkioWeightDevice": [],
"BlkioDeviceReadBps": null,
"BlkioDeviceWriteBps": null,
"BlkioDeviceReadIOps": null,
"BlkioDeviceWriteIOps": null,
"CpuPeriod": 0,
"CpuQuota": 0,
"CpuRealtimePeriod": 0,
"CpuRealtimeRuntime": 0,
"CpusetCpus": "",
"CpusetMems": "",
"Devices": [],
"DeviceCgroupRules": null,
"DeviceRequests": null,
"KernelMemory": 0,
"KernelMemoryTCP": 0,
"MemoryReservation": 0,
"MemorySwap": -1,
"MemorySwappiness": null,
"OomKillDisable": false,
"PidsLimit": null,
"Ulimits": null,
"CpuCount": 0,
"CpuPercent": 0,
"IOMaximumIOps": 0,
"IOMaximumBandwidth": 0,
"MaskedPaths": null,
"ReadonlyPaths": null
},
"GraphDriver": {
"Data": {
"LowerDir": "/var/lib/docker/overlay2/c3b88d24e06a6dc408276b8a3a7c237801f805d751f777ba0c08ebdc374154e3-init/diff:/var/lib/docker/overlay2/0e454b6080878527938ea7de6c9c9ca3b4e998e7de12dad0ae5d11e5a02b28ed/diff:/var/lib/docker/overlay2/b1080cf3f1c07fe0908008d3edafdd563736ac0728810681091089d69df5a2b3/diff:/var/lib/docker/overlay2/4b9b1225dce44601fa36a73ecf95bd95cdf7d55d704e1b1b9c58ca9a4f4c9db6/diff:/var/lib/docker/overlay2/9afd996885692e28e1b5ceccecab499370a88a6f5aa9af63f99ddcdf47496087/diff:/var/lib/docker/overlay2/e172f94dbfa36e7b7833646884a898590a9a7bb72f03461557b32d46044e2bf3/diff:/var/lib/docker/overlay2/5e940e100bc7df14fbaaca63292506dc23bd933bac16de182b67f8107cfc71b5/diff:/var/lib/docker/overlay2/597b039ba9eb2c747ffeca49b446b61e3121712aac6a1ce013d50a6998c46d93/diff:/var/lib/docker/overlay2/a3668e684110bc4003a16d3fe4a80296a1308a376b08c428b3ae7469edae4b8b/diff:/var/lib/docker/overlay2/617d4351ebaac66d685427e45dfe1cd0a4e0e0ac9dc4ccb7a2f382e1bfc8697d/diff:/var/lib/docker/overlay2/4ac76b
6b6d368a1f1073f4a45b6b80445e04f69010fcc5e524fe8edc2708fd5c/diff:/var/lib/docker/overlay2/155961688c82c43af6d27a734eeeae0fd8eb1766bbb9d2728d834a593622196c/diff:/var/lib/docker/overlay2/8f6b3c33ada50dd91034ae6c41722655db1e7a86bb4b61e1152696c41336aa44/diff:/var/lib/docker/overlay2/39286e41dafe62c271b224fbeaa14b9ca058246bcc76e7a81d75f765a497015e/diff:/var/lib/docker/overlay2/bc0dfc1142718ddc4a235a7a62a371f8d580e48ef41f886bce3bb6598f329ea5/diff:/var/lib/docker/overlay2/285b5808cfef05f77db3330400ad926f089148ece291f130ab7f4c822fa7be5a/diff:/var/lib/docker/overlay2/ac9feea04da985551cdd80f2698f28d116958d31882dc3888245ace574de7021/diff:/var/lib/docker/overlay2/93bc1cd943ffc655fc209b896ce12e6863e5adcbc32cee2830311b118ef17f97/diff:/var/lib/docker/overlay2/e9ca47c898e17aff2b310e13256ac91b8efff61646ca77ebe764817e42c9e278/diff:/var/lib/docker/overlay2/a0c4a393ccf7eb7a3b75e0e421d72749d0641f4e74b689d9b2a1dc9d5f9f2985/diff:/var/lib/docker/overlay2/f3ed5047774399a74e83080908727ed9f42ef86312f5879e724083ee96dc4a98/diff:/var/lib/d
ocker/overlay2/17ad49d1fc1b2eb336aaec3919f67e90045a835b6ad03fa72a6a02f8b2d7a6f9/diff:/var/lib/docker/overlay2/dda0100db23cb2ecb0d502c53e7269e90548d0f6043615cfefea6fd1a42ef67f/diff:/var/lib/docker/overlay2/accfdaeb1d703f222e13547e8fd4c06305c41c6256ac59237b67ac17b702ff5d/diff:/var/lib/docker/overlay2/e4dc6c7d508ce1056ebda7e5bf4239bb48eaa2ad06a4d483e67380212ef84a10/diff:/var/lib/docker/overlay2/d6be635d55a88f802a01d5678467aa3fe46b951550c2d880458b733ff9f56a19/diff:/var/lib/docker/overlay2/d31bed28baf1efe4c8eea0c512e6641cdfa98622cfa1f45f4f463c8e4f0ea9e6/diff:/var/lib/docker/overlay2/4eb064c2961cd60979726e7d7a78d8ac3a96af3b41699c69090b4aec9263e5f7/diff:/var/lib/docker/overlay2/66ec0abca0956048e37f5c5e2125cf299a362b35d473021004316fd83d85d33b/diff:/var/lib/docker/overlay2/5ba45d5dede37c09dccf9193592382ae7516027a675d2631ec64e687b9745c00/diff:/var/lib/docker/overlay2/1ceade4823b29f813f08c3db2bd4d966999ac776084d4b7b054d7220b5689943/diff:/var/lib/docker/overlay2/0e3148261963465326228e3e9b1f52398a9551f01263a9b78bebaa06184
ed2af/diff:/var/lib/docker/overlay2/4d90ca5a45e75e28de2e79390ac1c0075f60b1bbd9446a169ec9c45ca0702256/diff:/var/lib/docker/overlay2/b317057fb455e15ebe8bf80b7713f8ad35aff0405e06e48a935b1965b47214e7/diff:/var/lib/docker/overlay2/ed65d2c3c21872669bade9290359857509ebcf1d7427db303d876c8efdcda07b/diff",
"MergedDir": "/var/lib/docker/overlay2/c3b88d24e06a6dc408276b8a3a7c237801f805d751f777ba0c08ebdc374154e3/merged",
"UpperDir": "/var/lib/docker/overlay2/c3b88d24e06a6dc408276b8a3a7c237801f805d751f777ba0c08ebdc374154e3/diff",
"WorkDir": "/var/lib/docker/overlay2/c3b88d24e06a6dc408276b8a3a7c237801f805d751f777ba0c08ebdc374154e3/work"
},
"Name": "overlay2"
},
"Mounts": [
{
"Type": "bind",
"Source": "/lib/modules",
"Destination": "/lib/modules",
"Mode": "ro",
"RW": false,
"Propagation": "rprivate"
},
{
"Type": "volume",
"Name": "skaffold-20201211204522-6575",
"Source": "/var/lib/docker/volumes/skaffold-20201211204522-6575/_data",
"Destination": "/var",
"Driver": "local",
"Mode": "z",
"RW": true,
"Propagation": ""
}
],
"Config": {
"Hostname": "skaffold-20201211204522-6575",
"Domainname": "",
"User": "root",
"AttachStdin": false,
"AttachStdout": false,
"AttachStderr": false,
"ExposedPorts": {
"22/tcp": {},
"2376/tcp": {},
"5000/tcp": {},
"8443/tcp": {}
},
"Tty": true,
"OpenStdin": false,
"StdinOnce": false,
"Env": [
"container=docker",
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
],
"Cmd": null,
"Image": "gcr.io/k8s-minikube/kicbase:v0.0.15-snapshot4@sha256:ef1f485b5a1cfa4c989bc05e153f0a8525968ec999e242efff871cbb31649c16",
"Volumes": null,
"WorkingDir": "",
"Entrypoint": [
"/usr/local/bin/entrypoint",
"/sbin/init"
],
"OnBuild": null,
"Labels": {
"created_by.minikube.sigs.k8s.io": "true",
"mode.minikube.sigs.k8s.io": "skaffold-20201211204522-6575",
"name.minikube.sigs.k8s.io": "skaffold-20201211204522-6575",
"role.minikube.sigs.k8s.io": ""
},
"StopSignal": "SIGRTMIN+3"
},
"NetworkSettings": {
"Bridge": "",
"SandboxID": "31f499dae66e59ec12b3ba07e7737045ed19453886638274e0fe55de629caf0c",
"HairpinMode": false,
"LinkLocalIPv6Address": "",
"LinkLocalIPv6PrefixLen": 0,
"Ports": {
"22/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "32827"
}
],
"2376/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "32826"
}
],
"5000/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "32825"
}
],
"8443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "32824"
}
]
},
"SandboxKey": "/var/run/docker/netns/31f499dae66e",
"SecondaryIPAddresses": null,
"SecondaryIPv6Addresses": null,
"EndpointID": "",
"Gateway": "",
"GlobalIPv6Address": "",
"GlobalIPv6PrefixLen": 0,
"IPAddress": "",
"IPPrefixLen": 0,
"IPv6Gateway": "",
"MacAddress": "",
"Networks": {
"skaffold-20201211204522-6575": {
"IPAMConfig": {
"IPv4Address": "192.168.59.176"
},
"Links": null,
"Aliases": [
"d3f75f60c5cf"
],
"NetworkID": "6869d2c96cc4881e8e6a12d4e175341c012ec06ec5e778cdde15b9321969fe9b",
"EndpointID": "4c9de1aee08cecbad865066e3afea4b30706941d2a2c1a6597a134e771e06a2f",
"Gateway": "192.168.59.1",
"IPAddress": "192.168.59.176",
"IPPrefixLen": 24,
"IPv6Gateway": "",
"GlobalIPv6Address": "",
"GlobalIPv6PrefixLen": 0,
"MacAddress": "02:42:c0:a8:3b:b0",
"DriverOpts": null
}
}
}
}
]
-- /stdout --
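The inspect output above confirms the daemon's TLS port (2376/tcp, published on 127.0.0.1:32826) and the node address 192.168.59.176 that the failing request targeted. The dockerd entries in the minikube logs further below show the server-side view of the same failure: the handshake is rejected because the client certificate chains to a CA named "jenkins" rather than the CA the daemon trusts. A rough sketch of that verification step in Go using crypto/x509 follows; the file names are hypothetical placeholders, not paths from this run.

// Rough illustration of the check that fails in the dockerd log below:
// verify a client certificate against the CA the daemon was started with.
// "ca.pem" and "cert.pem" are placeholder paths, not files from this run.
package main

import (
    "crypto/x509"
    "encoding/pem"
    "fmt"
    "log"
    "os"
)

func main() {
    caPEM, err := os.ReadFile("ca.pem") // CA the daemon trusts (--tlscacert)
    if err != nil {
        log.Fatal(err)
    }
    clientPEM, err := os.ReadFile("cert.pem") // certificate the client presents
    if err != nil {
        log.Fatal(err)
    }

    roots := x509.NewCertPool()
    if !roots.AppendCertsFromPEM(caPEM) {
        log.Fatal("no CA certificates parsed")
    }

    block, _ := pem.Decode(clientPEM)
    if block == nil {
        log.Fatal("no PEM block in client certificate")
    }
    cert, err := x509.ParseCertificate(block.Bytes)
    if err != nil {
        log.Fatal(err)
    }

    // A certificate issued by a different CA (for example one whose subject is
    // "jenkins") fails here with "certificate signed by unknown authority";
    // dockerd reports that as a TLS handshake error and the client sees
    // "remote error: tls: bad certificate".
    if _, err := cert.Verify(x509.VerifyOptions{
        Roots:     roots,
        KeyUsages: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth},
    }); err != nil {
        log.Fatalf("client certificate rejected: %v", err)
    }
    fmt.Println("client certificate accepted")
}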
helpers_test.go:233: (dbg) Run: out/minikube-linux-amd64 status --format={{.Host}} -p skaffold-20201211204522-6575 -n skaffold-20201211204522-6575
helpers_test.go:238: <<< TestSkaffold FAILED: start of post-mortem logs <<<
helpers_test.go:239: ======> post-mortem[TestSkaffold]: minikube logs <======
helpers_test.go:241: (dbg) Run: out/minikube-linux-amd64 -p skaffold-20201211204522-6575 logs -n 25
helpers_test.go:241: (dbg) Done: out/minikube-linux-amd64 -p skaffold-20201211204522-6575 logs -n 25: (1.944589651s)
helpers_test.go:246: TestSkaffold logs:
-- stdout --
* ==> Docker <==
* -- Logs begin at Fri 2020-12-11 20:45:25 UTC, end at Fri 2020-12-11 20:46:03 UTC. --
* Dec 11 20:45:53 skaffold-20201211204522-6575 dockerd[2914]: time="2020-12-11T20:45:53.911382240Z" level=info msg="ccResolverWrapper: sending update to cc: {[{unix:///run/containerd/containerd.sock <nil> 0 <nil>}] <nil> <nil>}" module=grpc
* Dec 11 20:45:53 skaffold-20201211204522-6575 dockerd[2914]: time="2020-12-11T20:45:53.911400348Z" level=info msg="ClientConn switching balancer to \"pick_first\"" module=grpc
* Dec 11 20:45:53 skaffold-20201211204522-6575 dockerd[2914]: time="2020-12-11T20:45:53.912524513Z" level=info msg="parsed scheme: \"unix\"" module=grpc
* Dec 11 20:45:53 skaffold-20201211204522-6575 dockerd[2914]: time="2020-12-11T20:45:53.912551598Z" level=info msg="scheme \"unix\" not registered, fallback to default scheme" module=grpc
* Dec 11 20:45:53 skaffold-20201211204522-6575 dockerd[2914]: time="2020-12-11T20:45:53.912576132Z" level=info msg="ccResolverWrapper: sending update to cc: {[{unix:///run/containerd/containerd.sock <nil> 0 <nil>}] <nil> <nil>}" module=grpc
* Dec 11 20:45:53 skaffold-20201211204522-6575 dockerd[2914]: time="2020-12-11T20:45:53.912594113Z" level=info msg="ClientConn switching balancer to \"pick_first\"" module=grpc
* Dec 11 20:45:53 skaffold-20201211204522-6575 dockerd[2914]: time="2020-12-11T20:45:53.931632739Z" level=info msg="[graphdriver] using prior storage driver: overlay2"
* Dec 11 20:45:53 skaffold-20201211204522-6575 dockerd[2914]: time="2020-12-11T20:45:53.940289407Z" level=warning msg="Your kernel does not support swap memory limit"
* Dec 11 20:45:53 skaffold-20201211204522-6575 dockerd[2914]: time="2020-12-11T20:45:53.940316550Z" level=warning msg="Your kernel does not support CPU realtime scheduler"
* Dec 11 20:45:53 skaffold-20201211204522-6575 dockerd[2914]: time="2020-12-11T20:45:53.940496433Z" level=info msg="Loading containers: start."
* Dec 11 20:45:54 skaffold-20201211204522-6575 dockerd[2914]: time="2020-12-11T20:45:54.150830259Z" level=info msg="Removing stale sandbox 4f33f23a6626dec18e5579332b0fec747db1940dc946b3b8848e979c1dc71260 (124ac1733d1bf863ed3310cf60535bf6ad6b0fec4417f1e5912f6da3c5779eaa)"
* Dec 11 20:45:54 skaffold-20201211204522-6575 dockerd[2914]: time="2020-12-11T20:45:54.153599433Z" level=warning msg="Error (Unable to complete atomic operation, key modified) deleting object [endpoint 3792677e091ee505bda7597ba3046009aafc9ec03ac914259bfd835a13217855 cbf5752a699308c35e079f033f67743ddd6cdfd239f49aca92d3f91584ce3081], retrying...."
* Dec 11 20:45:54 skaffold-20201211204522-6575 dockerd[2914]: time="2020-12-11T20:45:54.205879117Z" level=info msg="Default bridge (docker0) is assigned with an IP address 172.17.0.0/16. Daemon option --bip can be used to set a preferred IP address"
* Dec 11 20:45:54 skaffold-20201211204522-6575 dockerd[2914]: time="2020-12-11T20:45:54.249708982Z" level=info msg="Loading containers: done."
* Dec 11 20:45:54 skaffold-20201211204522-6575 dockerd[2914]: time="2020-12-11T20:45:54.279826859Z" level=info msg="Docker daemon" commit=eeddea2 graphdriver(s)=overlay2 version=20.10.0
* Dec 11 20:45:54 skaffold-20201211204522-6575 dockerd[2914]: time="2020-12-11T20:45:54.279903929Z" level=info msg="Daemon has completed initialization"
* Dec 11 20:45:54 skaffold-20201211204522-6575 systemd[1]: Started Docker Application Container Engine.
* Dec 11 20:45:54 skaffold-20201211204522-6575 dockerd[2914]: time="2020-12-11T20:45:54.298369931Z" level=info msg="API listen on [::]:2376"
* Dec 11 20:45:54 skaffold-20201211204522-6575 dockerd[2914]: time="2020-12-11T20:45:54.302868821Z" level=info msg="API listen on /var/run/docker.sock"
* Dec 11 20:45:59 skaffold-20201211204522-6575 dockerd[2914]: http: TLS handshake error from 192.168.59.1:36520: tls: failed to verify client's certificate: x509: certificate signed by unknown authority (possibly because of "crypto/rsa: verification error" while trying to verify candidate authority certificate "jenkins")
* Dec 11 20:45:59 skaffold-20201211204522-6575 dockerd[2914]: http: TLS handshake error from 192.168.59.1:36522: tls: failed to verify client's certificate: x509: certificate signed by unknown authority (possibly because of "crypto/rsa: verification error" while trying to verify candidate authority certificate "jenkins")
* Dec 11 20:46:00 skaffold-20201211204522-6575 dockerd[2914]: http: TLS handshake error from 192.168.59.1:36524: tls: failed to verify client's certificate: x509: certificate signed by unknown authority (possibly because of "crypto/rsa: verification error" while trying to verify candidate authority certificate "jenkins")
* Dec 11 20:46:01 skaffold-20201211204522-6575 dockerd[2914]: http: TLS handshake error from 192.168.59.1:36598: tls: failed to verify client's certificate: x509: certificate signed by unknown authority (possibly because of "crypto/rsa: verification error" while trying to verify candidate authority certificate "jenkins")
* Dec 11 20:46:02 skaffold-20201211204522-6575 dockerd[2914]: http: TLS handshake error from 192.168.59.1:36608: tls: failed to verify client's certificate: x509: certificate signed by unknown authority (possibly because of "crypto/rsa: verification error" while trying to verify candidate authority certificate "jenkins")
* Dec 11 20:46:02 skaffold-20201211204522-6575 dockerd[2914]: http: TLS handshake error from 192.168.59.1:36606: tls: failed to verify client's certificate: x509: certificate signed by unknown authority (possibly because of "crypto/rsa: verification error" while trying to verify candidate authority certificate "jenkins")
*
* ==> container status <==
* CONTAINER IMAGE CREATED STATE NAME ATTEMPT POD ID
* ce983f1aa07f7 0369cf4303ffd 8 seconds ago Running etcd 1 93aba77f77f5c
* 78eae12b32cbf 3138b6e3d4712 8 seconds ago Running kube-scheduler 1 562bb8602b367
* eab3db0200bb0 ca9843d3b5454 8 seconds ago Running kube-apiserver 1 1b5e53f6b0da4
* 72de7737a0715 b9fa1895dcaa6 8 seconds ago Running kube-controller-manager 1 aac72db6481cc
* 7b6ef6577ba73 ca9843d3b5454 23 seconds ago Exited kube-apiserver 0 7ac653ada5bc8
* 76fa734100cb1 b9fa1895dcaa6 23 seconds ago Exited kube-controller-manager 0 c441e30e9faf5
* 4dace7a8f06f6 3138b6e3d4712 23 seconds ago Exited kube-scheduler 0 3b8163b702b61
* 34ff8741eb9e5 0369cf4303ffd 23 seconds ago Exited etcd 0 a023ef9b93da9
*
* ==> describe nodes <==
* Name: skaffold-20201211204522-6575
* Roles: control-plane,master
* Labels: beta.kubernetes.io/arch=amd64
* beta.kubernetes.io/os=linux
* kubernetes.io/arch=amd64
* kubernetes.io/hostname=skaffold-20201211204522-6575
* kubernetes.io/os=linux
* minikube.k8s.io/commit=fc69cfe93e0c46b6d41ab5653129ddf7843209ed
* minikube.k8s.io/name=skaffold-20201211204522-6575
* minikube.k8s.io/updated_at=2020_12_11T20_45_49_0700
* minikube.k8s.io/version=v1.15.1
* node-role.kubernetes.io/control-plane=
* node-role.kubernetes.io/master=
* Annotations: kubeadm.alpha.kubernetes.io/cri-socket: /var/run/dockershim.sock
* volumes.kubernetes.io/controller-managed-attach-detach: true
* CreationTimestamp: Fri, 11 Dec 2020 20:45:46 +0000
* Taints: node.kubernetes.io/not-ready:NoSchedule
* Unschedulable: false
* Lease:
* HolderIdentity: skaffold-20201211204522-6575
* AcquireTime: <unset>
* RenewTime: Fri, 11 Dec 2020 20:46:00 +0000
* Conditions:
* Type Status LastHeartbeatTime LastTransitionTime Reason Message
* ---- ------ ----------------- ------------------ ------ -------
* MemoryPressure False Fri, 11 Dec 2020 20:46:01 +0000 Fri, 11 Dec 2020 20:45:42 +0000 KubeletHasSufficientMemory kubelet has sufficient memory available
* DiskPressure False Fri, 11 Dec 2020 20:46:01 +0000 Fri, 11 Dec 2020 20:45:42 +0000 KubeletHasNoDiskPressure kubelet has no disk pressure
* PIDPressure False Fri, 11 Dec 2020 20:46:01 +0000 Fri, 11 Dec 2020 20:45:42 +0000 KubeletHasSufficientPID kubelet has sufficient PID available
* Ready True Fri, 11 Dec 2020 20:46:01 +0000 Fri, 11 Dec 2020 20:46:01 +0000 KubeletReady kubelet is posting ready status
* Addresses:
* InternalIP: 192.168.59.176
* Hostname: skaffold-20201211204522-6575
* Capacity:
* cpu: 8
* ephemeral-storage: 309568300Ki
* hugepages-1Gi: 0
* hugepages-2Mi: 0
* memory: 30887000Ki
* pods: 110
* Allocatable:
* cpu: 8
* ephemeral-storage: 309568300Ki
* hugepages-1Gi: 0
* hugepages-2Mi: 0
* memory: 30887000Ki
* pods: 110
* System Info:
* Machine ID: ee28759ded1d4df1ae60839826a47b5c
* System UUID: ccff03d2-3662-4261-b32e-b8f24caf6254
* Boot ID: ff2e882c-ceac-4ec5-a892-a979e1bf648a
* Kernel Version: 4.9.0-14-amd64
* OS Image: Ubuntu 20.04.1 LTS
* Operating System: linux
* Architecture: amd64
* Container Runtime Version: docker://20.10.0
* Kubelet Version: v1.20.0
* Kube-Proxy Version: v1.20.0
* Non-terminated Pods: (4 in total)
* Namespace Name CPU Requests CPU Limits Memory Requests Memory Limits AGE
* --------- ---- ------------ ---------- --------------- ------------- ---
* kube-system etcd-skaffold-20201211204522-6575 100m (1%) 0 (0%) 100Mi (0%) 0 (0%) 12s
* kube-system kube-apiserver-skaffold-20201211204522-6575 250m (3%) 0 (0%) 0 (0%) 0 (0%) 12s
* kube-system kube-controller-manager-skaffold-20201211204522-6575 200m (2%) 0 (0%) 0 (0%) 0 (0%) 12s
* kube-system kube-scheduler-skaffold-20201211204522-6575 100m (1%) 0 (0%) 0 (0%) 0 (0%) 12s
* Allocated resources:
* (Total limits may be over 100 percent, i.e., overcommitted.)
* Resource Requests Limits
* -------- -------- ------
* cpu 650m (8%) 0 (0%)
* memory 100Mi (0%) 0 (0%)
* ephemeral-storage 100Mi (0%) 0 (0%)
* hugepages-1Gi 0 (0%) 0 (0%)
* hugepages-2Mi 0 (0%) 0 (0%)
* Events:
* Type Reason Age From Message
* ---- ------ ---- ---- -------
* Normal NodeHasSufficientMemory 24s (x5 over 24s) kubelet Node skaffold-20201211204522-6575 status is now: NodeHasSufficientMemory
* Normal NodeHasNoDiskPressure 24s (x4 over 24s) kubelet Node skaffold-20201211204522-6575 status is now: NodeHasNoDiskPressure
* Normal NodeHasSufficientPID 24s (x4 over 24s) kubelet Node skaffold-20201211204522-6575 status is now: NodeHasSufficientPID
* Normal Starting 13s kubelet Starting kubelet.
* Normal NodeHasSufficientMemory 13s kubelet Node skaffold-20201211204522-6575 status is now: NodeHasSufficientMemory
* Normal NodeHasNoDiskPressure 13s kubelet Node skaffold-20201211204522-6575 status is now: NodeHasNoDiskPressure
* Normal NodeHasSufficientPID 13s kubelet Node skaffold-20201211204522-6575 status is now: NodeHasSufficientPID
* Normal NodeNotReady 12s kubelet Node skaffold-20201211204522-6575 status is now: NodeNotReady
* Normal NodeAllocatableEnforced 12s kubelet Updated Node Allocatable limit across pods
*
* ==> dmesg <==
* [ +0.000003] ll header: 00000000: ff ff ff ff ff ff e2 00 6b 4f 61 f3 08 06 ........kOa...
* [ +1.532305] IPv4: martian source 10.85.0.6 from 10.85.0.6, on dev eth0
* [ +0.000003] ll header: 00000000: ff ff ff ff ff ff 1e ec 44 bb 6e 82 08 06 ........D.n...
* [ +1.660050] IPv4: martian source 10.85.0.7 from 10.85.0.7, on dev eth0
* [ +0.000003] ll header: 00000000: ff ff ff ff ff ff de db 52 e5 c3 20 08 06 ........R.. ..
* [ +0.870917] IPv4: martian source 10.85.0.8 from 10.85.0.8, on dev eth0
* [ +0.000003] ll header: 00000000: ff ff ff ff ff ff b2 69 2e ed 78 05 08 06 .......i..x...
* [ +1.300172] IPv4: martian source 10.85.0.9 from 10.85.0.9, on dev eth0
* [ +0.000002] ll header: 00000000: ff ff ff ff ff ff d2 67 cb cb 00 84 08 06 .......g......
* [ +1.032988] IPv4: martian source 10.85.0.10 from 10.85.0.10, on dev eth0
* [ +0.000002] ll header: 00000000: ff ff ff ff ff ff 0a 47 ae fa 47 c2 08 06 .......G..G...
* [ +1.018971] IPv4: martian source 10.85.0.11 from 10.85.0.11, on dev eth0
* [ +0.000003] ll header: 00000000: ff ff ff ff ff ff d6 bf 4e 88 68 91 08 06 ........N.h...
* [ +1.026702] IPv4: martian source 10.244.0.2 from 10.244.0.2, on dev vethd6ee52ab
* [ +0.000003] ll header: 00000000: ff ff ff ff ff ff 8a 0c 6e 94 e2 33 08 06 ........n..3..
* [ +5.124598] cgroup: cgroup2: unknown option "nsdelegate"
* [ +17.388333] cgroup: cgroup2: unknown option "nsdelegate"
* [Dec11 20:38] cgroup: cgroup2: unknown option "nsdelegate"
* [Dec11 20:39] cgroup: cgroup2: unknown option "nsdelegate"
* [ +16.296127] IPv4: martian source 10.244.0.2 from 10.244.0.2, on dev veth6d3b9e3d
* [ +0.000003] ll header: 00000000: ff ff ff ff ff ff fa eb fa d7 11 74 08 06 ...........t..
* [ +3.224752] cgroup: cgroup2: unknown option "nsdelegate"
* [Dec11 20:40] cgroup: cgroup2: unknown option "nsdelegate"
* [Dec11 20:44] cgroup: cgroup2: unknown option "nsdelegate"
* [Dec11 20:45] cgroup: cgroup2: unknown option "nsdelegate"
*
* ==> etcd [34ff8741eb9e] <==
* raft2020/12/11 20:45:40 INFO: 3be816cd21eae4fe switched to configuration voters=(4316725313127769342)
* 2020-12-11 20:45:40.940023 W | auth: simple token is not cryptographically signed
* 2020-12-11 20:45:40.945200 I | etcdserver: starting server... [version: 3.4.13, cluster version: to_be_decided]
* 2020-12-11 20:45:40.945443 I | etcdserver: 3be816cd21eae4fe as single-node; fast-forwarding 9 ticks (election ticks 10)
* raft2020/12/11 20:45:40 INFO: 3be816cd21eae4fe switched to configuration voters=(4316725313127769342)
* 2020-12-11 20:45:40.945821 I | etcdserver/membership: added member 3be816cd21eae4fe [https://192.168.59.176:2380] to cluster ce580c4975538a9c
* 2020-12-11 20:45:40.947625 I | embed: ClientTLS: cert = /var/lib/minikube/certs/etcd/server.crt, key = /var/lib/minikube/certs/etcd/server.key, trusted-ca = /var/lib/minikube/certs/etcd/ca.crt, client-cert-auth = true, crl-file =
* 2020-12-11 20:45:40.947752 I | embed: listening for peers on 192.168.59.176:2380
* 2020-12-11 20:45:40.947844 I | embed: listening for metrics on http://127.0.0.1:2381
* raft2020/12/11 20:45:41 INFO: 3be816cd21eae4fe is starting a new election at term 1
* raft2020/12/11 20:45:41 INFO: 3be816cd21eae4fe became candidate at term 2
* raft2020/12/11 20:45:41 INFO: 3be816cd21eae4fe received MsgVoteResp from 3be816cd21eae4fe at term 2
* raft2020/12/11 20:45:41 INFO: 3be816cd21eae4fe became leader at term 2
* raft2020/12/11 20:45:41 INFO: raft.node: 3be816cd21eae4fe elected leader 3be816cd21eae4fe at term 2
* 2020-12-11 20:45:41.337704 I | etcdserver: setting up the initial cluster version to 3.4
* 2020-12-11 20:45:41.338875 N | etcdserver/membership: set the initial cluster version to 3.4
* 2020-12-11 20:45:41.338947 I | etcdserver/api: enabled capabilities for version 3.4
* 2020-12-11 20:45:41.339005 I | etcdserver: published {Name:skaffold-20201211204522-6575 ClientURLs:[https://192.168.59.176:2379]} to cluster ce580c4975538a9c
* 2020-12-11 20:45:41.339023 I | embed: ready to serve client requests
* 2020-12-11 20:45:41.339254 I | embed: ready to serve client requests
* 2020-12-11 20:45:41.340888 I | embed: serving client requests on 127.0.0.1:2379
* 2020-12-11 20:45:41.379774 I | embed: serving client requests on 192.168.59.176:2379
* 2020-12-11 20:45:52.805573 N | pkg/osutil: received terminated signal, shutting down...
* WARNING: 2020/12/11 20:45:52 grpc: addrConn.createTransport failed to connect to {127.0.0.1:2379 <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting...
* 2020-12-11 20:45:52.822089 I | etcdserver: skipped leadership transfer for single voting member cluster
*
* ==> etcd [ce983f1aa07f] <==
* 2020-12-11 20:45:59.333677 W | etcdserver: read-only range request "key:\"/registry/validatingwebhookconfigurations/\" range_end:\"/registry/validatingwebhookconfigurations0\" count_only:true " with result "range_response_count:0 size:5" took too long (960.066041ms) to execute
* 2020-12-11 20:45:59.333988 W | etcdserver: read-only range request "key:\"/registry/rolebindings/\" range_end:\"/registry/rolebindings0\" limit:10000 " with result "range_response_count:12 size:9121" took too long (1.239943847s) to execute
* 2020-12-11 20:45:59.334162 W | etcdserver: read-only range request "key:\"/registry/rolebindings/\" range_end:\"/registry/rolebindings0\" count_only:true " with result "range_response_count:0 size:7" took too long (1.240001547s) to execute
* 2020-12-11 20:45:59.334368 W | etcdserver: read-only range request "key:\"/registry/roles/\" range_end:\"/registry/roles0\" limit:10000 " with result "range_response_count:12 size:7107" took too long (1.248947403s) to execute
* 2020-12-11 20:45:59.334403 W | etcdserver: read-only range request "key:\"/registry/podsecuritypolicy/\" range_end:\"/registry/podsecuritypolicy0\" limit:10000 " with result "range_response_count:0 size:5" took too long (1.256953672s) to execute
* 2020-12-11 20:45:59.334464 W | etcdserver: read-only range request "key:\"/registry/podsecuritypolicy/\" range_end:\"/registry/podsecuritypolicy0\" count_only:true " with result "range_response_count:0 size:5" took too long (1.257030741s) to execute
* 2020-12-11 20:45:59.334641 W | etcdserver: read-only range request "key:\"/registry/storageclasses/\" range_end:\"/registry/storageclasses0\" count_only:true " with result "range_response_count:0 size:7" took too long (1.144529008s) to execute
* 2020-12-11 20:45:59.334900 W | etcdserver: read-only range request "key:\"/registry/poddisruptionbudgets/\" range_end:\"/registry/poddisruptionbudgets0\" count_only:true " with result "range_response_count:0 size:5" took too long (1.265659898s) to execute
* 2020-12-11 20:45:59.335128 W | etcdserver: read-only range request "key:\"/registry/storageclasses/\" range_end:\"/registry/storageclasses0\" limit:10000 " with result "range_response_count:1 size:992" took too long (1.144677757s) to execute
* 2020-12-11 20:45:59.335400 W | etcdserver: read-only range request "key:\"/registry/poddisruptionbudgets/\" range_end:\"/registry/poddisruptionbudgets0\" limit:10000 " with result "range_response_count:0 size:5" took too long (1.265814552s) to execute
* 2020-12-11 20:45:59.336140 W | etcdserver: read-only range request "key:\"/registry/priorityclasses/\" range_end:\"/registry/priorityclasses0\" limit:10000 " with result "range_response_count:2 size:910" took too long (1.152946987s) to execute
* 2020-12-11 20:45:59.336522 W | etcdserver: read-only range request "key:\"/registry/runtimeclasses/\" range_end:\"/registry/runtimeclasses0\" limit:10000 " with result "range_response_count:0 size:5" took too long (1.274084154s) to execute
* 2020-12-11 20:45:59.336817 W | etcdserver: read-only range request "key:\"/registry/priorityclasses/\" range_end:\"/registry/priorityclasses0\" count_only:true " with result "range_response_count:0 size:7" took too long (1.153072296s) to execute
* 2020-12-11 20:45:59.337137 W | etcdserver: read-only range request "key:\"/registry/runtimeclasses/\" range_end:\"/registry/runtimeclasses0\" count_only:true " with result "range_response_count:0 size:5" took too long (1.274191871s) to execute
* 2020-12-11 20:45:59.338211 W | etcdserver: read-only range request "key:\"/registry/priorityclasses/\" range_end:\"/registry/priorityclasses0\" limit:10000 " with result "range_response_count:2 size:910" took too long (1.166226362s) to execute
* 2020-12-11 20:45:59.338550 W | etcdserver: read-only range request "key:\"/registry/runtimeclasses/\" range_end:\"/registry/runtimeclasses0\" count_only:true " with result "range_response_count:0 size:5" took too long (1.2838444s) to execute
* 2020-12-11 20:45:59.338734 W | etcdserver: read-only range request "key:\"/registry/priorityclasses/\" range_end:\"/registry/priorityclasses0\" count_only:true " with result "range_response_count:0 size:7" took too long (1.166393761s) to execute
* 2020-12-11 20:45:59.338819 W | etcdserver: read-only range request "key:\"/registry/runtimeclasses/\" range_end:\"/registry/runtimeclasses0\" limit:10000 " with result "range_response_count:0 size:5" took too long (1.283966376s) to execute
* 2020-12-11 20:45:59.339685 W | etcdserver: read-only range request "key:\"/registry/clusterrolebindings/\" range_end:\"/registry/clusterrolebindings0\" limit:10000 " with result "range_response_count:49 size:36604" took too long (1.175882312s) to execute
* 2020-12-11 20:45:59.339742 W | etcdserver: read-only range request "key:\"/registry/ingressclasses/\" range_end:\"/registry/ingressclasses0\" count_only:true " with result "range_response_count:0 size:5" took too long (1.294699726s) to execute
* 2020-12-11 20:45:59.339794 W | etcdserver: read-only range request "key:\"/registry/ingress/\" range_end:\"/registry/ingress0\" count_only:true " with result "range_response_count:0 size:5" took too long (1.302709061s) to execute
* 2020-12-11 20:46:01.969312 W | etcdserver: read-only range request "key:\"/registry/services/endpoints/default/kubernetes\" " with result "range_response_count:1 size:422" took too long (980.112115ms) to execute
* 2020-12-11 20:46:01.978124 W | etcdserver: read-only range request "key:\"/registry/resourcequotas/kube-system/\" range_end:\"/registry/resourcequotas/kube-system0\" " with result "range_response_count:0 size:5" took too long (777.960766ms) to execute
* 2020-12-11 20:46:01.978502 W | etcdserver: read-only range request "key:\"/registry/prioritylevelconfigurations/exempt\" " with result "range_response_count:1 size:371" took too long (983.200804ms) to execute
* 2020-12-11 20:46:01.978680 W | etcdserver: read-only range request "key:\"/registry/pods/kube-system/kube-apiserver-skaffold-20201211204522-6575\" " with result "range_response_count:1 size:6179" took too long (985.206807ms) to execute
*
* ==> kernel <==
* 20:46:03 up 28 min, 0 users, load average: 1.53, 1.86, 1.63
* Linux skaffold-20201211204522-6575 4.9.0-14-amd64 #1 SMP Debian 4.9.240-2 (2020-10-30) x86_64 x86_64 x86_64 GNU/Linux
* PRETTY_NAME="Ubuntu 20.04.1 LTS"
*
* ==> kube-apiserver [7b6ef6577ba7] <==
* W1211 20:45:52.820034 1 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {https://127.0.0.1:2379 <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting...
* I1211 20:45:52.820054 1 clientconn.go:897] blockingPicker: the picked transport is not ready, loop back to repick
* W1211 20:45:52.820077 1 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {https://127.0.0.1:2379 <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting...
* W1211 20:45:52.820078 1 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {https://127.0.0.1:2379 <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting...
* W1211 20:45:52.820112 1 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {https://127.0.0.1:2379 <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting...
* W1211 20:45:52.819837 1 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {https://127.0.0.1:2379 <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting...
* W1211 20:45:52.820176 1 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {https://127.0.0.1:2379 <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting...
* I1211 20:45:52.820205 1 clientconn.go:897] blockingPicker: the picked transport is not ready, loop back to repick
* I1211 20:45:52.820243 1 clientconn.go:897] blockingPicker: the picked transport is not ready, loop back to repick
* W1211 20:45:52.820340 1 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {https://127.0.0.1:2379 <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting...
* I1211 20:45:52.820351 1 clientconn.go:897] blockingPicker: the picked transport is not ready, loop back to repick
* W1211 20:45:52.820385 1 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {https://127.0.0.1:2379 <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting...
* W1211 20:45:52.820447 1 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {https://127.0.0.1:2379 <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting...
* W1211 20:45:52.820480 1 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {https://127.0.0.1:2379 <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting...
* I1211 20:45:52.820490 1 clientconn.go:897] blockingPicker: the picked transport is not ready, loop back to repick
* W1211 20:45:52.820535 1 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {https://127.0.0.1:2379 <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting...
* W1211 20:45:52.820597 1 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {https://127.0.0.1:2379 <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting...
* W1211 20:45:52.820631 1 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {https://127.0.0.1:2379 <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting...
* W1211 20:45:52.820686 1 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {https://127.0.0.1:2379 <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting...
* W1211 20:45:52.820704 1 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {https://127.0.0.1:2379 <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting...
* W1211 20:45:52.820813 1 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {https://127.0.0.1:2379 <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting...
* W1211 20:45:52.820828 1 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {https://127.0.0.1:2379 <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting...
* W1211 20:45:52.820844 1 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {https://127.0.0.1:2379 <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting...
* I1211 20:45:52.879396 1 clientconn.go:897] blockingPicker: the picked transport is not ready, loop back to repick
* W1211 20:45:52.879637 1 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {https://127.0.0.1:2379 <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting...
*
* ==> kube-apiserver [eab3db0200bb] <==
* Trace[814555369]: ---"Transaction committed" 779ms (20:46:00.970)
* Trace[814555369]: [782.260051ms] [782.260051ms] END
* I1211 20:46:01.971704 1 trace.go:205] Trace[1215939447]: "Patch" url:/api/v1/nodes/skaffold-20201211204522-6575/status,user-agent:kubelet/v1.20.0 (linux/amd64) kubernetes/af46c47,client:192.168.59.176 (11-Dec-2020 20:46:01.188) (total time: 783ms):
* Trace[1215939447]: ---"Object stored in database" 779ms (20:46:00.971)
* Trace[1215939447]: [783.061896ms] [783.061896ms] END
* I1211 20:46:01.980818 1 trace.go:205] Trace[975985850]: "List etcd3" key:/resourcequotas/kube-system,resourceVersion:,resourceVersionMatch:,limit:0,continue: (11-Dec-2020 20:46:01.199) (total time: 781ms):
* Trace[975985850]: [781.042774ms] [781.042774ms] END
* I1211 20:46:01.980835 1 trace.go:205] Trace[1046865108]: "Get" url:/apis/flowcontrol.apiserver.k8s.io/v1beta1/prioritylevelconfigurations/exempt,user-agent:kube-apiserver/v1.20.0 (linux/amd64) kubernetes/af46c47,client:127.0.0.1 (11-Dec-2020 20:46:00.994) (total time: 985ms):
* Trace[1046865108]: ---"About to write a response" 985ms (20:46:00.980)
* Trace[1046865108]: [985.847837ms] [985.847837ms] END
* I1211 20:46:01.980934 1 trace.go:205] Trace[2038522236]: "List" url:/api/v1/namespaces/kube-system/resourcequotas,user-agent:kube-apiserver/v1.20.0 (linux/amd64) kubernetes/af46c47,client:127.0.0.1 (11-Dec-2020 20:46:01.199) (total time: 781ms):
* Trace[2038522236]: ---"Listing from storage done" 781ms (20:46:00.980)
* Trace[2038522236]: [781.201044ms] [781.201044ms] END
* I1211 20:46:01.981577 1 trace.go:205] Trace[298957945]: "Create" url:/api/v1/namespaces/kube-system/events,user-agent:kubelet/v1.20.0 (linux/amd64) kubernetes/af46c47,client:192.168.59.176 (11-Dec-2020 20:46:00.990) (total time: 990ms):
* Trace[298957945]: ---"Object stored in database" 990ms (20:46:00.981)
* Trace[298957945]: [990.812752ms] [990.812752ms] END
* E1211 20:46:01.984504 1 controller.go:152] Unable to remove old endpoints from kubernetes service: no master IPs were listed in storage, refusing to erase all endpoints for the kubernetes service
* I1211 20:46:01.985187 1 trace.go:205] Trace[257055649]: "Get" url:/api/v1/namespaces/kube-system/pods/kube-apiserver-skaffold-20201211204522-6575,user-agent:kubelet/v1.20.0 (linux/amd64) kubernetes/af46c47,client:192.168.59.176 (11-Dec-2020 20:46:00.991) (total time: 993ms):
* Trace[257055649]: ---"About to write a response" 992ms (20:46:00.984)
* Trace[257055649]: [993.22922ms] [993.22922ms] END
* I1211 20:46:01.985498 1 trace.go:205] Trace[275923415]: "Create" url:/apis/events.k8s.io/v1/namespaces/kube-system/events,user-agent:kube-scheduler/v1.20.0 (linux/amd64) kubernetes/af46c47/scheduler,client:192.168.59.176 (11-Dec-2020 20:46:01.198) (total time: 787ms):
* Trace[275923415]: ---"Object stored in database" 787ms (20:46:00.985)
* Trace[275923415]: [787.187394ms] [787.187394ms] END
* I1211 20:46:01.987340 1 storage_scheduling.go:148] all system priority classes are created successfully or already exist.
* I1211 20:46:03.549595 1 controller.go:606] quota admission added evaluator for: serviceaccounts
*
* ==> kube-controller-manager [72de7737a071] <==
* I1211 20:46:03.544951 1 serviceaccounts_controller.go:117] Starting service account controller
* I1211 20:46:03.544971 1 shared_informer.go:240] Waiting for caches to sync for service account
* I1211 20:46:03.637604 1 shared_informer.go:247] Caches are synced for tokens
* I1211 20:46:03.650034 1 controllermanager.go:554] Started "statefulset"
* I1211 20:46:03.650075 1 stateful_set.go:146] Starting stateful set controller
* I1211 20:46:03.650092 1 shared_informer.go:240] Waiting for caches to sync for stateful set
* I1211 20:46:03.655574 1 node_lifecycle_controller.go:380] Sending events to api server.
* I1211 20:46:03.655804 1 taint_manager.go:163] Sending events to api server.
* I1211 20:46:03.655877 1 node_lifecycle_controller.go:508] Controller will reconcile labels.
* I1211 20:46:03.655927 1 controllermanager.go:554] Started "nodelifecycle"
* I1211 20:46:03.656071 1 node_lifecycle_controller.go:542] Starting node controller
* I1211 20:46:03.656091 1 shared_informer.go:240] Waiting for caches to sync for taint
* I1211 20:46:03.661697 1 controllermanager.go:554] Started "root-ca-cert-publisher"
* I1211 20:46:03.661802 1 publisher.go:98] Starting root CA certificate configmap publisher
* I1211 20:46:03.661817 1 shared_informer.go:240] Waiting for caches to sync for crt configmap
* I1211 20:46:03.676951 1 controllermanager.go:554] Started "bootstrapsigner"
* W1211 20:46:03.676976 1 core.go:246] configure-cloud-routes is set, but no cloud provider specified. Will not configure cloud provider routes.
* W1211 20:46:03.676984 1 controllermanager.go:546] Skipping "route"
* I1211 20:46:03.677161 1 shared_informer.go:240] Waiting for caches to sync for bootstrap_signer
* I1211 20:46:03.698375 1 controllermanager.go:554] Started "persistentvolume-expander"
* I1211 20:46:03.698553 1 expand_controller.go:310] Starting expand controller
* I1211 20:46:03.698574 1 shared_informer.go:240] Waiting for caches to sync for expand
* I1211 20:46:03.715964 1 controllermanager.go:554] Started "podgc"
* I1211 20:46:03.716079 1 gc_controller.go:89] Starting GC controller
* I1211 20:46:03.716124 1 shared_informer.go:240] Waiting for caches to sync for GC
*
* ==> kube-controller-manager [76fa734100cb] <==
* I1211 20:45:50.883906 1 graph_builder.go:289] GraphBuilder running
* I1211 20:45:51.089772 1 controllermanager.go:554] Started "daemonset"
* I1211 20:45:51.089811 1 daemon_controller.go:285] Starting daemon sets controller
* I1211 20:45:51.089821 1 shared_informer.go:240] Waiting for caches to sync for daemon sets
* I1211 20:45:51.382607 1 controllermanager.go:554] Started "replicaset"
* I1211 20:45:51.382681 1 replica_set.go:182] Starting replicaset controller
* I1211 20:45:51.382690 1 shared_informer.go:240] Waiting for caches to sync for ReplicaSet
* I1211 20:45:51.589868 1 controllermanager.go:554] Started "clusterrole-aggregation"
* I1211 20:45:51.589940 1 clusterroleaggregation_controller.go:149] Starting ClusterRoleAggregator
* I1211 20:45:51.589957 1 shared_informer.go:240] Waiting for caches to sync for ClusterRoleAggregator
* I1211 20:45:51.840058 1 controllermanager.go:554] Started "serviceaccount"
* I1211 20:45:51.840148 1 serviceaccounts_controller.go:117] Starting service account controller
* I1211 20:45:51.840208 1 shared_informer.go:240] Waiting for caches to sync for service account
* I1211 20:45:51.989339 1 controllermanager.go:554] Started "csrapproving"
* I1211 20:45:51.989394 1 certificate_controller.go:118] Starting certificate controller "csrapproving"
* I1211 20:45:51.989408 1 shared_informer.go:240] Waiting for caches to sync for certificate-csrapproving
* I1211 20:45:52.240373 1 controllermanager.go:554] Started "root-ca-cert-publisher"
* I1211 20:45:52.240454 1 publisher.go:98] Starting root CA certificate configmap publisher
* I1211 20:45:52.240463 1 shared_informer.go:240] Waiting for caches to sync for crt configmap
* I1211 20:45:52.489796 1 controllermanager.go:554] Started "endpoint"
* I1211 20:45:52.489873 1 endpoints_controller.go:184] Starting endpoint controller
* I1211 20:45:52.489882 1 shared_informer.go:240] Waiting for caches to sync for endpoint
* I1211 20:45:52.747110 1 controllermanager.go:554] Started "namespace"
* I1211 20:45:52.747180 1 namespace_controller.go:200] Starting namespace controller
* I1211 20:45:52.747188 1 shared_informer.go:240] Waiting for caches to sync for namespace
*
* ==> kube-scheduler [4dace7a8f06f] <==
* W1211 20:45:46.786830 1 requestheader_controller.go:193] Unable to get configmap/extension-apiserver-authentication in kube-system. Usually fixed by 'kubectl create rolebinding -n kube-system ROLEBINDING_NAME --role=extension-apiserver-authentication-reader --serviceaccount=YOUR_NS:YOUR_SA'
* W1211 20:45:46.786866 1 authentication.go:332] Error looking up in-cluster authentication configuration: configmaps "extension-apiserver-authentication" is forbidden: User "system:kube-scheduler" cannot get resource "configmaps" in API group "" in the namespace "kube-system"
* W1211 20:45:46.786892 1 authentication.go:333] Continuing without authentication configuration. This may treat all requests as anonymous.
* W1211 20:45:46.786904 1 authentication.go:334] To require authentication configuration lookup to succeed, set --authentication-tolerate-lookup-failure=false
* I1211 20:45:46.982431 1 configmap_cafile_content.go:202] Starting client-ca::kube-system::extension-apiserver-authentication::client-ca-file
* I1211 20:45:46.982463 1 shared_informer.go:240] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file
* I1211 20:45:46.983112 1 tlsconfig.go:240] Starting DynamicServingCertificateController
* I1211 20:45:46.983240 1 secure_serving.go:197] Serving securely on 127.0.0.1:10259
* E1211 20:45:46.988657 1 reflector.go:138] k8s.io/apiserver/pkg/server/dynamiccertificates/configmap_cafile_content.go:206: Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps "extension-apiserver-authentication" is forbidden: User "system:kube-scheduler" cannot list resource "configmaps" in API group "" in the namespace "kube-system"
* E1211 20:45:46.989014 1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.Node: failed to list *v1.Node: nodes is forbidden: User "system:kube-scheduler" cannot list resource "nodes" in API group "" at the cluster scope
* E1211 20:45:46.989125 1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.PersistentVolumeClaim: failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumeclaims" in API group "" at the cluster scope
* E1211 20:45:46.989242 1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.CSINode: failed to list *v1.CSINode: csinodes.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csinodes" in API group "storage.k8s.io" at the cluster scope
* E1211 20:45:46.989355 1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.PersistentVolume: failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumes" in API group "" at the cluster scope
* E1211 20:45:46.989364 1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.Service: failed to list *v1.Service: services is forbidden: User "system:kube-scheduler" cannot list resource "services" in API group "" at the cluster scope
* E1211 20:45:46.989477 1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.ReplicationController: failed to list *v1.ReplicationController: replicationcontrollers is forbidden: User "system:kube-scheduler" cannot list resource "replicationcontrollers" in API group "" at the cluster scope
* E1211 20:45:46.989512 1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.StatefulSet: failed to list *v1.StatefulSet: statefulsets.apps is forbidden: User "system:kube-scheduler" cannot list resource "statefulsets" in API group "apps" at the cluster scope
* E1211 20:45:46.990052 1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.ReplicaSet: failed to list *v1.ReplicaSet: replicasets.apps is forbidden: User "system:kube-scheduler" cannot list resource "replicasets" in API group "apps" at the cluster scope
* E1211 20:45:46.990066 1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.Pod: failed to list *v1.Pod: pods is forbidden: User "system:kube-scheduler" cannot list resource "pods" in API group "" at the cluster scope
* E1211 20:45:46.990221 1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1beta1.PodDisruptionBudget: failed to list *v1beta1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User "system:kube-scheduler" cannot list resource "poddisruptionbudgets" in API group "policy" at the cluster scope
* E1211 20:45:46.990294 1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.StorageClass: failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope
* E1211 20:45:47.992828 1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.CSINode: failed to list *v1.CSINode: csinodes.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csinodes" in API group "storage.k8s.io" at the cluster scope
* E1211 20:45:48.004293 1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.Service: failed to list *v1.Service: services is forbidden: User "system:kube-scheduler" cannot list resource "services" in API group "" at the cluster scope
* E1211 20:45:48.019964 1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.StorageClass: failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope
* E1211 20:45:48.139838 1 reflector.go:138] k8s.io/apiserver/pkg/server/dynamiccertificates/configmap_cafile_content.go:206: Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps "extension-apiserver-authentication" is forbidden: User "system:kube-scheduler" cannot list resource "configmaps" in API group "" in the namespace "kube-system"
* I1211 20:45:49.982618 1 shared_informer.go:247] Caches are synced for client-ca::kube-system::extension-apiserver-authentication::client-ca-file
*
* ==> kube-scheduler [78eae12b32cb] <==
* I1211 20:45:56.417115 1 serving.go:331] Generated self-signed cert in-memory
* W1211 20:46:01.003746 1 requestheader_controller.go:193] Unable to get configmap/extension-apiserver-authentication in kube-system. Usually fixed by 'kubectl create rolebinding -n kube-system ROLEBINDING_NAME --role=extension-apiserver-authentication-reader --serviceaccount=YOUR_NS:YOUR_SA'
* W1211 20:46:01.003779 1 authentication.go:332] Error looking up in-cluster authentication configuration: configmaps "extension-apiserver-authentication" is forbidden: User "system:kube-scheduler" cannot get resource "configmaps" in API group "" in the namespace "kube-system"
* W1211 20:46:01.003790 1 authentication.go:333] Continuing without authentication configuration. This may treat all requests as anonymous.
* W1211 20:46:01.003798 1 authentication.go:334] To require authentication configuration lookup to succeed, set --authentication-tolerate-lookup-failure=false
* I1211 20:46:01.095479 1 configmap_cafile_content.go:202] Starting client-ca::kube-system::extension-apiserver-authentication::client-ca-file
* I1211 20:46:01.095516 1 shared_informer.go:240] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file
* I1211 20:46:01.096574 1 secure_serving.go:197] Serving securely on 127.0.0.1:10259
* I1211 20:46:01.097688 1 tlsconfig.go:240] Starting DynamicServingCertificateController
* I1211 20:46:01.195777 1 shared_informer.go:247] Caches are synced for client-ca::kube-system::extension-apiserver-authentication::client-ca-file
*
* ==> kubelet <==
* -- Logs begin at Fri 2020-12-11 20:45:25 UTC, end at Fri 2020-12-11 20:46:04 UTC. --
* Dec 11 20:45:51 skaffold-20201211204522-6575 kubelet[2359]: I1211 20:45:51.386710 2359 reconciler.go:157] Reconciler: start to sync state
* Dec 11 20:45:53 skaffold-20201211204522-6575 kubelet[2359]: E1211 20:45:53.032833 2359 event.go:273] Unable to write event: '&v1.Event{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ObjectMeta:v1.ObjectMeta{Name:"kube-apiserver-skaffold-20201211204522-6575.164fc46a58f1e00e", GenerateName:"", Namespace:"kube-system", SelfLink:"", UID:"", ResourceVersion:"", Generation:0, CreationTimestamp:v1.Time{Time:time.Time{wall:0x0, ext:0, loc:(*time.Location)(nil)}}, DeletionTimestamp:(*v1.Time)(nil), DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string(nil), Annotations:map[string]string(nil), OwnerReferences:[]v1.OwnerReference(nil), Finalizers:[]string(nil), ClusterName:"", ManagedFields:[]v1.ManagedFieldsEntry(nil)}, InvolvedObject:v1.ObjectReference{Kind:"Pod", Namespace:"kube-system", Name:"kube-apiserver-skaffold-20201211204522-6575", UID:"30fb9afba4c39ffe9c14831adf8aec3e", APIVersion:"v1", ResourceVersion:"", FieldPath:"spec.containers{kube-apiserver}"}, Reason:"Unhealthy", Message:"Sta
rtup probe failed: Get \"https://192.168.59.176:8443/livez\": dial tcp 192.168.59.176:8443: connect: connection refused", Source:v1.EventSource{Component:"kubelet", Host:"skaffold-20201211204522-6575"}, FirstTimestamp:v1.Time{Time:time.Time{wall:0xbfed148041ee160e, ext:3351592241, loc:(*time.Location)(0x70c7020)}}, LastTimestamp:v1.Time{Time:time.Time{wall:0xbfed148041ee160e, ext:3351592241, loc:(*time.Location)(0x70c7020)}}, Count:1, Type:"Warning", EventTime:v1.MicroTime{Time:time.Time{wall:0x0, ext:0, loc:(*time.Location)(nil)}}, Series:(*v1.EventSeries)(nil), Action:"", Related:(*v1.ObjectReference)(nil), ReportingController:"", ReportingInstance:""}': 'Post "https://control-plane.minikube.internal:8443/api/v1/namespaces/kube-system/events": dial tcp 192.168.59.176:8443: connect: connection refused'(may retry after sleeping)
* Dec 11 20:45:53 skaffold-20201211204522-6575 kubelet[2359]: W1211 20:45:53.697931 2359 pod_container_deletor.go:79] Container "7ac653ada5bc8ead21baa69bc495bb48513c140d37673cf0a7e92d816189be4e" not found in pod's containers
* Dec 11 20:45:53 skaffold-20201211204522-6575 kubelet[2359]: W1211 20:45:53.703162 2359 pod_container_deletor.go:79] Container "a023ef9b93da9de000ced7ee32921727e7de2682baeb8e6c6c3ad72c609a8cc6" not found in pod's containers
* Dec 11 20:45:53 skaffold-20201211204522-6575 kubelet[2359]: W1211 20:45:53.703777 2359 status_manager.go:550] Failed to get status for pod "etcd-skaffold-20201211204522-6575_kube-system(4e3082b62e5f1d8c312fdb29b13562b0)": Get "https://control-plane.minikube.internal:8443/api/v1/namespaces/kube-system/pods/etcd-skaffold-20201211204522-6575": dial tcp 192.168.59.176:8443: connect: connection refused
* Dec 11 20:45:53 skaffold-20201211204522-6575 kubelet[2359]: E1211 20:45:53.706137 2359 remote_runtime.go:116] RunPodSandbox from runtime service failed: rpc error: code = Unknown desc = failed to create a sandbox for pod "etcd-skaffold-20201211204522-6575": Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?
* Dec 11 20:45:53 skaffold-20201211204522-6575 kubelet[2359]: E1211 20:45:53.706202 2359 kuberuntime_sandbox.go:70] CreatePodSandbox for pod "etcd-skaffold-20201211204522-6575_kube-system(4e3082b62e5f1d8c312fdb29b13562b0)" failed: rpc error: code = Unknown desc = failed to create a sandbox for pod "etcd-skaffold-20201211204522-6575": Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?
* Dec 11 20:45:53 skaffold-20201211204522-6575 kubelet[2359]: E1211 20:45:53.706226 2359 kuberuntime_manager.go:755] createPodSandbox for pod "etcd-skaffold-20201211204522-6575_kube-system(4e3082b62e5f1d8c312fdb29b13562b0)" failed: rpc error: code = Unknown desc = failed to create a sandbox for pod "etcd-skaffold-20201211204522-6575": Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?
* Dec 11 20:45:53 skaffold-20201211204522-6575 kubelet[2359]: E1211 20:45:53.706281 2359 pod_workers.go:191] Error syncing pod 4e3082b62e5f1d8c312fdb29b13562b0 ("etcd-skaffold-20201211204522-6575_kube-system(4e3082b62e5f1d8c312fdb29b13562b0)"), skipping: failed to "CreatePodSandbox" for "etcd-skaffold-20201211204522-6575_kube-system(4e3082b62e5f1d8c312fdb29b13562b0)" with CreatePodSandboxError: "CreatePodSandbox for pod \"etcd-skaffold-20201211204522-6575_kube-system(4e3082b62e5f1d8c312fdb29b13562b0)\" failed: rpc error: code = Unknown desc = failed to create a sandbox for pod \"etcd-skaffold-20201211204522-6575\": Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
* Dec 11 20:45:53 skaffold-20201211204522-6575 kubelet[2359]: W1211 20:45:53.708213 2359 pod_container_deletor.go:79] Container "3b8163b702b6173346f2352ed440b8496cd5512588c1a6bffb8e60cb2908fcdd" not found in pod's containers
* Dec 11 20:45:53 skaffold-20201211204522-6575 kubelet[2359]: W1211 20:45:53.709065 2359 status_manager.go:550] Failed to get status for pod "kube-scheduler-skaffold-20201211204522-6575_kube-system(3478da2c440ba32fb6c087b3f3b99813)": Get "https://control-plane.minikube.internal:8443/api/v1/namespaces/kube-system/pods/kube-scheduler-skaffold-20201211204522-6575": dial tcp 192.168.59.176:8443: connect: connection refused
* Dec 11 20:45:53 skaffold-20201211204522-6575 kubelet[2359]: E1211 20:45:53.709566 2359 kuberuntime_manager.go:965] PodSandboxStatus of sandbox "c441e30e9faf50ed575cfbbe496450d76ef94fe54f5502678139460aa60751e2" for pod "kube-controller-manager-skaffold-20201211204522-6575_kube-system(a3e7be694ef7cf952503c5d331abc0ac)" error: rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?
* Dec 11 20:45:53 skaffold-20201211204522-6575 kubelet[2359]: E1211 20:45:53.847550 2359 remote_runtime.go:116] RunPodSandbox from runtime service failed: rpc error: code = Unknown desc = failed to create a sandbox for pod "kube-apiserver-skaffold-20201211204522-6575": error during connect: Post "http://%2Fvar%2Frun%2Fdocker.sock/v1.40/containers/create?name=k8s_POD_kube-apiserver-skaffold-20201211204522-6575_kube-system_30fb9afba4c39ffe9c14831adf8aec3e_1": EOF
* Dec 11 20:45:53 skaffold-20201211204522-6575 kubelet[2359]: E1211 20:45:53.847610 2359 kuberuntime_sandbox.go:70] CreatePodSandbox for pod "kube-apiserver-skaffold-20201211204522-6575_kube-system(30fb9afba4c39ffe9c14831adf8aec3e)" failed: rpc error: code = Unknown desc = failed to create a sandbox for pod "kube-apiserver-skaffold-20201211204522-6575": error during connect: Post "http://%2Fvar%2Frun%2Fdocker.sock/v1.40/containers/create?name=k8s_POD_kube-apiserver-skaffold-20201211204522-6575_kube-system_30fb9afba4c39ffe9c14831adf8aec3e_1": EOF
* Dec 11 20:45:53 skaffold-20201211204522-6575 kubelet[2359]: E1211 20:45:53.847627 2359 kuberuntime_manager.go:755] createPodSandbox for pod "kube-apiserver-skaffold-20201211204522-6575_kube-system(30fb9afba4c39ffe9c14831adf8aec3e)" failed: rpc error: code = Unknown desc = failed to create a sandbox for pod "kube-apiserver-skaffold-20201211204522-6575": error during connect: Post "http://%2Fvar%2Frun%2Fdocker.sock/v1.40/containers/create?name=k8s_POD_kube-apiserver-skaffold-20201211204522-6575_kube-system_30fb9afba4c39ffe9c14831adf8aec3e_1": EOF
* Dec 11 20:45:53 skaffold-20201211204522-6575 kubelet[2359]: E1211 20:45:53.847546 2359 remote_runtime.go:116] RunPodSandbox from runtime service failed: rpc error: code = Unknown desc = failed to start sandbox container for pod "kube-scheduler-skaffold-20201211204522-6575": error during connect: Post "http://%2Fvar%2Frun%2Fdocker.sock/v1.40/containers/124ac1733d1bf863ed3310cf60535bf6ad6b0fec4417f1e5912f6da3c5779eaa/start": EOF
* Dec 11 20:45:53 skaffold-20201211204522-6575 kubelet[2359]: E1211 20:45:53.847683 2359 kuberuntime_sandbox.go:70] CreatePodSandbox for pod "kube-scheduler-skaffold-20201211204522-6575_kube-system(3478da2c440ba32fb6c087b3f3b99813)" failed: rpc error: code = Unknown desc = failed to start sandbox container for pod "kube-scheduler-skaffold-20201211204522-6575": error during connect: Post "http://%2Fvar%2Frun%2Fdocker.sock/v1.40/containers/124ac1733d1bf863ed3310cf60535bf6ad6b0fec4417f1e5912f6da3c5779eaa/start": EOF
* Dec 11 20:45:53 skaffold-20201211204522-6575 kubelet[2359]: E1211 20:45:53.847689 2359 pod_workers.go:191] Error syncing pod 30fb9afba4c39ffe9c14831adf8aec3e ("kube-apiserver-skaffold-20201211204522-6575_kube-system(30fb9afba4c39ffe9c14831adf8aec3e)"), skipping: failed to "CreatePodSandbox" for "kube-apiserver-skaffold-20201211204522-6575_kube-system(30fb9afba4c39ffe9c14831adf8aec3e)" with CreatePodSandboxError: "CreatePodSandbox for pod \"kube-apiserver-skaffold-20201211204522-6575_kube-system(30fb9afba4c39ffe9c14831adf8aec3e)\" failed: rpc error: code = Unknown desc = failed to create a sandbox for pod \"kube-apiserver-skaffold-20201211204522-6575\": error during connect: Post \"http://%2Fvar%2Frun%2Fdocker.sock/v1.40/containers/create?name=k8s_POD_kube-apiserver-skaffold-20201211204522-6575_kube-system_30fb9afba4c39ffe9c14831adf8aec3e_1\": EOF"
* Dec 11 20:45:53 skaffold-20201211204522-6575 kubelet[2359]: E1211 20:45:53.847704 2359 kuberuntime_manager.go:755] createPodSandbox for pod "kube-scheduler-skaffold-20201211204522-6575_kube-system(3478da2c440ba32fb6c087b3f3b99813)" failed: rpc error: code = Unknown desc = failed to start sandbox container for pod "kube-scheduler-skaffold-20201211204522-6575": error during connect: Post "http://%2Fvar%2Frun%2Fdocker.sock/v1.40/containers/124ac1733d1bf863ed3310cf60535bf6ad6b0fec4417f1e5912f6da3c5779eaa/start": EOF
* Dec 11 20:45:53 skaffold-20201211204522-6575 kubelet[2359]: E1211 20:45:53.847749 2359 pod_workers.go:191] Error syncing pod 3478da2c440ba32fb6c087b3f3b99813 ("kube-scheduler-skaffold-20201211204522-6575_kube-system(3478da2c440ba32fb6c087b3f3b99813)"), skipping: failed to "CreatePodSandbox" for "kube-scheduler-skaffold-20201211204522-6575_kube-system(3478da2c440ba32fb6c087b3f3b99813)" with CreatePodSandboxError: "CreatePodSandbox for pod \"kube-scheduler-skaffold-20201211204522-6575_kube-system(3478da2c440ba32fb6c087b3f3b99813)\" failed: rpc error: code = Unknown desc = failed to start sandbox container for pod \"kube-scheduler-skaffold-20201211204522-6575\": error during connect: Post \"http://%2Fvar%2Frun%2Fdocker.sock/v1.40/containers/124ac1733d1bf863ed3310cf60535bf6ad6b0fec4417f1e5912f6da3c5779eaa/start\": EOF"
* Dec 11 20:45:54 skaffold-20201211204522-6575 kubelet[2359]: W1211 20:45:54.719870 2359 pod_container_deletor.go:79] Container "124ac1733d1bf863ed3310cf60535bf6ad6b0fec4417f1e5912f6da3c5779eaa" not found in pod's containers
* Dec 11 20:45:54 skaffold-20201211204522-6575 kubelet[2359]: W1211 20:45:54.724877 2359 pod_container_deletor.go:79] Container "c441e30e9faf50ed575cfbbe496450d76ef94fe54f5502678139460aa60751e2" not found in pod's containers
* Dec 11 20:45:54 skaffold-20201211204522-6575 kubelet[2359]: W1211 20:45:54.725447 2359 status_manager.go:550] Failed to get status for pod "kube-controller-manager-skaffold-20201211204522-6575_kube-system(a3e7be694ef7cf952503c5d331abc0ac)": Get "https://control-plane.minikube.internal:8443/api/v1/namespaces/kube-system/pods/kube-controller-manager-skaffold-20201211204522-6575": dial tcp 192.168.59.176:8443: connect: connection refused
* Dec 11 20:45:54 skaffold-20201211204522-6575 kubelet[2359]: W1211 20:45:54.730678 2359 status_manager.go:550] Failed to get status for pod "kube-apiserver-skaffold-20201211204522-6575_kube-system(30fb9afba4c39ffe9c14831adf8aec3e)": Get "https://control-plane.minikube.internal:8443/api/v1/namespaces/kube-system/pods/kube-apiserver-skaffold-20201211204522-6575": dial tcp 192.168.59.176:8443: connect: connection refused
* Dec 11 20:45:55 skaffold-20201211204522-6575 kubelet[2359]: W1211 20:45:55.791474 2359 status_manager.go:550] Failed to get status for pod "kube-controller-manager-skaffold-20201211204522-6575_kube-system(a3e7be694ef7cf952503c5d331abc0ac)": Get "https://control-plane.minikube.internal:8443/api/v1/namespaces/kube-system/pods/kube-controller-manager-skaffold-20201211204522-6575": dial tcp 192.168.59.176:8443: connect: connection refused
-- /stdout --
helpers_test.go:248: (dbg) Run: out/minikube-linux-amd64 status --format={{.APIServer}} -p skaffold-20201211204522-6575 -n skaffold-20201211204522-6575
helpers_test.go:255: (dbg) Run: kubectl --context skaffold-20201211204522-6575 get po -o=jsonpath={.items[*].metadata.name} -A --field-selector=status.phase!=Running
helpers_test.go:261: non-running pods: storage-provisioner
helpers_test.go:263: ======> post-mortem[TestSkaffold]: describe non-running pods <======
helpers_test.go:266: (dbg) Run: kubectl --context skaffold-20201211204522-6575 describe pod storage-provisioner
helpers_test.go:266: (dbg) Non-zero exit: kubectl --context skaffold-20201211204522-6575 describe pod storage-provisioner: exit status 1 (80.362355ms)
** stderr **
Error from server (NotFound): pods "storage-provisioner" not found
** /stderr **
helpers_test.go:268: kubectl --context skaffold-20201211204522-6575 describe pod storage-provisioner: exit status 1
helpers_test.go:171: Cleaning up "skaffold-20201211204522-6575" profile ...
helpers_test.go:172: (dbg) Run: out/minikube-linux-amd64 delete -p skaffold-20201211204522-6575
helpers_test.go:172: (dbg) Done: out/minikube-linux-amd64 delete -p skaffold-20201211204522-6575: (2.764831245s)
--- FAIL: TestSkaffold (45.20s)
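
The kubelet errors in the post-mortem above all reduce to "Cannot connect to the Docker daemon at unix:///var/run/docker.sock", i.e. the node's Docker daemon was not answering while the control-plane pods were being recreated. Below is a minimal sketch, assuming one has shelled into the node (for example with minikube ssh -p skaffold-20201211204522-6575), of probing that socket via the Docker Engine /_ping health endpoint; the file name and structure are illustrative and not part of the test suite.

// probe_docker_socket.go - hypothetical probe, not part of the minikube tests:
// checks whether the Docker daemon answers on the unix socket that the kubelet
// log above reports as unreachable.
package main

import (
	"context"
	"fmt"
	"net"
	"net/http"
	"os"
	"time"
)

func main() {
	// Socket path taken from the kubelet errors above.
	sock := "/var/run/docker.sock"

	client := &http.Client{
		Timeout: 5 * time.Second,
		Transport: &http.Transport{
			// Route every request over the unix socket instead of TCP.
			DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
				return (&net.Dialer{}).DialContext(ctx, "unix", sock)
			},
		},
	}

	// /_ping is the Docker Engine API health endpoint; the host in the URL is
	// ignored because DialContext always connects to the socket above.
	resp, err := client.Get("http://docker/_ping")
	if err != nil {
		fmt.Fprintf(os.Stderr, "docker daemon not reachable on %s: %v\n", sock, err)
		os.Exit(1)
	}
	defer resp.Body.Close()
	fmt.Printf("docker daemon responded: %s\n", resp.Status)
}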