=== RUN TestFunctional/serial/ComponentHealth
functional_test.go:811: (dbg) Run: kubectl --context functional-20220202214710-386638 get po -l tier=control-plane -n kube-system -o=json
functional_test.go:826: etcd phase: Running
functional_test.go:836: etcd status: Ready
functional_test.go:826: kube-apiserver phase: Running
functional_test.go:834: kube-apiserver is not Ready: {Phase:Running Conditions:[{Type:Initialized Status:True} {Type:Ready Status:False} {Type:ContainersReady Status:False} {Type:PodScheduled Status:True}] Message: Reason: HostIP:192.168.49.2 PodIP:192.168.49.2 StartTime:2022-02-02 21:47:36 +0000 UTC ContainerStatuses:[{Name:kube-apiserver State:{Waiting:<nil> Running:0xc0010895f0 Terminated:<nil>} LastTerminationState:{Waiting:<nil> Running:<nil> Terminated:0xc000692150} Ready:false RestartCount:1 Image:k8s.gcr.io/kube-apiserver:v1.23.2 ImageID:docker-pullable://k8s.gcr.io/kube-apiserver@sha256:63ede81b7e1fbb51669f4ee461481815f50eeed1f95e48558e3b8c3dace58a0f ContainerID:docker://16f643730c8ff3e229773759904dc292039b4b0cca5a89909b2a0f64c57c469e}]}
functional_test.go:826: kube-controller-manager phase: Running
functional_test.go:836: kube-controller-manager status: Ready
functional_test.go:826: kube-scheduler phase: Running
functional_test.go:836: kube-scheduler status: Ready
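The assertion at functional_test.go:834 keys off the pod's Ready condition in the JSON fetched at line 811. A quick way to re-check the same condition by hand against the same context (a sketch; the jsonpath expression below is not part of the test suite):

    kubectl --context functional-20220202214710-386638 get po -l tier=control-plane -n kube-system \
      -o jsonpath='{range .items[*]}{.metadata.name}{"\t"}{.status.phase}{"\t"}{.status.conditions[?(@.type=="Ready")].status}{"\n"}{end}'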
helpers_test.go:223: -----------------------post-mortem--------------------------------
helpers_test.go:231: ======> post-mortem[TestFunctional/serial/ComponentHealth]: docker inspect <======
helpers_test.go:232: (dbg) Run: docker inspect functional-20220202214710-386638
helpers_test.go:236: (dbg) docker inspect functional-20220202214710-386638:
-- stdout --
[
{
"Id": "25093bd177af799bcd7a944c7034989d33dd21f9b824b67856a8389eb846e95e",
"Created": "2022-02-02T21:47:19.785576666Z",
"Path": "/usr/local/bin/entrypoint",
"Args": [
"/sbin/init"
],
"State": {
"Status": "running",
"Running": true,
"Paused": false,
"Restarting": false,
"OOMKilled": false,
"Dead": false,
"Pid": 409225,
"ExitCode": 0,
"Error": "",
"StartedAt": "2022-02-02T21:47:20.121491302Z",
"FinishedAt": "0001-01-01T00:00:00Z"
},
"Image": "sha256:64d09634c60d2a75596bc705aa84bdc41f76fe47c5d9ee362550bffbdc256979",
"ResolvConfPath": "/var/lib/docker/containers/25093bd177af799bcd7a944c7034989d33dd21f9b824b67856a8389eb846e95e/resolv.conf",
"HostnamePath": "/var/lib/docker/containers/25093bd177af799bcd7a944c7034989d33dd21f9b824b67856a8389eb846e95e/hostname",
"HostsPath": "/var/lib/docker/containers/25093bd177af799bcd7a944c7034989d33dd21f9b824b67856a8389eb846e95e/hosts",
"LogPath": "/var/lib/docker/containers/25093bd177af799bcd7a944c7034989d33dd21f9b824b67856a8389eb846e95e/25093bd177af799bcd7a944c7034989d33dd21f9b824b67856a8389eb846e95e-json.log",
"Name": "/functional-20220202214710-386638",
"RestartCount": 0,
"Driver": "overlay2",
"Platform": "linux",
"MountLabel": "",
"ProcessLabel": "",
"AppArmorProfile": "unconfined",
"ExecIDs": null,
"HostConfig": {
"Binds": [
"/lib/modules:/lib/modules:ro",
"functional-20220202214710-386638:/var"
],
"ContainerIDFile": "",
"LogConfig": {
"Type": "json-file",
"Config": {}
},
"NetworkMode": "functional-20220202214710-386638",
"PortBindings": {
"22/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"2376/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"32443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"5000/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"8441/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
]
},
"RestartPolicy": {
"Name": "no",
"MaximumRetryCount": 0
},
"AutoRemove": false,
"VolumeDriver": "",
"VolumesFrom": null,
"CapAdd": null,
"CapDrop": null,
"CgroupnsMode": "host",
"Dns": [],
"DnsOptions": [],
"DnsSearch": [],
"ExtraHosts": null,
"GroupAdd": null,
"IpcMode": "private",
"Cgroup": "",
"Links": null,
"OomScoreAdj": 0,
"PidMode": "",
"Privileged": true,
"PublishAllPorts": false,
"ReadonlyRootfs": false,
"SecurityOpt": [
"seccomp=unconfined",
"apparmor=unconfined",
"label=disable"
],
"Tmpfs": {
"/run": "",
"/tmp": ""
},
"UTSMode": "",
"UsernsMode": "",
"ShmSize": 67108864,
"Runtime": "runc",
"ConsoleSize": [
0,
0
],
"Isolation": "",
"CpuShares": 0,
"Memory": 0,
"NanoCpus": 2000000000,
"CgroupParent": "",
"BlkioWeight": 0,
"BlkioWeightDevice": [],
"BlkioDeviceReadBps": null,
"BlkioDeviceWriteBps": null,
"BlkioDeviceReadIOps": null,
"BlkioDeviceWriteIOps": null,
"CpuPeriod": 0,
"CpuQuota": 0,
"CpuRealtimePeriod": 0,
"CpuRealtimeRuntime": 0,
"CpusetCpus": "",
"CpusetMems": "",
"Devices": [
{
"PathOnHost": "/dev/fuse",
"PathInContainer": "/dev/fuse",
"CgroupPermissions": "rwm"
}
],
"DeviceCgroupRules": null,
"DeviceRequests": null,
"KernelMemory": 0,
"KernelMemoryTCP": 0,
"MemoryReservation": 0,
"MemorySwap": 0,
"MemorySwappiness": null,
"OomKillDisable": false,
"PidsLimit": null,
"Ulimits": null,
"CpuCount": 0,
"CpuPercent": 0,
"IOMaximumIOps": 0,
"IOMaximumBandwidth": 0,
"MaskedPaths": null,
"ReadonlyPaths": null
},
"GraphDriver": {
"Data": {
"LowerDir": "/var/lib/docker/overlay2/eaf65fc43467a6bb44f91633dbf8c411f563d71ea0df9176fa4f414f0f82276c-init/diff:/var/lib/docker/overlay2/d4663ead96d8fac7b028dde763b7445fdff56593784b7a04a0c4c7450b12ac8a/diff:/var/lib/docker/overlay2/f0c766a8d59c3075c44f5eaf54a88aef49ac3a770e6f1e3ac6ebd4004f5b70e2/diff:/var/lib/docker/overlay2/03f8cecf4339603da26d55f367194130430755e9c21844c70ce3d30bd8d5d776/diff:/var/lib/docker/overlay2/8c56519bb2995287e5231612d5ca3809d3ca82a08d3fb88d4bc3f28acb44c548/diff:/var/lib/docker/overlay2/cfdceedf4766b92de092fc07ad1b8f1f378126b680f71754acd32502d082ac4c/diff:/var/lib/docker/overlay2/243a3de48d24643b038a407d872fc1ebb1cca9719c882859b2e65a71ba051c3d/diff:/var/lib/docker/overlay2/73d9c1c910d6be4b6f1719f9ad777c50baedc16cb66f6167f61c34c3535d6aa8/diff:/var/lib/docker/overlay2/414a2e06f368b9a6893993643cc902550952ea16b431d03ef81177a67bcc6055/diff:/var/lib/docker/overlay2/237cb26dc1fb33617d977694b7036e843ca7076f249a87cedb9719f8bb852369/diff:/var/lib/docker/overlay2/f94a67
39f2f53cb0adbb52fccb7daf09ff60a575e9ef35eedbfae9a115cb0bee/diff:/var/lib/docker/overlay2/1a7b8bc08aeb75e64990bf84e55e245d3ccba13a7248844f2a2b41a179987edd/diff:/var/lib/docker/overlay2/9d6fe9ebc7ebbd17697484e59d73c2e56a57b9efd010b504af3e94f33693a302/diff:/var/lib/docker/overlay2/a6b04596431127c96091ac1a60b24c2efd5dc5925d3a6be2c7c991a40f0fba61/diff:/var/lib/docker/overlay2/ddffede76ffd319874db8f340cf399929a918323513065094964ebc981ccebe6/diff:/var/lib/docker/overlay2/873af33e16ed022cdbff8f367fac5f511da2edbe653c3a4df4b38f17018fde26/diff:/var/lib/docker/overlay2/49ecfae1413a927bd924c5c004499b9af18da6c25beffa6da10506397419e246/diff:/var/lib/docker/overlay2/8663e1a8bea6b4285860191688fcf3d3aa95f958547b7d2918feda19facc72d2/diff:/var/lib/docker/overlay2/96864535f6abf106236521f0aa4d98958c91533ecc34864088813a5d638d7a85/diff:/var/lib/docker/overlay2/3245e931c6f0447b1c6dd192323b06a5580c4cb9c80e63e19c886107effec1a8/diff:/var/lib/docker/overlay2/fbfc10643f3968343b6f304ba573ab22123857f0ac7bbdf796e69cc759ffcb01/diff:/var/lib/d
ocker/overlay2/008c499b0a1d502f449e9408eb9d7f0d3fd1f927c6fed14c2daf9128f2481a2e/diff:/var/lib/docker/overlay2/049cceba63d6fda39cec7c7c348ae0046d3bcfc9a354ef8c20d2cb0da0c6126e/diff:/var/lib/docker/overlay2/7423dec7519f3618cdbd580c816a41a86bffe6f544fe6e6c90b0891ab319effe/diff:/var/lib/docker/overlay2/b78015fe190a7617cff46e43a3b7a90d608036971a3c758aab0d4c814064c775/diff:/var/lib/docker/overlay2/f1c7b371c8afb5d9df1ad0b6bcf5860b7d0931bc04f95f00c2f7dc66076996d6/diff:/var/lib/docker/overlay2/68d4abf197eeabb5c097584a1527cd6993fb2d55b0fac9957ec46f8412efdf06/diff:/var/lib/docker/overlay2/f08b8daa4f1c25becadfdae5150584a3dd3ac3bf46afaa6e101fe8e0823572f4/diff:/var/lib/docker/overlay2/1965ab77a969620854fa7e23a0c745af7766a48e9ec2abacecc3e064d1c8fa6a/diff:/var/lib/docker/overlay2/e7cbe6b577242fb8b973317eaa8ee217a8a9ee355b88362b66d45d718b3b2c4b/diff:/var/lib/docker/overlay2/c59e06d8f5c93ed9cb94a83e137c16f3dcdea80b9dceccba323b6ddc7543de46/diff:/var/lib/docker/overlay2/d2e3ed906400776c06ca0502e30b187ca7e8cafdf00da3a54c16cd3818f
76bbc/diff:/var/lib/docker/overlay2/8751d7f7a388ed73174c9365d765716ea6d4d513683a025fe6e322b37e0ffa17/diff:/var/lib/docker/overlay2/e19c84986e7254f1600c2a35898ef2158b4e5b77f2ce8cdf017c2f326ffc0491/diff:/var/lib/docker/overlay2/3dc4411ebe2379955bd8260b29d8faa36b7e965e38b15b19cc65ad0a63e431f6/diff:/var/lib/docker/overlay2/2cae1638c524a830e44f0cb4b8db0e6063415a57346d1d190e50edea3c78df73/diff:/var/lib/docker/overlay2/9c15e8e15ab0ee2a47827fef4273bd0d4ffc315726879f2f422a01be6116fcb2/diff:/var/lib/docker/overlay2/d39456e34bd05af837a974416337cc6b9f6ea243f25e9033212a340da93d3154/diff:/var/lib/docker/overlay2/c0101867e0d0e0ff5aaf7104e95cb6cab78625c9cd8697d2a4f28fff809159ff/diff:/var/lib/docker/overlay2/f1c53d89ed6960deaee63188b5bffd5f88edaf3546c4312205f3b465f7bca9b5/diff:/var/lib/docker/overlay2/2685ce865e736b98fc7e2e1447bdbd580080188c81a14278cf54b8e8dedbf1d9/diff:/var/lib/docker/overlay2/985637e295ac0794f3d93fd241c0526bb5ac4c727f5680fc30c1ed3dde3598ae/diff:/var/lib/docker/overlay2/b635e671b4d1409cfd8b2da5e825f1ec95394c
fc12c58befe6073fbb72ba380d/diff:/var/lib/docker/overlay2/1947bd69c7dfab5dd5faf9672bfd7026287734dc23ee3e44777917f2f0a5a94a/diff:/var/lib/docker/overlay2/584a032dd69c42439df99f15e55b3cdf7afb4340f59e1938ce4e32d8f154f45b/diff",
"MergedDir": "/var/lib/docker/overlay2/eaf65fc43467a6bb44f91633dbf8c411f563d71ea0df9176fa4f414f0f82276c/merged",
"UpperDir": "/var/lib/docker/overlay2/eaf65fc43467a6bb44f91633dbf8c411f563d71ea0df9176fa4f414f0f82276c/diff",
"WorkDir": "/var/lib/docker/overlay2/eaf65fc43467a6bb44f91633dbf8c411f563d71ea0df9176fa4f414f0f82276c/work"
},
"Name": "overlay2"
},
"Mounts": [
{
"Type": "bind",
"Source": "/lib/modules",
"Destination": "/lib/modules",
"Mode": "ro",
"RW": false,
"Propagation": "rprivate"
},
{
"Type": "volume",
"Name": "functional-20220202214710-386638",
"Source": "/var/lib/docker/volumes/functional-20220202214710-386638/_data",
"Destination": "/var",
"Driver": "local",
"Mode": "z",
"RW": true,
"Propagation": ""
}
],
"Config": {
"Hostname": "functional-20220202214710-386638",
"Domainname": "",
"User": "root",
"AttachStdin": false,
"AttachStdout": false,
"AttachStderr": false,
"ExposedPorts": {
"22/tcp": {},
"2376/tcp": {},
"32443/tcp": {},
"5000/tcp": {},
"8441/tcp": {}
},
"Tty": true,
"OpenStdin": false,
"StdinOnce": false,
"Env": [
"container=docker",
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
],
"Cmd": null,
"Image": "gcr.io/k8s-minikube/kicbase:v0.0.29@sha256:be897edc9ed473a9678010f390a0092f488f6a1c30865f571c3b6388f9f56f9b",
"Volumes": null,
"WorkingDir": "",
"Entrypoint": [
"/usr/local/bin/entrypoint",
"/sbin/init"
],
"OnBuild": null,
"Labels": {
"created_by.minikube.sigs.k8s.io": "true",
"mode.minikube.sigs.k8s.io": "functional-20220202214710-386638",
"name.minikube.sigs.k8s.io": "functional-20220202214710-386638",
"role.minikube.sigs.k8s.io": ""
},
"StopSignal": "SIGRTMIN+3"
},
"NetworkSettings": {
"Bridge": "",
"SandboxID": "4522876337a0f7d10e58eac6e758ba0a0ac8e0013dd66fdfbfa52bc414c0570e",
"HairpinMode": false,
"LinkLocalIPv6Address": "",
"LinkLocalIPv6PrefixLen": 0,
"Ports": {
"22/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "49242"
}
],
"2376/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "49241"
}
],
"32443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "49238"
}
],
"5000/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "49240"
}
],
"8441/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "49239"
}
]
},
"SandboxKey": "/var/run/docker/netns/4522876337a0",
"SecondaryIPAddresses": null,
"SecondaryIPv6Addresses": null,
"EndpointID": "",
"Gateway": "",
"GlobalIPv6Address": "",
"GlobalIPv6PrefixLen": 0,
"IPAddress": "",
"IPPrefixLen": 0,
"IPv6Gateway": "",
"MacAddress": "",
"Networks": {
"functional-20220202214710-386638": {
"IPAMConfig": {
"IPv4Address": "192.168.49.2"
},
"Links": null,
"Aliases": [
"25093bd177af",
"functional-20220202214710-386638"
],
"NetworkID": "47c670682efd35dcbb7bca5b9ce18a4524cd9be2130ea6e48bfdbaa04a12cca5",
"EndpointID": "c32dd31a6be38be19e1d5e925b737e59c249b4f7b22560ad209c592c2545e015",
"Gateway": "192.168.49.1",
"IPAddress": "192.168.49.2",
"IPPrefixLen": 24,
"IPv6Gateway": "",
"GlobalIPv6Address": "",
"GlobalIPv6PrefixLen": 0,
"MacAddress": "02:42:c0:a8:31:02",
"DriverOpts": null
}
}
}
}
]
-- /stdout --
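The post-mortem dumps the full docker inspect JSON above; the handful of fields that matter for this failure (container state, the host port published for the 8441 API server port, and the container IP) can be pulled individually with Go templates of the same kind the provisioning log below uses (a sketch, not part of helpers_test.go; the 8441/tcp key is taken from the Ports map above):

    docker container inspect functional-20220202214710-386638 --format '{{.State.Status}}'
    docker container inspect functional-20220202214710-386638 --format '{{(index (index .NetworkSettings.Ports "8441/tcp") 0).HostPort}}'
    docker container inspect functional-20220202214710-386638 --format '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}'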
helpers_test.go:240: (dbg) Run: out/minikube-linux-amd64 status --format={{.Host}} -p functional-20220202214710-386638 -n functional-20220202214710-386638
helpers_test.go:245: <<< TestFunctional/serial/ComponentHealth FAILED: start of post-mortem logs <<<
helpers_test.go:246: ======> post-mortem[TestFunctional/serial/ComponentHealth]: minikube logs <======
helpers_test.go:248: (dbg) Run: out/minikube-linux-amd64 -p functional-20220202214710-386638 logs -n 25
helpers_test.go:248: (dbg) Done: out/minikube-linux-amd64 -p functional-20220202214710-386638 logs -n 25: (1.255525527s)
helpers_test.go:253: TestFunctional/serial/ComponentHealth logs:
-- stdout --
*
* ==> Audit <==
* |---------|--------------------------------------------------------------------------|----------------------------------|---------|---------|-------------------------------|-------------------------------|
| Command | Args | Profile | User | Version | Start Time | End Time |
|---------|--------------------------------------------------------------------------|----------------------------------|---------|---------|-------------------------------|-------------------------------|
| -p | nospam-20220202214626-386638 | nospam-20220202214626-386638 | jenkins | v1.25.1 | Wed, 02 Feb 2022 21:46:56 UTC | Wed, 02 Feb 2022 21:46:56 UTC |
| | --log_dir | | | | | |
| | /tmp/nospam-20220202214626-386638 | | | | | |
| | unpause | | | | | |
| -p | nospam-20220202214626-386638 | nospam-20220202214626-386638 | jenkins | v1.25.1 | Wed, 02 Feb 2022 21:46:56 UTC | Wed, 02 Feb 2022 21:46:57 UTC |
| | --log_dir | | | | | |
| | /tmp/nospam-20220202214626-386638 | | | | | |
| | unpause | | | | | |
| -p | nospam-20220202214626-386638 | nospam-20220202214626-386638 | jenkins | v1.25.1 | Wed, 02 Feb 2022 21:46:57 UTC | Wed, 02 Feb 2022 21:46:57 UTC |
| | --log_dir | | | | | |
| | /tmp/nospam-20220202214626-386638 | | | | | |
| | unpause | | | | | |
| -p | nospam-20220202214626-386638 | nospam-20220202214626-386638 | jenkins | v1.25.1 | Wed, 02 Feb 2022 21:46:57 UTC | Wed, 02 Feb 2022 21:47:08 UTC |
| | --log_dir | | | | | |
| | /tmp/nospam-20220202214626-386638 | | | | | |
| | stop | | | | | |
| -p | nospam-20220202214626-386638 | nospam-20220202214626-386638 | jenkins | v1.25.1 | Wed, 02 Feb 2022 21:47:08 UTC | Wed, 02 Feb 2022 21:47:08 UTC |
| | --log_dir | | | | | |
| | /tmp/nospam-20220202214626-386638 | | | | | |
| | stop | | | | | |
| -p | nospam-20220202214626-386638 | nospam-20220202214626-386638 | jenkins | v1.25.1 | Wed, 02 Feb 2022 21:47:08 UTC | Wed, 02 Feb 2022 21:47:08 UTC |
| | --log_dir | | | | | |
| | /tmp/nospam-20220202214626-386638 | | | | | |
| | stop | | | | | |
| delete | -p | nospam-20220202214626-386638 | jenkins | v1.25.1 | Wed, 02 Feb 2022 21:47:08 UTC | Wed, 02 Feb 2022 21:47:10 UTC |
| | nospam-20220202214626-386638 | | | | | |
| start | -p | functional-20220202214710-386638 | jenkins | v1.25.1 | Wed, 02 Feb 2022 21:47:10 UTC | Wed, 02 Feb 2022 21:47:53 UTC |
| | functional-20220202214710-386638 | | | | | |
| | --memory=4000 | | | | | |
| | --apiserver-port=8441 | | | | | |
| | --wait=all --driver=docker | | | | | |
| | --container-runtime=docker | | | | | |
| start | -p | functional-20220202214710-386638 | jenkins | v1.25.1 | Wed, 02 Feb 2022 21:47:53 UTC | Wed, 02 Feb 2022 21:47:58 UTC |
| | functional-20220202214710-386638 | | | | | |
| | --alsologtostderr -v=8 | | | | | |
| -p | functional-20220202214710-386638 | functional-20220202214710-386638 | jenkins | v1.25.1 | Wed, 02 Feb 2022 21:47:58 UTC | Wed, 02 Feb 2022 21:47:59 UTC |
| | cache add k8s.gcr.io/pause:3.1 | | | | | |
| -p | functional-20220202214710-386638 | functional-20220202214710-386638 | jenkins | v1.25.1 | Wed, 02 Feb 2022 21:47:59 UTC | Wed, 02 Feb 2022 21:48:00 UTC |
| | cache add k8s.gcr.io/pause:3.3 | | | | | |
| -p | functional-20220202214710-386638 | functional-20220202214710-386638 | jenkins | v1.25.1 | Wed, 02 Feb 2022 21:48:00 UTC | Wed, 02 Feb 2022 21:48:01 UTC |
| | cache add | | | | | |
| | k8s.gcr.io/pause:latest | | | | | |
| -p | functional-20220202214710-386638 cache add | functional-20220202214710-386638 | jenkins | v1.25.1 | Wed, 02 Feb 2022 21:48:01 UTC | Wed, 02 Feb 2022 21:48:02 UTC |
| | minikube-local-cache-test:functional-20220202214710-386638 | | | | | |
| -p | functional-20220202214710-386638 cache delete | functional-20220202214710-386638 | jenkins | v1.25.1 | Wed, 02 Feb 2022 21:48:02 UTC | Wed, 02 Feb 2022 21:48:02 UTC |
| | minikube-local-cache-test:functional-20220202214710-386638 | | | | | |
| cache | delete k8s.gcr.io/pause:3.3 | minikube | jenkins | v1.25.1 | Wed, 02 Feb 2022 21:48:02 UTC | Wed, 02 Feb 2022 21:48:02 UTC |
| cache | list | minikube | jenkins | v1.25.1 | Wed, 02 Feb 2022 21:48:03 UTC | Wed, 02 Feb 2022 21:48:03 UTC |
| -p | functional-20220202214710-386638 | functional-20220202214710-386638 | jenkins | v1.25.1 | Wed, 02 Feb 2022 21:48:03 UTC | Wed, 02 Feb 2022 21:48:03 UTC |
| | ssh sudo crictl images | | | | | |
| -p | functional-20220202214710-386638 | functional-20220202214710-386638 | jenkins | v1.25.1 | Wed, 02 Feb 2022 21:48:03 UTC | Wed, 02 Feb 2022 21:48:03 UTC |
| | ssh sudo docker rmi | | | | | |
| | k8s.gcr.io/pause:latest | | | | | |
| -p | functional-20220202214710-386638 | functional-20220202214710-386638 | jenkins | v1.25.1 | Wed, 02 Feb 2022 21:48:04 UTC | Wed, 02 Feb 2022 21:48:04 UTC |
| | cache reload | | | | | |
| -p | functional-20220202214710-386638 | functional-20220202214710-386638 | jenkins | v1.25.1 | Wed, 02 Feb 2022 21:48:04 UTC | Wed, 02 Feb 2022 21:48:05 UTC |
| | ssh sudo crictl inspecti | | | | | |
| | k8s.gcr.io/pause:latest | | | | | |
| cache | delete k8s.gcr.io/pause:3.1 | minikube | jenkins | v1.25.1 | Wed, 02 Feb 2022 21:48:05 UTC | Wed, 02 Feb 2022 21:48:05 UTC |
| cache | delete k8s.gcr.io/pause:latest | minikube | jenkins | v1.25.1 | Wed, 02 Feb 2022 21:48:05 UTC | Wed, 02 Feb 2022 21:48:05 UTC |
| -p | functional-20220202214710-386638 | functional-20220202214710-386638 | jenkins | v1.25.1 | Wed, 02 Feb 2022 21:48:05 UTC | Wed, 02 Feb 2022 21:48:05 UTC |
| | kubectl -- --context | | | | | |
| | functional-20220202214710-386638 | | | | | |
| | get pods | | | | | |
| kubectl | --profile=functional-20220202214710-386638 | functional-20220202214710-386638 | jenkins | v1.25.1 | Wed, 02 Feb 2022 21:48:05 UTC | Wed, 02 Feb 2022 21:48:05 UTC |
| | -- --context | | | | | |
| | functional-20220202214710-386638 get pods | | | | | |
| start | -p functional-20220202214710-386638 | functional-20220202214710-386638 | jenkins | v1.25.1 | Wed, 02 Feb 2022 21:48:05 UTC | Wed, 02 Feb 2022 21:48:30 UTC |
| | --extra-config=apiserver.enable-admission-plugins=NamespaceAutoProvision | | | | | |
| | --wait=all | | | | | |
|---------|--------------------------------------------------------------------------|----------------------------------|---------|---------|-------------------------------|-------------------------------|
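The final Audit row is the start that immediately preceded this test. Reconstructed as a single command from that row (a sketch; the binary path is taken from the helpers_test.go invocations earlier in this log):

    out/minikube-linux-amd64 start -p functional-20220202214710-386638 \
      --extra-config=apiserver.enable-admission-plugins=NamespaceAutoProvision --wait=all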
*
* ==> Last Start <==
* Log file created at: 2022/02/02 21:48:05
Running on machine: ubuntu-20-agent-3
Binary: Built with gc go1.17.6 for linux/amd64
Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg
I0202 21:48:05.593079 414960 out.go:297] Setting OutFile to fd 1 ...
I0202 21:48:05.593139 414960 out.go:344] TERM=,COLORTERM=, which probably does not support color
I0202 21:48:05.593143 414960 out.go:310] Setting ErrFile to fd 2...
I0202 21:48:05.593147 414960 out.go:344] TERM=,COLORTERM=, which probably does not support color
I0202 21:48:05.593289 414960 root.go:315] Updating PATH: /home/jenkins/minikube-integration/linux-amd64-docker-docker-13251-383287-cce8d1911280cbcb62c9a9805b43d62c56136aef/.minikube/bin
I0202 21:48:05.593511 414960 out.go:304] Setting JSON to false
I0202 21:48:05.594732 414960 start.go:112] hostinfo: {"hostname":"ubuntu-20-agent-3","uptime":19838,"bootTime":1643818648,"procs":505,"os":"linux","platform":"ubuntu","platformFamily":"debian","platformVersion":"20.04","kernelVersion":"5.11.0-1029-gcp","kernelArch":"x86_64","virtualizationSystem":"kvm","virtualizationRole":"guest","hostId":"591c9f12-2938-3743-e2bf-c56a050d43d1"}
I0202 21:48:05.594791 414960 start.go:122] virtualization: kvm guest
I0202 21:48:05.597348 414960 out.go:176] * [functional-20220202214710-386638] minikube v1.25.1 on Ubuntu 20.04 (kvm/amd64)
I0202 21:48:05.599046 414960 out.go:176] - MINIKUBE_LOCATION=13251
I0202 21:48:05.597525 414960 notify.go:174] Checking for updates...
I0202 21:48:05.600544 414960 out.go:176] - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
I0202 21:48:05.602000 414960 out.go:176] - KUBECONFIG=/home/jenkins/minikube-integration/linux-amd64-docker-docker-13251-383287-cce8d1911280cbcb62c9a9805b43d62c56136aef/kubeconfig
I0202 21:48:05.603390 414960 out.go:176] - MINIKUBE_HOME=/home/jenkins/minikube-integration/linux-amd64-docker-docker-13251-383287-cce8d1911280cbcb62c9a9805b43d62c56136aef/.minikube
I0202 21:48:05.604861 414960 out.go:176] - MINIKUBE_BIN=out/minikube-linux-amd64
I0202 21:48:05.605326 414960 config.go:176] Loaded profile config "functional-20220202214710-386638": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.23.2
I0202 21:48:05.605390 414960 driver.go:344] Setting default libvirt URI to qemu:///system
I0202 21:48:05.645724 414960 docker.go:132] docker version: linux-20.10.12
I0202 21:48:05.645837 414960 cli_runner.go:133] Run: docker system info --format "{{json .}}"
I0202 21:48:05.731456 414960 info.go:263] docker info: {ID:TS6T:UINC:MIYS:RZPA:KS6T:4JQK:7JHN:D6RA:LDP2:MHAE:G32M:C5NQ Containers:1 ContainersRunning:1 ContainersPaused:0 ContainersStopped:0 Images:73 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local logentries splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:true KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:true BridgeNfIP6Tables:true Debug:false NFd:35 OomKillDisable:true NGoroutines:39 SystemTime:2022-02-02 21:48:05.675684236 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.11.0-1029-gcp OperatingSystem:Ubuntu 20.04.3 LTS OSType:linux Architecture:x86_64 IndexSer
verAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:8 MemTotal:33663639552 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ubuntu-20-agent-3 Labels:[] ExperimentalBuild:false ServerVersion:20.10.12 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:7b11cfaabd73bb80907dd23182b9347b4245eb5d Expected:7b11cfaabd73bb80907dd23182b9347b4245eb5d} RuncCommit:{ID:v1.0.2-0-g52b36a2 Expected:v1.0.2-0-g52b36a2} InitCommit:{ID:de40ad0 Expected:de40ad0} SecurityOptions:[name=apparmor name=seccomp,profile=default] ProductLicense: Warnings:<nil> ServerErrors:[] Clien
tInfo:{Debug:false Plugins:[map[Experimental:true Name:app Path:/usr/libexec/docker/cli-plugins/docker-app SchemaVersion:0.1.0 ShortDescription:Docker App Vendor:Docker Inc. Version:v0.9.1-beta3] map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.7.1-docker] map[Name:scan Path:/usr/libexec/docker/cli-plugins/docker-scan SchemaVersion:0.1.0 ShortDescription:Docker Scan Vendor:Docker Inc. Version:v0.12.0]] Warnings:<nil>}}
I0202 21:48:05.731550 414960 docker.go:237] overlay module found
I0202 21:48:05.733681 414960 out.go:176] * Using the docker driver based on existing profile
I0202 21:48:05.733703 414960 start.go:281] selected driver: docker
I0202 21:48:05.733708 414960 start.go:798] validating driver "docker" against &{Name:functional-20220202214710-386638 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase:v0.0.29@sha256:be897edc9ed473a9678010f390a0092f488f6a1c30865f571c3b6388f9f56f9b Memory:4000 CPUs:2 DiskSize:20000 VMDriver: Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.23.2 ClusterName:functional-20220202214710-386638 Namespace:default APIServerName:minikubeCA APISe
rverNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin: FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: ExtraOptions:[{Component:kubelet Key:housekeeping-interval Value:5m}] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI: NodeIP: NodePort:8441 NodeName:} Nodes:[{Name: IP:192.168.49.2 Port:8441 KubernetesVersion:v1.23.2 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[ambassador:false auto-pause:false csi-hostpath-driver:false dashboard:false default-storageclass:true efk:false freshpod:false gcp-auth:false gvisor:false helm-tiller:false ingress:false ingress-dns:false istio:false istio-provisioner:false kong:false kubevirt:false logviewer:false metallb:false metrics-server:false nvidia-driver-installer:false nvidia-gpu-device-plugin:false olm:false pod-security-policy:false portainer:false registry:false registry-aliases:false registry-creds:false storage-provisioner:true
storage-provisioner-gluster:false volumesnapshots:false] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false}
I0202 21:48:05.733817 414960 start.go:809] status for docker: {Installed:true Healthy:true Running:false NeedsImprovement:false Error:<nil> Reason: Fix: Doc:}
I0202 21:48:05.733983 414960 cli_runner.go:133] Run: docker system info --format "{{json .}}"
I0202 21:48:05.818349 414960 info.go:263] docker info: {ID:TS6T:UINC:MIYS:RZPA:KS6T:4JQK:7JHN:D6RA:LDP2:MHAE:G32M:C5NQ Containers:1 ContainersRunning:1 ContainersPaused:0 ContainersStopped:0 Images:73 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local logentries splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:true KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:true BridgeNfIP6Tables:true Debug:false NFd:35 OomKillDisable:true NGoroutines:39 SystemTime:2022-02-02 21:48:05.762280792 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.11.0-1029-gcp OperatingSystem:Ubuntu 20.04.3 LTS OSType:linux Architecture:x86_64 IndexSer
verAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:8 MemTotal:33663639552 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ubuntu-20-agent-3 Labels:[] ExperimentalBuild:false ServerVersion:20.10.12 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:7b11cfaabd73bb80907dd23182b9347b4245eb5d Expected:7b11cfaabd73bb80907dd23182b9347b4245eb5d} RuncCommit:{ID:v1.0.2-0-g52b36a2 Expected:v1.0.2-0-g52b36a2} InitCommit:{ID:de40ad0 Expected:de40ad0} SecurityOptions:[name=apparmor name=seccomp,profile=default] ProductLicense: Warnings:<nil> ServerErrors:[] Clien
tInfo:{Debug:false Plugins:[map[Experimental:true Name:app Path:/usr/libexec/docker/cli-plugins/docker-app SchemaVersion:0.1.0 ShortDescription:Docker App Vendor:Docker Inc. Version:v0.9.1-beta3] map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.7.1-docker] map[Name:scan Path:/usr/libexec/docker/cli-plugins/docker-scan SchemaVersion:0.1.0 ShortDescription:Docker Scan Vendor:Docker Inc. Version:v0.12.0]] Warnings:<nil>}}
I0202 21:48:05.819129 414960 start_flags.go:397] setting extra-config: kubelet.housekeeping-interval=5m
I0202 21:48:05.819156 414960 start_flags.go:831] Waiting for all components: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
I0202 21:48:05.819176 414960 cni.go:93] Creating CNI manager for ""
I0202 21:48:05.819184 414960 cni.go:167] CNI unnecessary in this configuration, recommending no CNI
I0202 21:48:05.819191 414960 start_flags.go:302] config:
{Name:functional-20220202214710-386638 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase:v0.0.29@sha256:be897edc9ed473a9678010f390a0092f488f6a1c30865f571c3b6388f9f56f9b Memory:4000 CPUs:2 DiskSize:20000 VMDriver: Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.23.2 ClusterName:functional-20220202214710-386638 Namespace:default APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRI
Socket: NetworkPlugin: FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: ExtraOptions:[{Component:apiserver Key:enable-admission-plugins Value:NamespaceAutoProvision} {Component:kubelet Key:housekeeping-interval Value:5m}] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI: NodeIP: NodePort:8441 NodeName:} Nodes:[{Name: IP:192.168.49.2 Port:8441 KubernetesVersion:v1.23.2 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[ambassador:false auto-pause:false csi-hostpath-driver:false dashboard:false default-storageclass:true efk:false freshpod:false gcp-auth:false gvisor:false helm-tiller:false ingress:false ingress-dns:false istio:false istio-provisioner:false kong:false kubevirt:false logviewer:false metallb:false metrics-server:false nvidia-driver-installer:false nvidia-gpu-device-plugin:false olm:false pod-security-policy:false portainer:false registry:false registry-aliases:false registry-creds:false storage-provisioner:true
storage-provisioner-gluster:false volumesnapshots:false] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false}
I0202 21:48:05.821797 414960 out.go:176] * Starting control plane node functional-20220202214710-386638 in cluster functional-20220202214710-386638
I0202 21:48:05.821833 414960 cache.go:120] Beginning downloading kic base image for docker with docker
I0202 21:48:05.823536 414960 out.go:176] * Pulling base image ...
I0202 21:48:05.823557 414960 preload.go:132] Checking if preload exists for k8s version v1.23.2 and runtime docker
I0202 21:48:05.823584 414960 preload.go:148] Found local preload: /home/jenkins/minikube-integration/linux-amd64-docker-docker-13251-383287-cce8d1911280cbcb62c9a9805b43d62c56136aef/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v17-v1.23.2-docker-overlay2-amd64.tar.lz4
I0202 21:48:05.823589 414960 cache.go:57] Caching tarball of preloaded images
I0202 21:48:05.823644 414960 image.go:75] Checking for gcr.io/k8s-minikube/kicbase:v0.0.29@sha256:be897edc9ed473a9678010f390a0092f488f6a1c30865f571c3b6388f9f56f9b in local docker daemon
I0202 21:48:05.823783 414960 preload.go:174] Found /home/jenkins/minikube-integration/linux-amd64-docker-docker-13251-383287-cce8d1911280cbcb62c9a9805b43d62c56136aef/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v17-v1.23.2-docker-overlay2-amd64.tar.lz4 in cache, skipping download
I0202 21:48:05.823792 414960 cache.go:60] Finished verifying existence of preloaded tar for v1.23.2 on docker
I0202 21:48:05.823915 414960 profile.go:148] Saving config to /home/jenkins/minikube-integration/linux-amd64-docker-docker-13251-383287-cce8d1911280cbcb62c9a9805b43d62c56136aef/.minikube/profiles/functional-20220202214710-386638/config.json ...
I0202 21:48:05.866788 414960 image.go:79] Found gcr.io/k8s-minikube/kicbase:v0.0.29@sha256:be897edc9ed473a9678010f390a0092f488f6a1c30865f571c3b6388f9f56f9b in local docker daemon, skipping pull
I0202 21:48:05.866801 414960 cache.go:142] gcr.io/k8s-minikube/kicbase:v0.0.29@sha256:be897edc9ed473a9678010f390a0092f488f6a1c30865f571c3b6388f9f56f9b exists in daemon, skipping load
I0202 21:48:05.866816 414960 cache.go:208] Successfully downloaded all kic artifacts
I0202 21:48:05.866842 414960 start.go:313] acquiring machines lock for functional-20220202214710-386638: {Name:mk465eb77dcfd76fe2db25b4ec9abb51fe719307 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I0202 21:48:05.866925 414960 start.go:317] acquired machines lock for "functional-20220202214710-386638" in 66.807µs
I0202 21:48:05.866938 414960 start.go:93] Skipping create...Using existing machine configuration
I0202 21:48:05.866942 414960 fix.go:55] fixHost starting:
I0202 21:48:05.867176 414960 cli_runner.go:133] Run: docker container inspect functional-20220202214710-386638 --format={{.State.Status}}
I0202 21:48:05.896339 414960 fix.go:108] recreateIfNeeded on functional-20220202214710-386638: state=Running err=<nil>
W0202 21:48:05.896362 414960 fix.go:134] unexpected machine state, will restart: <nil>
I0202 21:48:05.898777 414960 out.go:176] * Updating the running docker "functional-20220202214710-386638" container ...
I0202 21:48:05.898805 414960 machine.go:88] provisioning docker machine ...
I0202 21:48:05.898828 414960 ubuntu.go:169] provisioning hostname "functional-20220202214710-386638"
I0202 21:48:05.898878 414960 cli_runner.go:133] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" functional-20220202214710-386638
I0202 21:48:05.929902 414960 main.go:130] libmachine: Using SSH client type: native
I0202 21:48:05.930062 414960 main.go:130] libmachine: &{{{<nil> 0 [] [] []} docker [0x7a0d20] 0x7a3e00 <nil> [] 0s} 127.0.0.1 49242 <nil> <nil>}
I0202 21:48:05.930071 414960 main.go:130] libmachine: About to run SSH command:
sudo hostname functional-20220202214710-386638 && echo "functional-20220202214710-386638" | sudo tee /etc/hostname
I0202 21:48:06.066520 414960 main.go:130] libmachine: SSH cmd err, output: <nil>: functional-20220202214710-386638
I0202 21:48:06.066595 414960 cli_runner.go:133] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" functional-20220202214710-386638
I0202 21:48:06.098943 414960 main.go:130] libmachine: Using SSH client type: native
I0202 21:48:06.099092 414960 main.go:130] libmachine: &{{{<nil> 0 [] [] []} docker [0x7a0d20] 0x7a3e00 <nil> [] 0s} 127.0.0.1 49242 <nil> <nil>}
I0202 21:48:06.099109 414960 main.go:130] libmachine: About to run SSH command:
if ! grep -xq '.*\sfunctional-20220202214710-386638' /etc/hosts; then
if grep -xq '127.0.1.1\s.*' /etc/hosts; then
sudo sed -i 's/^127.0.1.1\s.*/127.0.1.1 functional-20220202214710-386638/g' /etc/hosts;
else
echo '127.0.1.1 functional-20220202214710-386638' | sudo tee -a /etc/hosts;
fi
fi
I0202 21:48:06.230122 414960 main.go:130] libmachine: SSH cmd err, output: <nil>:
I0202 21:48:06.230145 414960 ubuntu.go:175] set auth options {CertDir:/home/jenkins/minikube-integration/linux-amd64-docker-docker-13251-383287-cce8d1911280cbcb62c9a9805b43d62c56136aef/.minikube CaCertPath:/home/jenkins/minikube-integration/linux-amd64-docker-docker-13251-383287-cce8d1911280cbcb62c9a9805b43d62c56136aef/.minikube/certs/ca.pem CaPrivateKeyPath:/home/jenkins/minikube-integration/linux-amd64-docker-docker-13251-383287-cce8d1911280cbcb62c9a9805b43d62c56136aef/.minikube/certs/ca-key.pem CaCertRemotePath:/etc/docker/ca.pem ServerCertPath:/home/jenkins/minikube-integration/linux-amd64-docker-docker-13251-383287-cce8d1911280cbcb62c9a9805b43d62c56136aef/.minikube/machines/server.pem ServerKeyPath:/home/jenkins/minikube-integration/linux-amd64-docker-docker-13251-383287-cce8d1911280cbcb62c9a9805b43d62c56136aef/.minikube/machines/server-key.pem ClientKeyPath:/home/jenkins/minikube-integration/linux-amd64-docker-docker-13251-383287-cce8d1911280cbcb62c9a9805b43d62c56136aef/.minikube/certs/key.pem
ServerCertRemotePath:/etc/docker/server.pem ServerKeyRemotePath:/etc/docker/server-key.pem ClientCertPath:/home/jenkins/minikube-integration/linux-amd64-docker-docker-13251-383287-cce8d1911280cbcb62c9a9805b43d62c56136aef/.minikube/certs/cert.pem ServerCertSANs:[] StorePath:/home/jenkins/minikube-integration/linux-amd64-docker-docker-13251-383287-cce8d1911280cbcb62c9a9805b43d62c56136aef/.minikube}
I0202 21:48:06.230180 414960 ubuntu.go:177] setting up certificates
I0202 21:48:06.230189 414960 provision.go:83] configureAuth start
I0202 21:48:06.230232 414960 cli_runner.go:133] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" functional-20220202214710-386638
I0202 21:48:06.261938 414960 provision.go:138] copyHostCerts
I0202 21:48:06.261981 414960 exec_runner.go:144] found /home/jenkins/minikube-integration/linux-amd64-docker-docker-13251-383287-cce8d1911280cbcb62c9a9805b43d62c56136aef/.minikube/cert.pem, removing ...
I0202 21:48:06.261987 414960 exec_runner.go:207] rm: /home/jenkins/minikube-integration/linux-amd64-docker-docker-13251-383287-cce8d1911280cbcb62c9a9805b43d62c56136aef/.minikube/cert.pem
I0202 21:48:06.262041 414960 exec_runner.go:151] cp: /home/jenkins/minikube-integration/linux-amd64-docker-docker-13251-383287-cce8d1911280cbcb62c9a9805b43d62c56136aef/.minikube/certs/cert.pem --> /home/jenkins/minikube-integration/linux-amd64-docker-docker-13251-383287-cce8d1911280cbcb62c9a9805b43d62c56136aef/.minikube/cert.pem (1123 bytes)
I0202 21:48:06.262124 414960 exec_runner.go:144] found /home/jenkins/minikube-integration/linux-amd64-docker-docker-13251-383287-cce8d1911280cbcb62c9a9805b43d62c56136aef/.minikube/key.pem, removing ...
I0202 21:48:06.262129 414960 exec_runner.go:207] rm: /home/jenkins/minikube-integration/linux-amd64-docker-docker-13251-383287-cce8d1911280cbcb62c9a9805b43d62c56136aef/.minikube/key.pem
I0202 21:48:06.262150 414960 exec_runner.go:151] cp: /home/jenkins/minikube-integration/linux-amd64-docker-docker-13251-383287-cce8d1911280cbcb62c9a9805b43d62c56136aef/.minikube/certs/key.pem --> /home/jenkins/minikube-integration/linux-amd64-docker-docker-13251-383287-cce8d1911280cbcb62c9a9805b43d62c56136aef/.minikube/key.pem (1679 bytes)
I0202 21:48:06.262200 414960 exec_runner.go:144] found /home/jenkins/minikube-integration/linux-amd64-docker-docker-13251-383287-cce8d1911280cbcb62c9a9805b43d62c56136aef/.minikube/ca.pem, removing ...
I0202 21:48:06.262202 414960 exec_runner.go:207] rm: /home/jenkins/minikube-integration/linux-amd64-docker-docker-13251-383287-cce8d1911280cbcb62c9a9805b43d62c56136aef/.minikube/ca.pem
I0202 21:48:06.262218 414960 exec_runner.go:151] cp: /home/jenkins/minikube-integration/linux-amd64-docker-docker-13251-383287-cce8d1911280cbcb62c9a9805b43d62c56136aef/.minikube/certs/ca.pem --> /home/jenkins/minikube-integration/linux-amd64-docker-docker-13251-383287-cce8d1911280cbcb62c9a9805b43d62c56136aef/.minikube/ca.pem (1078 bytes)
I0202 21:48:06.262254 414960 provision.go:112] generating server cert: /home/jenkins/minikube-integration/linux-amd64-docker-docker-13251-383287-cce8d1911280cbcb62c9a9805b43d62c56136aef/.minikube/machines/server.pem ca-key=/home/jenkins/minikube-integration/linux-amd64-docker-docker-13251-383287-cce8d1911280cbcb62c9a9805b43d62c56136aef/.minikube/certs/ca.pem private-key=/home/jenkins/minikube-integration/linux-amd64-docker-docker-13251-383287-cce8d1911280cbcb62c9a9805b43d62c56136aef/.minikube/certs/ca-key.pem org=jenkins.functional-20220202214710-386638 san=[192.168.49.2 127.0.0.1 localhost 127.0.0.1 minikube functional-20220202214710-386638]
I0202 21:48:06.412661 414960 provision.go:172] copyRemoteCerts
I0202 21:48:06.412718 414960 ssh_runner.go:195] Run: sudo mkdir -p /etc/docker /etc/docker /etc/docker
I0202 21:48:06.412749 414960 cli_runner.go:133] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" functional-20220202214710-386638
I0202 21:48:06.444476 414960 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:49242 SSHKeyPath:/home/jenkins/minikube-integration/linux-amd64-docker-docker-13251-383287-cce8d1911280cbcb62c9a9805b43d62c56136aef/.minikube/machines/functional-20220202214710-386638/id_rsa Username:docker}
I0202 21:48:06.537500 414960 ssh_runner.go:362] scp /home/jenkins/minikube-integration/linux-amd64-docker-docker-13251-383287-cce8d1911280cbcb62c9a9805b43d62c56136aef/.minikube/certs/ca.pem --> /etc/docker/ca.pem (1078 bytes)
I0202 21:48:06.554055 414960 ssh_runner.go:362] scp /home/jenkins/minikube-integration/linux-amd64-docker-docker-13251-383287-cce8d1911280cbcb62c9a9805b43d62c56136aef/.minikube/machines/server.pem --> /etc/docker/server.pem (1269 bytes)
I0202 21:48:06.570256 414960 ssh_runner.go:362] scp /home/jenkins/minikube-integration/linux-amd64-docker-docker-13251-383287-cce8d1911280cbcb62c9a9805b43d62c56136aef/.minikube/machines/server-key.pem --> /etc/docker/server-key.pem (1675 bytes)
I0202 21:48:06.586340 414960 provision.go:86] duration metric: configureAuth took 356.142367ms
I0202 21:48:06.586353 414960 ubuntu.go:193] setting minikube options for container-runtime
I0202 21:48:06.586526 414960 config.go:176] Loaded profile config "functional-20220202214710-386638": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.23.2
I0202 21:48:06.586562 414960 cli_runner.go:133] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" functional-20220202214710-386638
I0202 21:48:06.618949 414960 main.go:130] libmachine: Using SSH client type: native
I0202 21:48:06.619097 414960 main.go:130] libmachine: &{{{<nil> 0 [] [] []} docker [0x7a0d20] 0x7a3e00 <nil> [] 0s} 127.0.0.1 49242 <nil> <nil>}
I0202 21:48:06.619103 414960 main.go:130] libmachine: About to run SSH command:
df --output=fstype / | tail -n 1
I0202 21:48:06.750489 414960 main.go:130] libmachine: SSH cmd err, output: <nil>: overlay
I0202 21:48:06.750507 414960 ubuntu.go:71] root file system type: overlay
I0202 21:48:06.750769 414960 provision.go:309] Updating docker unit: /lib/systemd/system/docker.service ...
I0202 21:48:06.750830 414960 cli_runner.go:133] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" functional-20220202214710-386638
I0202 21:48:06.783186 414960 main.go:130] libmachine: Using SSH client type: native
I0202 21:48:06.783333 414960 main.go:130] libmachine: &{{{<nil> 0 [] [] []} docker [0x7a0d20] 0x7a3e00 <nil> [] 0s} 127.0.0.1 49242 <nil> <nil>}
I0202 21:48:06.783419 414960 main.go:130] libmachine: About to run SSH command:
sudo mkdir -p /lib/systemd/system && printf %!s(MISSING) "[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
BindsTo=containerd.service
After=network-online.target firewalld.service containerd.service
Wants=network-online.target
Requires=docker.socket
StartLimitBurst=3
StartLimitIntervalSec=60
[Service]
Type=notify
Restart=on-failure
# This file is a systemd drop-in unit that inherits from the base dockerd configuration.
# The base configuration already specifies an 'ExecStart=...' command. The first directive
# here is to clear out that command inherited from the base configuration. Without this,
# the command from the base configuration and the command specified here are treated as
# a sequence of commands, which is not the desired behavior, nor is it valid -- systemd
# will catch this invalid input and refuse to start the service with an error like:
# Service has more than one ExecStart= setting, which is only allowed for Type=oneshot services.
# NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other
# container runtimes. If left unlimited, it may result in OOM issues with MySQL.
ExecStart=
ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=docker --insecure-registry 10.96.0.0/12
ExecReload=/bin/kill -s HUP \$MAINPID
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
# Uncomment TasksMax if your systemd version supports it.
# Only systemd 226 and above support this version.
TasksMax=infinity
TimeoutStartSec=0
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process
[Install]
WantedBy=multi-user.target
" | sudo tee /lib/systemd/system/docker.service.new
I0202 21:48:06.923419 414960 main.go:130] libmachine: SSH cmd err, output: <nil>: [Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
BindsTo=containerd.service
After=network-online.target firewalld.service containerd.service
Wants=network-online.target
Requires=docker.socket
StartLimitBurst=3
StartLimitIntervalSec=60
[Service]
Type=notify
Restart=on-failure
# This file is a systemd drop-in unit that inherits from the base dockerd configuration.
# The base configuration already specifies an 'ExecStart=...' command. The first directive
# here is to clear out that command inherited from the base configuration. Without this,
# the command from the base configuration and the command specified here are treated as
# a sequence of commands, which is not the desired behavior, nor is it valid -- systemd
# will catch this invalid input and refuse to start the service with an error like:
# Service has more than one ExecStart= setting, which is only allowed for Type=oneshot services.
# NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other
# container runtimes. If left unlimited, it may result in OOM issues with MySQL.
ExecStart=
ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=docker --insecure-registry 10.96.0.0/12
ExecReload=/bin/kill -s HUP $MAINPID
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
# Uncomment TasksMax if your systemd version supports it.
# Only systemd 226 and above support this version.
TasksMax=infinity
TimeoutStartSec=0
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process
[Install]
WantedBy=multi-user.target
I0202 21:48:06.923493 414960 cli_runner.go:133] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" functional-20220202214710-386638
I0202 21:48:06.954770 414960 main.go:130] libmachine: Using SSH client type: native
I0202 21:48:06.954922 414960 main.go:130] libmachine: &{{{<nil> 0 [] [] []} docker [0x7a0d20] 0x7a3e00 <nil> [] 0s} 127.0.0.1 49242 <nil> <nil>}
I0202 21:48:06.954933 414960 main.go:130] libmachine: About to run SSH command:
sudo diff -u /lib/systemd/system/docker.service /lib/systemd/system/docker.service.new || { sudo mv /lib/systemd/system/docker.service.new /lib/systemd/system/docker.service; sudo systemctl -f daemon-reload && sudo systemctl -f enable docker && sudo systemctl -f restart docker; }
I0202 21:48:07.089662 414960 main.go:130] libmachine: SSH cmd err, output: <nil>:
I0202 21:48:07.089680 414960 machine.go:91] provisioned docker machine in 1.190868649s
I0202 21:48:07.089689 414960 start.go:267] post-start starting for "functional-20220202214710-386638" (driver="docker")
I0202 21:48:07.089694 414960 start.go:277] creating required directories: [/etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs]
I0202 21:48:07.089737 414960 ssh_runner.go:195] Run: sudo mkdir -p /etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs
I0202 21:48:07.089765 414960 cli_runner.go:133] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" functional-20220202214710-386638
I0202 21:48:07.121378 414960 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:49242 SSHKeyPath:/home/jenkins/minikube-integration/linux-amd64-docker-docker-13251-383287-cce8d1911280cbcb62c9a9805b43d62c56136aef/.minikube/machines/functional-20220202214710-386638/id_rsa Username:docker}
I0202 21:48:07.213602 414960 ssh_runner.go:195] Run: cat /etc/os-release
I0202 21:48:07.216117 414960 main.go:130] libmachine: Couldn't set key PRIVACY_POLICY_URL, no corresponding struct field found
I0202 21:48:07.216130 414960 main.go:130] libmachine: Couldn't set key VERSION_CODENAME, no corresponding struct field found
I0202 21:48:07.216136 414960 main.go:130] libmachine: Couldn't set key UBUNTU_CODENAME, no corresponding struct field found
I0202 21:48:07.216146 414960 info.go:137] Remote host: Ubuntu 20.04.2 LTS
I0202 21:48:07.216153 414960 filesync.go:126] Scanning /home/jenkins/minikube-integration/linux-amd64-docker-docker-13251-383287-cce8d1911280cbcb62c9a9805b43d62c56136aef/.minikube/addons for local assets ...
I0202 21:48:07.216194 414960 filesync.go:126] Scanning /home/jenkins/minikube-integration/linux-amd64-docker-docker-13251-383287-cce8d1911280cbcb62c9a9805b43d62c56136aef/.minikube/files for local assets ...
I0202 21:48:07.216248 414960 filesync.go:149] local asset: /home/jenkins/minikube-integration/linux-amd64-docker-docker-13251-383287-cce8d1911280cbcb62c9a9805b43d62c56136aef/.minikube/files/etc/ssl/certs/3866382.pem -> 3866382.pem in /etc/ssl/certs
I0202 21:48:07.216300 414960 filesync.go:149] local asset: /home/jenkins/minikube-integration/linux-amd64-docker-docker-13251-383287-cce8d1911280cbcb62c9a9805b43d62c56136aef/.minikube/files/etc/test/nested/copy/386638/hosts -> hosts in /etc/test/nested/copy/386638
I0202 21:48:07.216327 414960 ssh_runner.go:195] Run: sudo mkdir -p /etc/ssl/certs /etc/test/nested/copy/386638
I0202 21:48:07.222665 414960 ssh_runner.go:362] scp /home/jenkins/minikube-integration/linux-amd64-docker-docker-13251-383287-cce8d1911280cbcb62c9a9805b43d62c56136aef/.minikube/files/etc/ssl/certs/3866382.pem --> /etc/ssl/certs/3866382.pem (1708 bytes)
I0202 21:48:07.238804 414960 ssh_runner.go:362] scp /home/jenkins/minikube-integration/linux-amd64-docker-docker-13251-383287-cce8d1911280cbcb62c9a9805b43d62c56136aef/.minikube/files/etc/test/nested/copy/386638/hosts --> /etc/test/nested/copy/386638/hosts (40 bytes)
I0202 21:48:07.255394 414960 start.go:270] post-start completed in 165.695279ms
I0202 21:48:07.255433 414960 ssh_runner.go:195] Run: sh -c "df -h /var | awk 'NR==2{print $5}'"
I0202 21:48:07.255460 414960 cli_runner.go:133] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" functional-20220202214710-386638
I0202 21:48:07.286998 414960 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:49242 SSHKeyPath:/home/jenkins/minikube-integration/linux-amd64-docker-docker-13251-383287-cce8d1911280cbcb62c9a9805b43d62c56136aef/.minikube/machines/functional-20220202214710-386638/id_rsa Username:docker}
I0202 21:48:07.378839 414960 fix.go:57] fixHost completed within 1.511887666s
I0202 21:48:07.378857 414960 start.go:80] releasing machines lock for "functional-20220202214710-386638", held for 1.511925277s
I0202 21:48:07.378948 414960 cli_runner.go:133] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" functional-20220202214710-386638
I0202 21:48:07.410807 414960 ssh_runner.go:195] Run: systemctl --version
I0202 21:48:07.410850 414960 cli_runner.go:133] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" functional-20220202214710-386638
I0202 21:48:07.410875 414960 ssh_runner.go:195] Run: curl -sS -m 2 https://k8s.gcr.io/
I0202 21:48:07.410916 414960 cli_runner.go:133] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" functional-20220202214710-386638
I0202 21:48:07.442426 414960 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:49242 SSHKeyPath:/home/jenkins/minikube-integration/linux-amd64-docker-docker-13251-383287-cce8d1911280cbcb62c9a9805b43d62c56136aef/.minikube/machines/functional-20220202214710-386638/id_rsa Username:docker}
I0202 21:48:07.442888 414960 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:49242 SSHKeyPath:/home/jenkins/minikube-integration/linux-amd64-docker-docker-13251-383287-cce8d1911280cbcb62c9a9805b43d62c56136aef/.minikube/machines/functional-20220202214710-386638/id_rsa Username:docker}
I0202 21:48:07.549185 414960 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service containerd
I0202 21:48:07.558156 414960 ssh_runner.go:195] Run: sudo systemctl cat docker.service
I0202 21:48:07.567527 414960 cruntime.go:272] skipping containerd shutdown because we are bound to it
I0202 21:48:07.567590 414960 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service crio
I0202 21:48:07.576459 414960 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %!s(MISSING) "runtime-endpoint: unix:///var/run/dockershim.sock
image-endpoint: unix:///var/run/dockershim.sock
" | sudo tee /etc/crictl.yaml"
I0202 21:48:07.588411 414960 ssh_runner.go:195] Run: sudo systemctl unmask docker.service
I0202 21:48:07.685101 414960 ssh_runner.go:195] Run: sudo systemctl enable docker.socket
I0202 21:48:07.780341 414960 ssh_runner.go:195] Run: sudo systemctl cat docker.service
I0202 21:48:07.789211 414960 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0202 21:48:07.882057 414960 ssh_runner.go:195] Run: sudo systemctl start docker
I0202 21:48:07.891086 414960 ssh_runner.go:195] Run: docker version --format {{.Server.Version}}
I0202 21:48:07.929145 414960 ssh_runner.go:195] Run: docker version --format {{.Server.Version}}
I0202 21:48:07.970218 414960 out.go:203] * Preparing Kubernetes v1.23.2 on Docker 20.10.12 ...
I0202 21:48:07.970299 414960 cli_runner.go:133] Run: docker network inspect functional-20220202214710-386638 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I0202 21:48:08.000866 414960 ssh_runner.go:195] Run: grep 192.168.49.1 host.minikube.internal$ /etc/hosts
I0202 21:48:08.006181 414960 out.go:176] - apiserver.enable-admission-plugins=NamespaceAutoProvision
I0202 21:48:08.007710 414960 out.go:176] - kubelet.housekeeping-interval=5m
I0202 21:48:08.007767 414960 preload.go:132] Checking if preload exists for k8s version v1.23.2 and runtime docker
I0202 21:48:08.007811 414960 ssh_runner.go:195] Run: docker images --format {{.Repository}}:{{.Tag}}
I0202 21:48:08.039587 414960 docker.go:606] Got preloaded images: -- stdout --
minikube-local-cache-test:functional-20220202214710-386638
k8s.gcr.io/kube-apiserver:v1.23.2
k8s.gcr.io/kube-controller-manager:v1.23.2
k8s.gcr.io/kube-scheduler:v1.23.2
k8s.gcr.io/kube-proxy:v1.23.2
k8s.gcr.io/etcd:3.5.1-0
k8s.gcr.io/coredns/coredns:v1.8.6
k8s.gcr.io/pause:3.6
kubernetesui/dashboard:v2.3.1
kubernetesui/metrics-scraper:v1.0.7
gcr.io/k8s-minikube/storage-provisioner:v5
k8s.gcr.io/pause:3.3
k8s.gcr.io/pause:3.1
k8s.gcr.io/pause:latest
-- /stdout --
I0202 21:48:08.039609 414960 docker.go:537] Images already preloaded, skipping extraction
I0202 21:48:08.039652 414960 ssh_runner.go:195] Run: docker images --format {{.Repository}}:{{.Tag}}
I0202 21:48:08.071045 414960 docker.go:606] Got preloaded images: -- stdout --
minikube-local-cache-test:functional-20220202214710-386638
k8s.gcr.io/kube-apiserver:v1.23.2
k8s.gcr.io/kube-scheduler:v1.23.2
k8s.gcr.io/kube-controller-manager:v1.23.2
k8s.gcr.io/kube-proxy:v1.23.2
k8s.gcr.io/etcd:3.5.1-0
k8s.gcr.io/coredns/coredns:v1.8.6
k8s.gcr.io/pause:3.6
kubernetesui/dashboard:v2.3.1
kubernetesui/metrics-scraper:v1.0.7
gcr.io/k8s-minikube/storage-provisioner:v5
k8s.gcr.io/pause:3.3
k8s.gcr.io/pause:3.1
k8s.gcr.io/pause:latest
-- /stdout --
I0202 21:48:08.071060 414960 cache_images.go:84] Images are preloaded, skipping loading
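The preload check above is simply `docker images` run inside the node over SSH; the same listing can be reproduced by hand if the cached images ever look stale (assuming the minikube binary is on PATH):

    # List the images already present in the node, mirroring the check above
    minikube -p functional-20220202214710-386638 ssh "docker images --format '{{.Repository}}:{{.Tag}}'"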
I0202 21:48:08.071108 414960 ssh_runner.go:195] Run: docker info --format {{.CgroupDriver}}
I0202 21:48:08.148485 414960 extraconfig.go:124] Overwriting default enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota with user provided enable-admission-plugins=NamespaceAutoProvision for component apiserver
I0202 21:48:08.148521 414960 cni.go:93] Creating CNI manager for ""
I0202 21:48:08.148557 414960 cni.go:167] CNI unnecessary in this configuration, recommending no CNI
I0202 21:48:08.148566 414960 kubeadm.go:87] Using pod CIDR: 10.244.0.0/16
I0202 21:48:08.148577 414960 kubeadm.go:158] kubeadm options: {CertDir:/var/lib/minikube/certs ServiceCIDR:10.96.0.0/12 PodSubnet:10.244.0.0/16 AdvertiseAddress:192.168.49.2 APIServerPort:8441 KubernetesVersion:v1.23.2 EtcdDataDir:/var/lib/minikube/etcd EtcdExtraArgs:map[] ClusterName:functional-20220202214710-386638 NodeName:functional-20220202214710-386638 DNSDomain:cluster.local CRISocket:/var/run/dockershim.sock ImageRepository: ComponentOptions:[{Component:apiServer ExtraArgs:map[enable-admission-plugins:NamespaceAutoProvision] Pairs:map[certSANs:["127.0.0.1", "localhost", "192.168.49.2"]]} {Component:controllerManager ExtraArgs:map[allocate-node-cidrs:true leader-elect:false] Pairs:map[]} {Component:scheduler ExtraArgs:map[leader-elect:false] Pairs:map[]}] FeatureArgs:map[] NoTaintMaster:true NodeIP:192.168.49.2 CgroupDriver:cgroupfs ClientCAFile:/var/lib/minikube/certs/ca.crt StaticPodPath:/etc/kubernetes/manifests ControlPlaneAddress:control-plane.minikube.internal KubeProxyOptions:map[]}
I0202 21:48:08.148683 414960 kubeadm.go:162] kubeadm config:
apiVersion: kubeadm.k8s.io/v1beta3
kind: InitConfiguration
localAPIEndpoint:
advertiseAddress: 192.168.49.2
bindPort: 8441
bootstrapTokens:
- groups:
- system:bootstrappers:kubeadm:default-node-token
ttl: 24h0m0s
usages:
- signing
- authentication
nodeRegistration:
criSocket: /var/run/dockershim.sock
name: "functional-20220202214710-386638"
kubeletExtraArgs:
node-ip: 192.168.49.2
taints: []
---
apiVersion: kubeadm.k8s.io/v1beta3
kind: ClusterConfiguration
apiServer:
certSANs: ["127.0.0.1", "localhost", "192.168.49.2"]
extraArgs:
enable-admission-plugins: "NamespaceAutoProvision"
controllerManager:
extraArgs:
allocate-node-cidrs: "true"
leader-elect: "false"
scheduler:
extraArgs:
leader-elect: "false"
certificatesDir: /var/lib/minikube/certs
clusterName: mk
controlPlaneEndpoint: control-plane.minikube.internal:8441
etcd:
local:
dataDir: /var/lib/minikube/etcd
extraArgs:
proxy-refresh-interval: "70000"
kubernetesVersion: v1.23.2
networking:
dnsDomain: cluster.local
podSubnet: "10.244.0.0/16"
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
authentication:
x509:
clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: cgroupfs
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"
nodefs.inodesFree: "0%"
imagefs.available: "0%"
failSwapOn: false
staticPodPath: /etc/kubernetes/manifests
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
maxPerCore: 0
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_established"
tcpEstablishedTimeout: 0s
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_close"
tcpCloseWaitTimeout: 0s
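The ClusterConfiguration above carries the user-supplied enable-admission-plugins=NamespaceAutoProvision override. Once the control plane is back up, one way to confirm the flag actually reached the running apiserver is to read the static pod's command line (a manual spot check using only kubectl; the label matches the one this log waits on):

    # Print the kube-apiserver flags and filter for the admission-plugin setting
    kubectl --context functional-20220202214710-386638 -n kube-system \
      get pod -l component=kube-apiserver \
      -o jsonpath='{.items[0].spec.containers[0].command}' | tr ',' '\n' | grep admission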
I0202 21:48:08.148742 414960 kubeadm.go:931] kubelet [Unit]
Wants=docker.socket
[Service]
ExecStart=
ExecStart=/var/lib/minikube/binaries/v1.23.2/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --config=/var/lib/kubelet/config.yaml --container-runtime=docker --hostname-override=functional-20220202214710-386638 --housekeeping-interval=5m --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.49.2
[Install]
config:
{KubernetesVersion:v1.23.2 ClusterName:functional-20220202214710-386638 Namespace:default APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin: FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: ExtraOptions:[{Component:apiserver Key:enable-admission-plugins Value:NamespaceAutoProvision} {Component:kubelet Key:housekeeping-interval Value:5m}] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI: NodeIP: NodePort:8441 NodeName:}
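The ExecStart line above is written to the 10-kubeadm.conf drop-in that is scp'd on the next few lines; the effective unit, drop-in included, can be verified inside the node (a manual check mirroring the systemctl calls in this log):

    # Show the kubelet unit plus the minikube-managed drop-in
    minikube -p functional-20220202214710-386638 ssh "sudo systemctl cat kubelet"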
I0202 21:48:08.148785 414960 ssh_runner.go:195] Run: sudo ls /var/lib/minikube/binaries/v1.23.2
I0202 21:48:08.155715 414960 binaries.go:44] Found k8s binaries, skipping transfer
I0202 21:48:08.155761 414960 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/kubelet.service.d /lib/systemd/system /var/tmp/minikube
I0202 21:48:08.162029 414960 ssh_runner.go:362] scp memory --> /etc/systemd/system/kubelet.service.d/10-kubeadm.conf (385 bytes)
I0202 21:48:08.173687 414960 ssh_runner.go:362] scp memory --> /lib/systemd/system/kubelet.service (352 bytes)
I0202 21:48:08.185172 414960 ssh_runner.go:362] scp memory --> /var/tmp/minikube/kubeadm.yaml.new (1904 bytes)
I0202 21:48:08.197017 414960 ssh_runner.go:195] Run: grep 192.168.49.2 control-plane.minikube.internal$ /etc/hosts
I0202 21:48:08.199741 414960 certs.go:54] Setting up /home/jenkins/minikube-integration/linux-amd64-docker-docker-13251-383287-cce8d1911280cbcb62c9a9805b43d62c56136aef/.minikube/profiles/functional-20220202214710-386638 for IP: 192.168.49.2
I0202 21:48:08.199834 414960 certs.go:182] skipping minikubeCA CA generation: /home/jenkins/minikube-integration/linux-amd64-docker-docker-13251-383287-cce8d1911280cbcb62c9a9805b43d62c56136aef/.minikube/ca.key
I0202 21:48:08.199880 414960 certs.go:182] skipping proxyClientCA CA generation: /home/jenkins/minikube-integration/linux-amd64-docker-docker-13251-383287-cce8d1911280cbcb62c9a9805b43d62c56136aef/.minikube/proxy-client-ca.key
I0202 21:48:08.199957 414960 certs.go:298] skipping minikube-user signed cert generation: /home/jenkins/minikube-integration/linux-amd64-docker-docker-13251-383287-cce8d1911280cbcb62c9a9805b43d62c56136aef/.minikube/profiles/functional-20220202214710-386638/client.key
I0202 21:48:08.200012 414960 certs.go:298] skipping minikube signed cert generation: /home/jenkins/minikube-integration/linux-amd64-docker-docker-13251-383287-cce8d1911280cbcb62c9a9805b43d62c56136aef/.minikube/profiles/functional-20220202214710-386638/apiserver.key.dd3b5fb2
I0202 21:48:08.200051 414960 certs.go:298] skipping aggregator signed cert generation: /home/jenkins/minikube-integration/linux-amd64-docker-docker-13251-383287-cce8d1911280cbcb62c9a9805b43d62c56136aef/.minikube/profiles/functional-20220202214710-386638/proxy-client.key
I0202 21:48:08.200166 414960 certs.go:388] found cert: /home/jenkins/minikube-integration/linux-amd64-docker-docker-13251-383287-cce8d1911280cbcb62c9a9805b43d62c56136aef/.minikube/certs/home/jenkins/minikube-integration/linux-amd64-docker-docker-13251-383287-cce8d1911280cbcb62c9a9805b43d62c56136aef/.minikube/certs/386638.pem (1338 bytes)
W0202 21:48:08.200196 414960 certs.go:384] ignoring /home/jenkins/minikube-integration/linux-amd64-docker-docker-13251-383287-cce8d1911280cbcb62c9a9805b43d62c56136aef/.minikube/certs/home/jenkins/minikube-integration/linux-amd64-docker-docker-13251-383287-cce8d1911280cbcb62c9a9805b43d62c56136aef/.minikube/certs/386638_empty.pem, impossibly tiny 0 bytes
I0202 21:48:08.200203 414960 certs.go:388] found cert: /home/jenkins/minikube-integration/linux-amd64-docker-docker-13251-383287-cce8d1911280cbcb62c9a9805b43d62c56136aef/.minikube/certs/home/jenkins/minikube-integration/linux-amd64-docker-docker-13251-383287-cce8d1911280cbcb62c9a9805b43d62c56136aef/.minikube/certs/ca-key.pem (1679 bytes)
I0202 21:48:08.200231 414960 certs.go:388] found cert: /home/jenkins/minikube-integration/linux-amd64-docker-docker-13251-383287-cce8d1911280cbcb62c9a9805b43d62c56136aef/.minikube/certs/home/jenkins/minikube-integration/linux-amd64-docker-docker-13251-383287-cce8d1911280cbcb62c9a9805b43d62c56136aef/.minikube/certs/ca.pem (1078 bytes)
I0202 21:48:08.200258 414960 certs.go:388] found cert: /home/jenkins/minikube-integration/linux-amd64-docker-docker-13251-383287-cce8d1911280cbcb62c9a9805b43d62c56136aef/.minikube/certs/home/jenkins/minikube-integration/linux-amd64-docker-docker-13251-383287-cce8d1911280cbcb62c9a9805b43d62c56136aef/.minikube/certs/cert.pem (1123 bytes)
I0202 21:48:08.200281 414960 certs.go:388] found cert: /home/jenkins/minikube-integration/linux-amd64-docker-docker-13251-383287-cce8d1911280cbcb62c9a9805b43d62c56136aef/.minikube/certs/home/jenkins/minikube-integration/linux-amd64-docker-docker-13251-383287-cce8d1911280cbcb62c9a9805b43d62c56136aef/.minikube/certs/key.pem (1679 bytes)
I0202 21:48:08.200321 414960 certs.go:388] found cert: /home/jenkins/minikube-integration/linux-amd64-docker-docker-13251-383287-cce8d1911280cbcb62c9a9805b43d62c56136aef/.minikube/files/etc/ssl/certs/home/jenkins/minikube-integration/linux-amd64-docker-docker-13251-383287-cce8d1911280cbcb62c9a9805b43d62c56136aef/.minikube/files/etc/ssl/certs/3866382.pem (1708 bytes)
I0202 21:48:08.201567 414960 ssh_runner.go:362] scp /home/jenkins/minikube-integration/linux-amd64-docker-docker-13251-383287-cce8d1911280cbcb62c9a9805b43d62c56136aef/.minikube/profiles/functional-20220202214710-386638/apiserver.crt --> /var/lib/minikube/certs/apiserver.crt (1399 bytes)
I0202 21:48:08.218368 414960 ssh_runner.go:362] scp /home/jenkins/minikube-integration/linux-amd64-docker-docker-13251-383287-cce8d1911280cbcb62c9a9805b43d62c56136aef/.minikube/profiles/functional-20220202214710-386638/apiserver.key --> /var/lib/minikube/certs/apiserver.key (1675 bytes)
I0202 21:48:08.234058 414960 ssh_runner.go:362] scp /home/jenkins/minikube-integration/linux-amd64-docker-docker-13251-383287-cce8d1911280cbcb62c9a9805b43d62c56136aef/.minikube/profiles/functional-20220202214710-386638/proxy-client.crt --> /var/lib/minikube/certs/proxy-client.crt (1147 bytes)
I0202 21:48:08.250001 414960 ssh_runner.go:362] scp /home/jenkins/minikube-integration/linux-amd64-docker-docker-13251-383287-cce8d1911280cbcb62c9a9805b43d62c56136aef/.minikube/profiles/functional-20220202214710-386638/proxy-client.key --> /var/lib/minikube/certs/proxy-client.key (1675 bytes)
I0202 21:48:08.265999 414960 ssh_runner.go:362] scp /home/jenkins/minikube-integration/linux-amd64-docker-docker-13251-383287-cce8d1911280cbcb62c9a9805b43d62c56136aef/.minikube/ca.crt --> /var/lib/minikube/certs/ca.crt (1111 bytes)
I0202 21:48:08.282337 414960 ssh_runner.go:362] scp /home/jenkins/minikube-integration/linux-amd64-docker-docker-13251-383287-cce8d1911280cbcb62c9a9805b43d62c56136aef/.minikube/ca.key --> /var/lib/minikube/certs/ca.key (1679 bytes)
I0202 21:48:08.298533 414960 ssh_runner.go:362] scp /home/jenkins/minikube-integration/linux-amd64-docker-docker-13251-383287-cce8d1911280cbcb62c9a9805b43d62c56136aef/.minikube/proxy-client-ca.crt --> /var/lib/minikube/certs/proxy-client-ca.crt (1119 bytes)
I0202 21:48:08.314790 414960 ssh_runner.go:362] scp /home/jenkins/minikube-integration/linux-amd64-docker-docker-13251-383287-cce8d1911280cbcb62c9a9805b43d62c56136aef/.minikube/proxy-client-ca.key --> /var/lib/minikube/certs/proxy-client-ca.key (1679 bytes)
I0202 21:48:08.331486 414960 ssh_runner.go:362] scp /home/jenkins/minikube-integration/linux-amd64-docker-docker-13251-383287-cce8d1911280cbcb62c9a9805b43d62c56136aef/.minikube/certs/386638.pem --> /usr/share/ca-certificates/386638.pem (1338 bytes)
I0202 21:48:08.347778 414960 ssh_runner.go:362] scp /home/jenkins/minikube-integration/linux-amd64-docker-docker-13251-383287-cce8d1911280cbcb62c9a9805b43d62c56136aef/.minikube/files/etc/ssl/certs/3866382.pem --> /usr/share/ca-certificates/3866382.pem (1708 bytes)
I0202 21:48:08.363730 414960 ssh_runner.go:362] scp /home/jenkins/minikube-integration/linux-amd64-docker-docker-13251-383287-cce8d1911280cbcb62c9a9805b43d62c56136aef/.minikube/ca.crt --> /usr/share/ca-certificates/minikubeCA.pem (1111 bytes)
I0202 21:48:08.379636 414960 ssh_runner.go:362] scp memory --> /var/lib/minikube/kubeconfig (738 bytes)
I0202 21:48:08.391040 414960 ssh_runner.go:195] Run: openssl version
I0202 21:48:08.395393 414960 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/386638.pem && ln -fs /usr/share/ca-certificates/386638.pem /etc/ssl/certs/386638.pem"
I0202 21:48:08.402124 414960 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/386638.pem
I0202 21:48:08.404805 414960 certs.go:431] hashing: -rw-r--r-- 1 root root 1338 Feb 2 21:47 /usr/share/ca-certificates/386638.pem
I0202 21:48:08.404841 414960 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/386638.pem
I0202 21:48:08.409157 414960 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/51391683.0 || ln -fs /etc/ssl/certs/386638.pem /etc/ssl/certs/51391683.0"
I0202 21:48:08.415335 414960 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/3866382.pem && ln -fs /usr/share/ca-certificates/3866382.pem /etc/ssl/certs/3866382.pem"
I0202 21:48:08.422200 414960 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/3866382.pem
I0202 21:48:08.424890 414960 certs.go:431] hashing: -rw-r--r-- 1 root root 1708 Feb 2 21:47 /usr/share/ca-certificates/3866382.pem
I0202 21:48:08.424929 414960 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/3866382.pem
I0202 21:48:08.429506 414960 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/3ec20f2e.0 || ln -fs /etc/ssl/certs/3866382.pem /etc/ssl/certs/3ec20f2e.0"
I0202 21:48:08.435628 414960 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/minikubeCA.pem && ln -fs /usr/share/ca-certificates/minikubeCA.pem /etc/ssl/certs/minikubeCA.pem"
I0202 21:48:08.442233 414960 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/minikubeCA.pem
I0202 21:48:08.444977 414960 certs.go:431] hashing: -rw-r--r-- 1 root root 1111 Feb 2 21:42 /usr/share/ca-certificates/minikubeCA.pem
I0202 21:48:08.445014 414960 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem
I0202 21:48:08.449278 414960 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/b5213941.0 || ln -fs /etc/ssl/certs/minikubeCA.pem /etc/ssl/certs/b5213941.0"
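The openssl/ln sequence above is the standard OpenSSL hashed-directory layout: each CA certificate gets a <subject-hash>.0 symlink under /etc/ssl/certs. A minimal sketch of the same step for the minikube CA, run inside the node, with paths and hash taken from this log:

    # Recreate the hash symlink for the minikube CA by hand
    hash=$(openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem)   # prints b5213941 in this run
    sudo ln -fs /etc/ssl/certs/minikubeCA.pem "/etc/ssl/certs/${hash}.0"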
I0202 21:48:08.455221 414960 kubeadm.go:390] StartCluster: {Name:functional-20220202214710-386638 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase:v0.0.29@sha256:be897edc9ed473a9678010f390a0092f488f6a1c30865f571c3b6388f9f56f9b Memory:4000 CPUs:2 DiskSize:20000 VMDriver: Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.23.2 ClusterName:functional-20220202214710-386638 Namespace:default APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin: FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: ExtraOptions:[{Component:apiserver Key:enable-admission-plugins Value:NamespaceAutoProvision} {Component:kubelet Key:housekeeping-interval Value:5m}] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI: NodeIP: NodePort:8441 NodeName:} Nodes:[{Name: IP:192.168.49.2 Port:8441 KubernetesVersion:v1.23.2 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[ambassador:false auto-pause:false csi-hostpath-driver:false dashboard:false default-storageclass:true efk:false freshpod:false gcp-auth:false gvisor:false helm-tiller:false ingress:false ingress-dns:false istio:false istio-provisioner:false kong:false kubevirt:false logviewer:false metallb:false metrics-server:false nvidia-driver-installer:false nvidia-gpu-device-plugin:false olm:false pod-security-policy:false portainer:false registry:false registry-aliases:false registry-creds:false storage-provisioner:true storage-provisioner-gluster:false volumesnapshots:false] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false}
I0202 21:48:08.455328 414960 ssh_runner.go:195] Run: docker ps --filter status=paused --filter=name=k8s_.*_(kube-system)_ --format={{.ID}}
I0202 21:48:08.485233 414960 ssh_runner.go:195] Run: sudo ls /var/lib/kubelet/kubeadm-flags.env /var/lib/kubelet/config.yaml /var/lib/minikube/etcd
I0202 21:48:08.491749 414960 kubeadm.go:401] found existing configuration files, will attempt cluster restart
I0202 21:48:08.491759 414960 kubeadm.go:600] restartCluster start
I0202 21:48:08.491791 414960 ssh_runner.go:195] Run: sudo test -d /data/minikube
I0202 21:48:08.497583 414960 kubeadm.go:126] /data/minikube skipping compat symlinks: sudo test -d /data/minikube: Process exited with status 1
stdout:
stderr:
I0202 21:48:08.498282 414960 kubeconfig.go:92] found "functional-20220202214710-386638" server: "https://192.168.49.2:8441"
I0202 21:48:08.500966 414960 ssh_runner.go:195] Run: sudo diff -u /var/tmp/minikube/kubeadm.yaml /var/tmp/minikube/kubeadm.yaml.new
I0202 21:48:08.507512 414960 kubeadm.go:568] needs reconfigure: configs differ:
-- stdout --
--- /var/tmp/minikube/kubeadm.yaml 2022-02-02 21:47:24.783914847 +0000
+++ /var/tmp/minikube/kubeadm.yaml.new 2022-02-02 21:48:08.192349744 +0000
@@ -22,7 +22,7 @@
apiServer:
certSANs: ["127.0.0.1", "localhost", "192.168.49.2"]
extraArgs:
- enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
+ enable-admission-plugins: "NamespaceAutoProvision"
controllerManager:
extraArgs:
allocate-node-cidrs: "true"
-- /stdout --
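The diff above is what triggers the reconfigure: only the apiserver's enable-admission-plugins value changed between the previously applied kubeadm.yaml and the newly rendered one. The same comparison can be rerun against the node (note that once the restart completes, the .new file is copied over the old one, so a later run may show no differences):

    # Re-run the config drift check that decided "needs reconfigure"
    minikube -p functional-20220202214710-386638 ssh \
      "sudo diff -u /var/tmp/minikube/kubeadm.yaml /var/tmp/minikube/kubeadm.yaml.new"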
I0202 21:48:08.507519 414960 kubeadm.go:1054] stopping kube-system containers ...
I0202 21:48:08.507552 414960 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_.*_(kube-system)_ --format={{.ID}}
I0202 21:48:08.540256 414960 docker.go:438] Stopping containers: [814d5d39e5e7 721285b959af 57c890a81600 984b0595435d 8d4ef263a0de 4340f72c87f1 69b3a7dc1d7c 01a90a21bd58 a93f4106bdd4 7f57939fd732 66081e6ef128 3935df79b1bf 12dd19e847f6 45922d9a0372 94d0ebbbbe55]
I0202 21:48:08.540309 414960 ssh_runner.go:195] Run: docker stop 814d5d39e5e7 721285b959af 57c890a81600 984b0595435d 8d4ef263a0de 4340f72c87f1 69b3a7dc1d7c 01a90a21bd58 a93f4106bdd4 7f57939fd732 66081e6ef128 3935df79b1bf 12dd19e847f6 45922d9a0372 94d0ebbbbe55
I0202 21:48:13.614958 414960 ssh_runner.go:235] Completed: docker stop 814d5d39e5e7 721285b959af 57c890a81600 984b0595435d 8d4ef263a0de 4340f72c87f1 69b3a7dc1d7c 01a90a21bd58 a93f4106bdd4 7f57939fd732 66081e6ef128 3935df79b1bf 12dd19e847f6 45922d9a0372 94d0ebbbbe55: (5.07462099s)
I0202 21:48:13.615027 414960 ssh_runner.go:195] Run: sudo systemctl stop kubelet
I0202 21:48:13.709643 414960 ssh_runner.go:195] Run: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf
I0202 21:48:13.716714 414960 kubeadm.go:154] found existing configuration files:
-rw------- 1 root root 5639 Feb 2 21:47 /etc/kubernetes/admin.conf
-rw------- 1 root root 5652 Feb 2 21:47 /etc/kubernetes/controller-manager.conf
-rw------- 1 root root 2067 Feb 2 21:47 /etc/kubernetes/kubelet.conf
-rw------- 1 root root 5604 Feb 2 21:47 /etc/kubernetes/scheduler.conf
I0202 21:48:13.716758 414960 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8441 /etc/kubernetes/admin.conf
I0202 21:48:13.723177 414960 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8441 /etc/kubernetes/kubelet.conf
I0202 21:48:13.729796 414960 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8441 /etc/kubernetes/controller-manager.conf
I0202 21:48:13.736210 414960 kubeadm.go:165] "https://control-plane.minikube.internal:8441" may not be in /etc/kubernetes/controller-manager.conf - will remove: sudo grep https://control-plane.minikube.internal:8441 /etc/kubernetes/controller-manager.conf: Process exited with status 1
stdout:
stderr:
I0202 21:48:13.736242 414960 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/controller-manager.conf
I0202 21:48:13.741908 414960 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8441 /etc/kubernetes/scheduler.conf
I0202 21:48:13.748115 414960 kubeadm.go:165] "https://control-plane.minikube.internal:8441" may not be in /etc/kubernetes/scheduler.conf - will remove: sudo grep https://control-plane.minikube.internal:8441 /etc/kubernetes/scheduler.conf: Process exited with status 1
stdout:
stderr:
I0202 21:48:13.748152 414960 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/scheduler.conf
I0202 21:48:13.754033 414960 ssh_runner.go:195] Run: sudo cp /var/tmp/minikube/kubeadm.yaml.new /var/tmp/minikube/kubeadm.yaml
I0202 21:48:13.760269 414960 kubeadm.go:677] reconfiguring cluster from /var/tmp/minikube/kubeadm.yaml
I0202 21:48:13.760277 414960 ssh_runner.go:195] Run: /bin/bash -c "sudo env PATH="/var/lib/minikube/binaries/v1.23.2:$PATH" kubeadm init phase certs all --config /var/tmp/minikube/kubeadm.yaml"
I0202 21:48:13.800107 414960 ssh_runner.go:195] Run: /bin/bash -c "sudo env PATH="/var/lib/minikube/binaries/v1.23.2:$PATH" kubeadm init phase kubeconfig all --config /var/tmp/minikube/kubeadm.yaml"
I0202 21:48:15.173564 414960 ssh_runner.go:235] Completed: /bin/bash -c "sudo env PATH="/var/lib/minikube/binaries/v1.23.2:$PATH" kubeadm init phase kubeconfig all --config /var/tmp/minikube/kubeadm.yaml": (1.373430623s)
I0202 21:48:15.173588 414960 ssh_runner.go:195] Run: /bin/bash -c "sudo env PATH="/var/lib/minikube/binaries/v1.23.2:$PATH" kubeadm init phase kubelet-start --config /var/tmp/minikube/kubeadm.yaml"
I0202 21:48:15.441804 414960 ssh_runner.go:195] Run: /bin/bash -c "sudo env PATH="/var/lib/minikube/binaries/v1.23.2:$PATH" kubeadm init phase control-plane all --config /var/tmp/minikube/kubeadm.yaml"
I0202 21:48:15.532165 414960 ssh_runner.go:195] Run: /bin/bash -c "sudo env PATH="/var/lib/minikube/binaries/v1.23.2:$PATH" kubeadm init phase etcd local --config /var/tmp/minikube/kubeadm.yaml"
I0202 21:48:15.635235 414960 api_server.go:51] waiting for apiserver process to appear ...
I0202 21:48:15.635306 414960 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I0202 21:48:15.654099 414960 api_server.go:71] duration metric: took 18.866933ms to wait for apiserver process to appear ...
I0202 21:48:15.654120 414960 api_server.go:87] waiting for apiserver healthz status ...
I0202 21:48:15.654132 414960 api_server.go:240] Checking apiserver healthz at https://192.168.49.2:8441/healthz ...
I0202 21:48:15.659191 414960 api_server.go:266] https://192.168.49.2:8441/healthz returned 200:
ok
I0202 21:48:15.666098 414960 api_server.go:140] control plane version: v1.23.2
I0202 21:48:15.666110 414960 api_server.go:130] duration metric: took 11.98543ms to wait for apiserver health ...
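The healthz wait is a plain HTTPS GET against the apiserver on the node IP and port used by this profile; from the machine running the docker driver it can be reproduced with curl (-k skips verification of the minikube-generated CA):

    # Probe the apiserver health endpoint directly
    curl -ks https://192.168.49.2:8441/healthz
    # expected response: ok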
I0202 21:48:15.666117 414960 cni.go:93] Creating CNI manager for ""
I0202 21:48:15.666122 414960 cni.go:167] CNI unnecessary in this configuration, recommending no CNI
I0202 21:48:15.666127 414960 system_pods.go:43] waiting for kube-system pods to appear ...
I0202 21:48:15.717098 414960 system_pods.go:59] 7 kube-system pods found
I0202 21:48:15.717120 414960 system_pods.go:61] "coredns-64897985d-qmcxc" [d0448ec2-8c41-4697-9419-17bc3267ec06] Running
I0202 21:48:15.717134 414960 system_pods.go:61] "etcd-functional-20220202214710-386638" [87c6f3be-757f-49cc-ac76-10a89cfe3e44] Running / Ready:ContainersNotReady (containers with unready status: [etcd]) / ContainersReady:ContainersNotReady (containers with unready status: [etcd])
I0202 21:48:15.717142 414960 system_pods.go:61] "kube-apiserver-functional-20220202214710-386638" [be7b8d8a-56b0-48f5-b841-e20b68886d3a] Running / Ready:ContainersNotReady (containers with unready status: [kube-apiserver]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-apiserver])
I0202 21:48:15.717152 414960 system_pods.go:61] "kube-controller-manager-functional-20220202214710-386638" [d9d2c229-c9e8-4801-be93-bc91260f9aed] Running / Ready:ContainersNotReady (containers with unready status: [kube-controller-manager]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-controller-manager])
I0202 21:48:15.717156 414960 system_pods.go:61] "kube-proxy-c2lnh" [2e835a4a-ce75-4ea4-93dd-0473663c28e1] Running
I0202 21:48:15.717166 414960 system_pods.go:61] "kube-scheduler-functional-20220202214710-386638" [b85c05bb-5b5e-4b6d-8bbb-0eb4ee77e3ba] Running / Ready:ContainersNotReady (containers with unready status: [kube-scheduler]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-scheduler])
I0202 21:48:15.717174 414960 system_pods.go:61] "storage-provisioner" [4660a715-0b0d-419a-a7b1-650bf4a8466f] Running / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I0202 21:48:15.717180 414960 system_pods.go:74] duration metric: took 51.048746ms to wait for pod list to return data ...
I0202 21:48:15.717189 414960 node_conditions.go:102] verifying NodePressure condition ...
I0202 21:48:15.721095 414960 node_conditions.go:122] node storage ephemeral capacity is 304695084Ki
I0202 21:48:15.721111 414960 node_conditions.go:123] node cpu capacity is 8
I0202 21:48:15.721123 414960 node_conditions.go:105] duration metric: took 3.930275ms to run NodePressure ...
I0202 21:48:15.721141 414960 ssh_runner.go:195] Run: /bin/bash -c "sudo env PATH="/var/lib/minikube/binaries/v1.23.2:$PATH" kubeadm init phase addon all --config /var/tmp/minikube/kubeadm.yaml"
I0202 21:48:16.140968 414960 kubeadm.go:732] waiting for restarted kubelet to initialise ...
I0202 21:48:16.144637 414960 kubeadm.go:747] kubelet initialised
I0202 21:48:16.144643 414960 kubeadm.go:748] duration metric: took 3.664967ms waiting for restarted kubelet to initialise ...
I0202 21:48:16.144650 414960 pod_ready.go:35] extra waiting up to 4m0s for all system-critical pods including labels [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] to be "Ready" ...
I0202 21:48:16.148452 414960 pod_ready.go:78] waiting up to 4m0s for pod "coredns-64897985d-qmcxc" in "kube-system" namespace to be "Ready" ...
I0202 21:48:16.155988 414960 pod_ready.go:97] node "functional-20220202214710-386638" hosting pod "coredns-64897985d-qmcxc" in "kube-system" namespace is currently not "Ready" (skipping!): node "functional-20220202214710-386638" has status "Ready":"False"
I0202 21:48:16.155997 414960 pod_ready.go:81] duration metric: took 7.533282ms waiting for pod "coredns-64897985d-qmcxc" in "kube-system" namespace to be "Ready" ...
E0202 21:48:16.156007 414960 pod_ready.go:66] WaitExtra: waitPodCondition: node "functional-20220202214710-386638" hosting pod "coredns-64897985d-qmcxc" in "kube-system" namespace is currently not "Ready" (skipping!): node "functional-20220202214710-386638" has status "Ready":"False"
I0202 21:48:16.156034 414960 pod_ready.go:78] waiting up to 4m0s for pod "etcd-functional-20220202214710-386638" in "kube-system" namespace to be "Ready" ...
I0202 21:48:16.208387 414960 pod_ready.go:97] node "functional-20220202214710-386638" hosting pod "etcd-functional-20220202214710-386638" in "kube-system" namespace is currently not "Ready" (skipping!): node "functional-20220202214710-386638" has status "Ready":"False"
I0202 21:48:16.208400 414960 pod_ready.go:81] duration metric: took 52.359418ms waiting for pod "etcd-functional-20220202214710-386638" in "kube-system" namespace to be "Ready" ...
E0202 21:48:16.208409 414960 pod_ready.go:66] WaitExtra: waitPodCondition: node "functional-20220202214710-386638" hosting pod "etcd-functional-20220202214710-386638" in "kube-system" namespace is currently not "Ready" (skipping!): node "functional-20220202214710-386638" has status "Ready":"False"
I0202 21:48:16.208430 414960 pod_ready.go:78] waiting up to 4m0s for pod "kube-apiserver-functional-20220202214710-386638" in "kube-system" namespace to be "Ready" ...
I0202 21:48:16.212992 414960 pod_ready.go:97] node "functional-20220202214710-386638" hosting pod "kube-apiserver-functional-20220202214710-386638" in "kube-system" namespace is currently not "Ready" (skipping!): node "functional-20220202214710-386638" has status "Ready":"False"
I0202 21:48:16.213003 414960 pod_ready.go:81] duration metric: took 4.567506ms waiting for pod "kube-apiserver-functional-20220202214710-386638" in "kube-system" namespace to be "Ready" ...
E0202 21:48:16.213010 414960 pod_ready.go:66] WaitExtra: waitPodCondition: node "functional-20220202214710-386638" hosting pod "kube-apiserver-functional-20220202214710-386638" in "kube-system" namespace is currently not "Ready" (skipping!): node "functional-20220202214710-386638" has status "Ready":"False"
I0202 21:48:16.213026 414960 pod_ready.go:78] waiting up to 4m0s for pod "kube-controller-manager-functional-20220202214710-386638" in "kube-system" namespace to be "Ready" ...
I0202 21:48:16.216863 414960 pod_ready.go:97] node "functional-20220202214710-386638" hosting pod "kube-controller-manager-functional-20220202214710-386638" in "kube-system" namespace is currently not "Ready" (skipping!): node "functional-20220202214710-386638" has status "Ready":"False"
I0202 21:48:16.216873 414960 pod_ready.go:81] duration metric: took 3.840889ms waiting for pod "kube-controller-manager-functional-20220202214710-386638" in "kube-system" namespace to be "Ready" ...
E0202 21:48:16.216884 414960 pod_ready.go:66] WaitExtra: waitPodCondition: node "functional-20220202214710-386638" hosting pod "kube-controller-manager-functional-20220202214710-386638" in "kube-system" namespace is currently not "Ready" (skipping!): node "functional-20220202214710-386638" has status "Ready":"False"
I0202 21:48:16.216903 414960 pod_ready.go:78] waiting up to 4m0s for pod "kube-proxy-c2lnh" in "kube-system" namespace to be "Ready" ...
I0202 21:48:16.544648 414960 pod_ready.go:92] pod "kube-proxy-c2lnh" in "kube-system" namespace has status "Ready":"True"
I0202 21:48:16.544656 414960 pod_ready.go:81] duration metric: took 327.747425ms waiting for pod "kube-proxy-c2lnh" in "kube-system" namespace to be "Ready" ...
I0202 21:48:16.544665 414960 pod_ready.go:78] waiting up to 4m0s for pod "kube-scheduler-functional-20220202214710-386638" in "kube-system" namespace to be "Ready" ...
I0202 21:48:18.959820 414960 pod_ready.go:97] error getting pod "kube-scheduler-functional-20220202214710-386638" in "kube-system" namespace (skipping!): Get "https://192.168.49.2:8441/api/v1/namespaces/kube-system/pods/kube-scheduler-functional-20220202214710-386638": dial tcp 192.168.49.2:8441: connect: connection refused
I0202 21:48:18.959840 414960 pod_ready.go:81] duration metric: took 2.41517032s waiting for pod "kube-scheduler-functional-20220202214710-386638" in "kube-system" namespace to be "Ready" ...
E0202 21:48:18.959848 414960 pod_ready.go:66] WaitExtra: waitPodCondition: error getting pod "kube-scheduler-functional-20220202214710-386638" in "kube-system" namespace (skipping!): Get "https://192.168.49.2:8441/api/v1/namespaces/kube-system/pods/kube-scheduler-functional-20220202214710-386638": dial tcp 192.168.49.2:8441: connect: connection refused
I0202 21:48:18.959873 414960 pod_ready.go:38] duration metric: took 2.815215375s for extra waiting for all system-critical and pods with labels [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] to be "Ready" ...
I0202 21:48:18.959896 414960 ssh_runner.go:195] Run: /bin/bash -c "cat /proc/$(pgrep kube-apiserver)/oom_adj"
W0202 21:48:18.976105 414960 kubeadm.go:756] unable to adjust resource limits: oom_adj check cmd /bin/bash -c "cat /proc/$(pgrep kube-apiserver)/oom_adj". : /bin/bash -c "cat /proc/$(pgrep kube-apiserver)/oom_adj": Process exited with status 1
stdout:
stderr:
cat: /proc//oom_adj: No such file or directory
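The warning above is harmless: pgrep found no kube-apiserver process at that instant (the container was still coming back after the restart), so the path expanded to /proc//oom_adj. Once the apiserver is running again, the same check succeeds; the pgrep pattern below is the one this log itself uses:

    # Re-check the apiserver's oom_adj after the restart has settled
    minikube -p functional-20220202214710-386638 ssh \
      "cat /proc/\$(pgrep -xnf 'kube-apiserver.*minikube.*')/oom_adj"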
I0202 21:48:18.976119 414960 kubeadm.go:604] restartCluster took 10.48435701s
I0202 21:48:18.976124 414960 kubeadm.go:392] StartCluster complete in 10.520908226s
I0202 21:48:18.976136 414960 settings.go:142] acquiring lock: {Name:mkc564df8104e4c2326cd37cd909420c5fd7241d Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0202 21:48:18.976222 414960 settings.go:150] Updating kubeconfig: /home/jenkins/minikube-integration/linux-amd64-docker-docker-13251-383287-cce8d1911280cbcb62c9a9805b43d62c56136aef/kubeconfig
I0202 21:48:18.976747 414960 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/linux-amd64-docker-docker-13251-383287-cce8d1911280cbcb62c9a9805b43d62c56136aef/kubeconfig: {Name:mkd9197ef7cab52290ec1513b45875905284aec6 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
W0202 21:48:18.978668 414960 kapi.go:226] failed getting deployment scale, will retry: Get "https://192.168.49.2:8441/apis/apps/v1/namespaces/kube-system/deployments/coredns/scale": dial tcp 192.168.49.2:8441: connect: connection refused
W0202 21:48:19.479803 414960 kapi.go:226] failed getting deployment scale, will retry: Get "https://192.168.49.2:8441/apis/apps/v1/namespaces/kube-system/deployments/coredns/scale": dial tcp 192.168.49.2:8441: connect: connection refused
I0202 21:48:21.825394 414960 kapi.go:244] deployment "coredns" in namespace "kube-system" and context "functional-20220202214710-386638" rescaled to 1
I0202 21:48:21.825445 414960 start.go:208] Will wait 6m0s for node &{Name: IP:192.168.49.2 Port:8441 KubernetesVersion:v1.23.2 ContainerRuntime:docker ControlPlane:true Worker:true}
I0202 21:48:21.827602 414960 out.go:176] * Verifying Kubernetes components...
I0202 21:48:21.827671 414960 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet
I0202 21:48:21.825587 414960 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.23.2/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml"
I0202 21:48:21.825603 414960 addons.go:415] enableAddons start: toEnable=map[ambassador:false auto-pause:false csi-hostpath-driver:false dashboard:false default-storageclass:true efk:false freshpod:false gcp-auth:false gvisor:false helm-tiller:false ingress:false ingress-dns:false istio:false istio-provisioner:false kong:false kubevirt:false logviewer:false metallb:false metrics-server:false nvidia-driver-installer:false nvidia-gpu-device-plugin:false olm:false pod-security-policy:false portainer:false registry:false registry-aliases:false registry-creds:false storage-provisioner:true storage-provisioner-gluster:false volumesnapshots:false], additional=[]
I0202 21:48:21.827805 414960 addons.go:65] Setting storage-provisioner=true in profile "functional-20220202214710-386638"
I0202 21:48:21.827820 414960 addons.go:153] Setting addon storage-provisioner=true in "functional-20220202214710-386638"
W0202 21:48:21.827825 414960 addons.go:165] addon storage-provisioner should already be in state true
I0202 21:48:21.827850 414960 host.go:66] Checking if "functional-20220202214710-386638" exists ...
I0202 21:48:21.828380 414960 cli_runner.go:133] Run: docker container inspect functional-20220202214710-386638 --format={{.State.Status}}
I0202 21:48:21.825862 414960 config.go:176] Loaded profile config "functional-20220202214710-386638": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.23.2
I0202 21:48:21.828625 414960 addons.go:65] Setting default-storageclass=true in profile "functional-20220202214710-386638"
I0202 21:48:21.828643 414960 addons_storage_classes.go:33] enableOrDisableStorageClasses default-storageclass=true on "functional-20220202214710-386638"
I0202 21:48:21.828959 414960 cli_runner.go:133] Run: docker container inspect functional-20220202214710-386638 --format={{.State.Status}}
I0202 21:48:21.868236 414960 out.go:176] - Using image gcr.io/k8s-minikube/storage-provisioner:v5
I0202 21:48:21.868559 414960 addons.go:348] installing /etc/kubernetes/addons/storage-provisioner.yaml
I0202 21:48:21.868569 414960 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/storage-provisioner.yaml (2676 bytes)
I0202 21:48:21.868614 414960 cli_runner.go:133] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" functional-20220202214710-386638
I0202 21:48:21.898343 414960 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:49242 SSHKeyPath:/home/jenkins/minikube-integration/linux-amd64-docker-docker-13251-383287-cce8d1911280cbcb62c9a9805b43d62c56136aef/.minikube/machines/functional-20220202214710-386638/id_rsa Username:docker}
I0202 21:48:22.008648 414960 addons.go:153] Setting addon default-storageclass=true in "functional-20220202214710-386638"
W0202 21:48:22.008661 414960 addons.go:165] addon default-storageclass should already be in state true
I0202 21:48:22.008692 414960 host.go:66] Checking if "functional-20220202214710-386638" exists ...
I0202 21:48:22.009241 414960 cli_runner.go:133] Run: docker container inspect functional-20220202214710-386638 --format={{.State.Status}}
I0202 21:48:22.050540 414960 addons.go:348] installing /etc/kubernetes/addons/storageclass.yaml
I0202 21:48:22.050553 414960 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/storageclass.yaml (271 bytes)
I0202 21:48:22.050614 414960 cli_runner.go:133] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" functional-20220202214710-386638
I0202 21:48:22.080786 414960 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:49242 SSHKeyPath:/home/jenkins/minikube-integration/linux-amd64-docker-docker-13251-383287-cce8d1911280cbcb62c9a9805b43d62c56136aef/.minikube/machines/functional-20220202214710-386638/id_rsa Username:docker}
I0202 21:48:22.117510 414960 node_ready.go:35] waiting up to 6m0s for node "functional-20220202214710-386638" to be "Ready" ...
I0202 21:48:22.117531 414960 start.go:757] CoreDNS already contains "host.minikube.internal" host record, skipping...
I0202 21:48:22.118379 414960 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.23.2/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml
I0202 21:48:22.120164 414960 node_ready.go:49] node "functional-20220202214710-386638" has status "Ready":"True"
I0202 21:48:22.120171 414960 node_ready.go:38] duration metric: took 2.634646ms waiting for node "functional-20220202214710-386638" to be "Ready" ...
I0202 21:48:22.120182 414960 pod_ready.go:35] extra waiting up to 6m0s for all system-critical pods including labels [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] to be "Ready" ...
I0202 21:48:22.126639 414960 pod_ready.go:78] waiting up to 6m0s for pod "coredns-64897985d-qmcxc" in "kube-system" namespace to be "Ready" ...
I0202 21:48:22.130735 414960 pod_ready.go:92] pod "coredns-64897985d-qmcxc" in "kube-system" namespace has status "Ready":"True"
I0202 21:48:22.130743 414960 pod_ready.go:81] duration metric: took 4.09023ms waiting for pod "coredns-64897985d-qmcxc" in "kube-system" namespace to be "Ready" ...
I0202 21:48:22.130755 414960 pod_ready.go:78] waiting up to 6m0s for pod "etcd-functional-20220202214710-386638" in "kube-system" namespace to be "Ready" ...
I0202 21:48:22.220632 414960 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.23.2/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml
I0202 21:48:22.946163 414960 out.go:176] * Enabled addons: storage-provisioner, default-storageclass
I0202 21:48:22.946191 414960 addons.go:417] enableAddons completed in 1.120590066s
I0202 21:48:24.140288 414960 pod_ready.go:102] pod "etcd-functional-20220202214710-386638" in "kube-system" namespace has status "Ready":"False"
I0202 21:48:26.640325 414960 pod_ready.go:102] pod "etcd-functional-20220202214710-386638" in "kube-system" namespace has status "Ready":"False"
I0202 21:48:29.140081 414960 pod_ready.go:102] pod "etcd-functional-20220202214710-386638" in "kube-system" namespace has status "Ready":"False"
I0202 21:48:29.639231 414960 pod_ready.go:92] pod "etcd-functional-20220202214710-386638" in "kube-system" namespace has status "Ready":"True"
I0202 21:48:29.639252 414960 pod_ready.go:81] duration metric: took 7.508490778s waiting for pod "etcd-functional-20220202214710-386638" in "kube-system" namespace to be "Ready" ...
I0202 21:48:29.639260 414960 pod_ready.go:78] waiting up to 6m0s for pod "kube-controller-manager-functional-20220202214710-386638" in "kube-system" namespace to be "Ready" ...
I0202 21:48:29.643022 414960 pod_ready.go:92] pod "kube-controller-manager-functional-20220202214710-386638" in "kube-system" namespace has status "Ready":"True"
I0202 21:48:29.643030 414960 pod_ready.go:81] duration metric: took 3.762921ms waiting for pod "kube-controller-manager-functional-20220202214710-386638" in "kube-system" namespace to be "Ready" ...
I0202 21:48:29.643041 414960 pod_ready.go:78] waiting up to 6m0s for pod "kube-proxy-c2lnh" in "kube-system" namespace to be "Ready" ...
I0202 21:48:29.646728 414960 pod_ready.go:92] pod "kube-proxy-c2lnh" in "kube-system" namespace has status "Ready":"True"
I0202 21:48:29.646734 414960 pod_ready.go:81] duration metric: took 3.687602ms waiting for pod "kube-proxy-c2lnh" in "kube-system" namespace to be "Ready" ...
I0202 21:48:29.646740 414960 pod_ready.go:78] waiting up to 6m0s for pod "kube-scheduler-functional-20220202214710-386638" in "kube-system" namespace to be "Ready" ...
I0202 21:48:29.649985 414960 pod_ready.go:92] pod "kube-scheduler-functional-20220202214710-386638" in "kube-system" namespace has status "Ready":"True"
I0202 21:48:29.649991 414960 pod_ready.go:81] duration metric: took 3.246333ms waiting for pod "kube-scheduler-functional-20220202214710-386638" in "kube-system" namespace to be "Ready" ...
I0202 21:48:29.649998 414960 pod_ready.go:38] duration metric: took 7.529806488s for extra waiting for all system-critical and pods with labels [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] to be "Ready" ...
I0202 21:48:29.650016 414960 api_server.go:51] waiting for apiserver process to appear ...
I0202 21:48:29.650057 414960 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I0202 21:48:29.669840 414960 api_server.go:71] duration metric: took 7.844359904s to wait for apiserver process to appear ...
I0202 21:48:29.669852 414960 api_server.go:87] waiting for apiserver healthz status ...
I0202 21:48:29.669859 414960 api_server.go:240] Checking apiserver healthz at https://192.168.49.2:8441/healthz ...
I0202 21:48:29.674217 414960 api_server.go:266] https://192.168.49.2:8441/healthz returned 200:
ok
I0202 21:48:29.674899 414960 api_server.go:140] control plane version: v1.23.2
I0202 21:48:29.674908 414960 api_server.go:130] duration metric: took 5.052911ms to wait for apiserver health ...
I0202 21:48:29.674913 414960 system_pods.go:43] waiting for kube-system pods to appear ...
I0202 21:48:29.678692 414960 system_pods.go:59] 7 kube-system pods found
I0202 21:48:29.678704 414960 system_pods.go:61] "coredns-64897985d-qmcxc" [d0448ec2-8c41-4697-9419-17bc3267ec06] Running
I0202 21:48:29.678708 414960 system_pods.go:61] "etcd-functional-20220202214710-386638" [87c6f3be-757f-49cc-ac76-10a89cfe3e44] Running
I0202 21:48:29.678743 414960 system_pods.go:61] "kube-apiserver-functional-20220202214710-386638" [b7c7a9e3-1a91-4c68-b4be-8262631e90e3] Running / Ready:ContainersNotReady (containers with unready status: [kube-apiserver]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-apiserver])
I0202 21:48:29.678746 414960 system_pods.go:61] "kube-controller-manager-functional-20220202214710-386638" [d9d2c229-c9e8-4801-be93-bc91260f9aed] Running
I0202 21:48:29.678751 414960 system_pods.go:61] "kube-proxy-c2lnh" [2e835a4a-ce75-4ea4-93dd-0473663c28e1] Running
I0202 21:48:29.678754 414960 system_pods.go:61] "kube-scheduler-functional-20220202214710-386638" [b85c05bb-5b5e-4b6d-8bbb-0eb4ee77e3ba] Running
I0202 21:48:29.678757 414960 system_pods.go:61] "storage-provisioner" [4660a715-0b0d-419a-a7b1-650bf4a8466f] Running
I0202 21:48:29.678760 414960 system_pods.go:74] duration metric: took 3.843828ms to wait for pod list to return data ...
I0202 21:48:29.678764 414960 default_sa.go:34] waiting for default service account to be created ...
I0202 21:48:29.680686 414960 default_sa.go:45] found service account: "default"
I0202 21:48:29.680693 414960 default_sa.go:55] duration metric: took 1.923488ms for default service account to be created ...
I0202 21:48:29.680698 414960 system_pods.go:116] waiting for k8s-apps to be running ...
I0202 21:48:29.839310 414960 system_pods.go:86] 7 kube-system pods found
I0202 21:48:29.839328 414960 system_pods.go:89] "coredns-64897985d-qmcxc" [d0448ec2-8c41-4697-9419-17bc3267ec06] Running
I0202 21:48:29.839333 414960 system_pods.go:89] "etcd-functional-20220202214710-386638" [87c6f3be-757f-49cc-ac76-10a89cfe3e44] Running
I0202 21:48:29.839339 414960 system_pods.go:89] "kube-apiserver-functional-20220202214710-386638" [b7c7a9e3-1a91-4c68-b4be-8262631e90e3] Running / Ready:ContainersNotReady (containers with unready status: [kube-apiserver]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-apiserver])
I0202 21:48:29.839343 414960 system_pods.go:89] "kube-controller-manager-functional-20220202214710-386638" [d9d2c229-c9e8-4801-be93-bc91260f9aed] Running
I0202 21:48:29.839346 414960 system_pods.go:89] "kube-proxy-c2lnh" [2e835a4a-ce75-4ea4-93dd-0473663c28e1] Running
I0202 21:48:29.839350 414960 system_pods.go:89] "kube-scheduler-functional-20220202214710-386638" [b85c05bb-5b5e-4b6d-8bbb-0eb4ee77e3ba] Running
I0202 21:48:29.839352 414960 system_pods.go:89] "storage-provisioner" [4660a715-0b0d-419a-a7b1-650bf4a8466f] Running
I0202 21:48:29.839356 414960 system_pods.go:126] duration metric: took 158.655218ms to wait for k8s-apps to be running ...
I0202 21:48:29.839362 414960 system_svc.go:44] waiting for kubelet service to be running ....
I0202 21:48:29.839400 414960 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet
I0202 21:48:29.848770 414960 system_svc.go:56] duration metric: took 9.399604ms WaitForService to wait for kubelet.
I0202 21:48:29.848782 414960 kubeadm.go:547] duration metric: took 8.023308795s to wait for : map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] ...
I0202 21:48:29.848802 414960 node_conditions.go:102] verifying NodePressure condition ...
I0202 21:48:30.038006 414960 node_conditions.go:122] node storage ephemeral capacity is 304695084Ki
I0202 21:48:30.038018 414960 node_conditions.go:123] node cpu capacity is 8
I0202 21:48:30.038027 414960 node_conditions.go:105] duration metric: took 189.221761ms to run NodePressure ...
I0202 21:48:30.038035 414960 start.go:213] waiting for startup goroutines ...
I0202 21:48:30.071991 414960 start.go:496] kubectl: 1.23.3, cluster: 1.23.2 (minor skew: 0)
I0202 21:48:30.074312 414960 out.go:176] * Done! kubectl is now configured to use "functional-20220202214710-386638" cluster and "default" namespace by default
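Everything below this point is the standard post-mortem dump (Docker daemon log, container status, per-container logs, node description). For a live profile it can be regenerated at any time; the sketch below assumes a reasonably recent minikube where the --file flag is available for attaching the output to an issue:

    # Capture the same post-mortem sections for this profile
    minikube -p functional-20220202214710-386638 logs --file=minikube-logs.txt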
*
* ==> Docker <==
* -- Logs begin at Wed 2022-02-02 21:47:20 UTC, end at Wed 2022-02-02 21:48:31 UTC. --
Feb 02 21:47:22 functional-20220202214710-386638 dockerd[458]: time="2022-02-02T21:47:22.888665297Z" level=info msg="Docker daemon" commit=459d0df graphdriver(s)=overlay2 version=20.10.12
Feb 02 21:47:22 functional-20220202214710-386638 dockerd[458]: time="2022-02-02T21:47:22.888728122Z" level=info msg="Daemon has completed initialization"
Feb 02 21:47:22 functional-20220202214710-386638 systemd[1]: Started Docker Application Container Engine.
Feb 02 21:47:22 functional-20220202214710-386638 dockerd[458]: time="2022-02-02T21:47:22.904877194Z" level=info msg="API listen on [::]:2376"
Feb 02 21:47:22 functional-20220202214710-386638 dockerd[458]: time="2022-02-02T21:47:22.908272077Z" level=info msg="API listen on /var/run/docker.sock"
Feb 02 21:47:54 functional-20220202214710-386638 dockerd[458]: time="2022-02-02T21:47:54.484839101Z" level=info msg="ignoring event" container=a320f64aeec702dc71bdc7c60e78e3a71aa7942e610d2ae2024382f7a5dc5ce6 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Feb 02 21:47:54 functional-20220202214710-386638 dockerd[458]: time="2022-02-02T21:47:54.536903771Z" level=info msg="ignoring event" container=4340f72c87f173f19edf721c5e724032c5888d6b91b9eb00453f0d74195bb14c module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Feb 02 21:48:08 functional-20220202214710-386638 dockerd[458]: time="2022-02-02T21:48:08.597885528Z" level=info msg="ignoring event" container=814d5d39e5e79ef44dbf90ae3d9de78ce4ef0e05ee5b0adb2ecf105272bdd3ea module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Feb 02 21:48:08 functional-20220202214710-386638 dockerd[458]: time="2022-02-02T21:48:08.731665035Z" level=info msg="ignoring event" container=66081e6ef12808f66181c38c236719ade987c4c4da621d6c5406a2409adc276d module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Feb 02 21:48:08 functional-20220202214710-386638 dockerd[458]: time="2022-02-02T21:48:08.807734311Z" level=info msg="ignoring event" container=984b0595435d870a47950a1bca22f6539fbc7908b0b144be86070dde4def88dd module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Feb 02 21:48:08 functional-20220202214710-386638 dockerd[458]: time="2022-02-02T21:48:08.810288765Z" level=info msg="ignoring event" container=45922d9a03728f9191db73690ca5a4081bfe0374605ad74ce73bf0bee06132bf module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Feb 02 21:48:08 functional-20220202214710-386638 dockerd[458]: time="2022-02-02T21:48:08.810328011Z" level=info msg="ignoring event" container=721285b959af9e497fb4e24130423ccf2f5adde4e06e8bc42c05dfa48cf58c62 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Feb 02 21:48:08 functional-20220202214710-386638 dockerd[458]: time="2022-02-02T21:48:08.810359197Z" level=info msg="ignoring event" container=94d0ebbbbe556f8e9ce639e6a52efb784af6431a8267a5fd6b59026f5ca01c7e module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Feb 02 21:48:08 functional-20220202214710-386638 dockerd[458]: time="2022-02-02T21:48:08.810374080Z" level=info msg="ignoring event" container=01a90a21bd5819d1f5a7a385e1ab6e81c14b408e8374a8fe74bf6325885dcd97 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Feb 02 21:48:08 functional-20220202214710-386638 dockerd[458]: time="2022-02-02T21:48:08.811418177Z" level=info msg="ignoring event" container=12dd19e847f68236e41bd00c2fb502a509a247929ecb10cf6fd39f1b948887c5 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Feb 02 21:48:08 functional-20220202214710-386638 dockerd[458]: time="2022-02-02T21:48:08.814780561Z" level=info msg="ignoring event" container=69b3a7dc1d7ce2758a45406798d36e80f79d253d4bfffbde264d55d412dcf802 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Feb 02 21:48:08 functional-20220202214710-386638 dockerd[458]: time="2022-02-02T21:48:08.817718034Z" level=info msg="ignoring event" container=8d4ef263a0deb5e45a7204535d7ddab5b409f7cb1b43b39ac641392a3c83f172 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Feb 02 21:48:08 functional-20220202214710-386638 dockerd[458]: time="2022-02-02T21:48:08.818148312Z" level=info msg="ignoring event" container=3935df79b1bfdcdd30d787c5d7180068cfa22a855a51563c16d4cfbebad072c6 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Feb 02 21:48:09 functional-20220202214710-386638 dockerd[458]: time="2022-02-02T21:48:09.627218393Z" level=info msg="ignoring event" container=a93f4106bdd49ee81765b71dee3de72773e17c93f5eb8b038cc4c7636e59d3aa module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Feb 02 21:48:09 functional-20220202214710-386638 dockerd[458]: time="2022-02-02T21:48:09.637333860Z" level=info msg="ignoring event" container=7f57939fd732a2da4dbf3d35cfc17a89f1b6876d8b55204e828f3c8bd609e1fa module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Feb 02 21:48:10 functional-20220202214710-386638 dockerd[458]: time="2022-02-02T21:48:10.419917073Z" level=info msg="ignoring event" container=4e9cce7a10fc55f2dbbdc4177b6341887968004fb441bca08dc777522255a5c8 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Feb 02 21:48:13 functional-20220202214710-386638 dockerd[458]: time="2022-02-02T21:48:13.601105891Z" level=info msg="ignoring event" container=57c890a8160058a44730897ed106714e788cd210e12a754bd611631fd7714cf4 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Feb 02 21:48:17 functional-20220202214710-386638 dockerd[458]: time="2022-02-02T21:48:17.248090208Z" level=info msg="ignoring event" container=1251a66fdbb5f805a7e800f9eeabecfa04f9ee346e6a984c81966f441a7b4fd7 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Feb 02 21:48:17 functional-20220202214710-386638 dockerd[458]: time="2022-02-02T21:48:17.946512627Z" level=info msg="ignoring event" container=3ca396f74545111c90e718c23955827968c389a8dab5bc19096f90e47919d1d2 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Feb 02 21:48:18 functional-20220202214710-386638 dockerd[458]: time="2022-02-02T21:48:18.009323502Z" level=info msg="ignoring event" container=26bb951a2f1e150bc66a6d600f18eb919d6a3a49f67b3dc307cb5515e06348ec module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
*
* ==> container status <==
* CONTAINER IMAGE CREATED STATE NAME ATTEMPT POD ID
eebcc13f80f29 6e38f40d628db 8 seconds ago Running storage-provisioner 2 afe5d50c0cb45
2cfd7c3d1963c a4ca41631cc7a 8 seconds ago Running coredns 1 318303fa9030a
16f643730c8ff 8a0228dd6a683 12 seconds ago Running kube-apiserver 1 d994562250771
1251a66fdbb5f 8a0228dd6a683 14 seconds ago Exited kube-apiserver 0 d994562250771
161c307a7ab39 6114d758d6d16 21 seconds ago Running kube-scheduler 1 9cc7e9b544d8d
0e9f9533ff722 4783639ba7e03 21 seconds ago Running kube-controller-manager 1 6a6dcce0faede
4e9cce7a10fc5 6e38f40d628db 21 seconds ago Exited storage-provisioner 1 afe5d50c0cb45
bf69761828687 25f8c7f3da61c 21 seconds ago Running etcd 1 15552edcbb67d
fab9e8c7dd3f4 d922ca3da64b3 22 seconds ago Running kube-proxy 1 1411d0a8c5a17
57c890a816005 a4ca41631cc7a 42 seconds ago Exited coredns 0 984b0595435d8
8d4ef263a0deb d922ca3da64b3 43 seconds ago Exited kube-proxy 0 69b3a7dc1d7ce
01a90a21bd581 25f8c7f3da61c About a minute ago Exited etcd 0 94d0ebbbbe556
7f57939fd732a 6114d758d6d16 About a minute ago Exited kube-scheduler 0 3935df79b1bfd
66081e6ef1280 4783639ba7e03 About a minute ago Exited kube-controller-manager 0 12dd19e847f68
*
* ==> coredns [2cfd7c3d1963] <==
* .:53
[INFO] plugin/reload: Running configuration MD5 = cec3c60eb1cc4909fd4579a8d79ea031
CoreDNS-1.8.6
linux/amd64, go1.17.1, 13a9191
*
* ==> coredns [57c890a81600] <==
* .:53
[INFO] plugin/reload: Running configuration MD5 = db32ca3650231d74073ff4cf814959a7
CoreDNS-1.8.6
linux/amd64, go1.17.1, 13a9191
[INFO] SIGTERM: Shutting down servers then terminating
[INFO] plugin/health: Going into lameduck mode for 5s
*
* ==> describe nodes <==
* Name: functional-20220202214710-386638
Roles: control-plane,master
Labels: beta.kubernetes.io/arch=amd64
beta.kubernetes.io/os=linux
kubernetes.io/arch=amd64
kubernetes.io/hostname=functional-20220202214710-386638
kubernetes.io/os=linux
minikube.k8s.io/commit=e7ecaa98a6d1dab5935ea4b7778c6e187f5bde82
minikube.k8s.io/name=functional-20220202214710-386638
minikube.k8s.io/updated_at=2022_02_02T21_47_35_0700
minikube.k8s.io/version=v1.25.1
node-role.kubernetes.io/control-plane=
node-role.kubernetes.io/master=
node.kubernetes.io/exclude-from-external-load-balancers=
Annotations: kubeadm.alpha.kubernetes.io/cri-socket: /var/run/dockershim.sock
node.alpha.kubernetes.io/ttl: 0
volumes.kubernetes.io/controller-managed-attach-detach: true
CreationTimestamp: Wed, 02 Feb 2022 21:47:32 +0000
Taints: <none>
Unschedulable: false
Lease:
HolderIdentity: functional-20220202214710-386638
AcquireTime: <unset>
RenewTime: Wed, 02 Feb 2022 21:48:26 +0000
Conditions:
Type Status LastHeartbeatTime LastTransitionTime Reason Message
---- ------ ----------------- ------------------ ------ -------
MemoryPressure False Wed, 02 Feb 2022 21:48:16 +0000 Wed, 02 Feb 2022 21:47:29 +0000 KubeletHasSufficientMemory kubelet has sufficient memory available
DiskPressure False Wed, 02 Feb 2022 21:48:16 +0000 Wed, 02 Feb 2022 21:47:29 +0000 KubeletHasNoDiskPressure kubelet has no disk pressure
PIDPressure False Wed, 02 Feb 2022 21:48:16 +0000 Wed, 02 Feb 2022 21:47:29 +0000 KubeletHasSufficientPID kubelet has sufficient PID available
Ready True Wed, 02 Feb 2022 21:48:16 +0000 Wed, 02 Feb 2022 21:48:16 +0000 KubeletReady kubelet is posting ready status
Addresses:
InternalIP: 192.168.49.2
Hostname: functional-20220202214710-386638
Capacity:
cpu: 8
ephemeral-storage: 304695084Ki
hugepages-1Gi: 0
hugepages-2Mi: 0
memory: 32874648Ki
pods: 110
Allocatable:
cpu: 8
ephemeral-storage: 304695084Ki
hugepages-1Gi: 0
hugepages-2Mi: 0
memory: 32874648Ki
pods: 110
System Info:
Machine ID: 8de776e053e140d6a14c2d2def3d6bb8
System UUID: bf35b03a-6495-476c-9c20-23113ad939ba
Boot ID: 83bfc470-4931-4701-bbec-fbf02121ac1f
Kernel Version: 5.11.0-1029-gcp
OS Image: Ubuntu 20.04.2 LTS
Operating System: linux
Architecture: amd64
Container Runtime Version: docker://20.10.12
Kubelet Version: v1.23.2
Kube-Proxy Version: v1.23.2
PodCIDR: 10.244.0.0/24
PodCIDRs: 10.244.0.0/24
Non-terminated Pods: (7 in total)
Namespace Name CPU Requests CPU Limits Memory Requests Memory Limits Age
--------- ---- ------------ ---------- --------------- ------------- ---
kube-system coredns-64897985d-qmcxc 100m (1%) 0 (0%) 70Mi (0%) 170Mi (0%) 43s
kube-system etcd-functional-20220202214710-386638 100m (1%) 0 (0%) 100Mi (0%) 0 (0%) 56s
kube-system kube-apiserver-functional-20220202214710-386638 250m (3%) 0 (0%) 0 (0%) 0 (0%) 9s
kube-system kube-controller-manager-functional-20220202214710-386638 200m (2%) 0 (0%) 0 (0%) 0 (0%) 56s
kube-system kube-proxy-c2lnh 0 (0%) 0 (0%) 0 (0%) 0 (0%) 44s
kube-system kube-scheduler-functional-20220202214710-386638 100m (1%) 0 (0%) 0 (0%) 0 (0%) 56s
kube-system storage-provisioner 0 (0%) 0 (0%) 0 (0%) 0 (0%) 41s
Allocated resources:
(Total limits may be over 100 percent, i.e., overcommitted.)
Resource Requests Limits
-------- -------- ------
cpu 750m (9%) 0 (0%)
memory 170Mi (0%) 170Mi (0%)
ephemeral-storage 0 (0%) 0 (0%)
hugepages-1Gi 0 (0%) 0 (0%)
hugepages-2Mi 0 (0%) 0 (0%)
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal Starting 42s kube-proxy
Normal Starting 15s kube-proxy
Normal NodeHasSufficientMemory 56s kubelet Node functional-20220202214710-386638 status is now: NodeHasSufficientMemory
Normal NodeHasNoDiskPressure 56s kubelet Node functional-20220202214710-386638 status is now: NodeHasNoDiskPressure
Normal NodeHasSufficientPID 56s kubelet Node functional-20220202214710-386638 status is now: NodeHasSufficientPID
Normal NodeAllocatableEnforced 56s kubelet Updated Node Allocatable limit across pods
Normal Starting 56s kubelet Starting kubelet.
Normal NodeReady 46s kubelet Node functional-20220202214710-386638 status is now: NodeReady
Normal Starting 16s kubelet Starting kubelet.
Normal NodeHasSufficientMemory 16s kubelet Node functional-20220202214710-386638 status is now: NodeHasSufficientMemory
Normal NodeHasNoDiskPressure 16s kubelet Node functional-20220202214710-386638 status is now: NodeHasNoDiskPressure
Normal NodeHasSufficientPID 16s kubelet Node functional-20220202214710-386638 status is now: NodeHasSufficientPID
Normal NodeNotReady 15s kubelet Node functional-20220202214710-386638 status is now: NodeNotReady
Normal NodeAllocatableEnforced 15s kubelet Updated Node Allocatable limit across pods
Normal NodeReady 15s kubelet Node functional-20220202214710-386638 status is now: NodeReady
*
* ==> dmesg <==
* [ +0.000005] ll header: 00000000: ff ff ff ff ff ff 36 73 c4 17 2b 39 08 06
[ +14.700161] IPv4: martian source 10.85.0.26 from 10.85.0.26, on dev eth0
[ +0.000005] ll header: 00000000: ff ff ff ff ff ff 2a 01 b9 03 03 5a 08 06
[ +16.216625] IPv4: martian source 10.85.0.27 from 10.85.0.27, on dev eth0
[ +0.000006] ll header: 00000000: ff ff ff ff ff ff ee b5 9c db 82 76 08 06
[Feb 2 21:36] IPv4: martian source 10.85.0.28 from 10.85.0.28, on dev eth0
[ +0.000027] ll header: 00000000: ff ff ff ff ff ff 96 6b 63 45 2c d8 08 06
[ +13.105020] IPv4: martian source 10.85.0.29 from 10.85.0.29, on dev eth0
[ +0.000006] ll header: 00000000: ff ff ff ff ff ff da 50 1c 0f 4e 40 08 06
[ +15.030049] IPv4: martian source 10.85.0.30 from 10.85.0.30, on dev eth0
[ +0.000008] ll header: 00000000: ff ff ff ff ff ff fe 22 15 5f e1 47 08 06
[ +14.546166] IPv4: martian source 10.85.0.31 from 10.85.0.31, on dev eth0
[ +0.000006] ll header: 00000000: ff ff ff ff ff ff 1a f4 e4 d0 20 71 08 06
[Feb 2 21:37] IPv4: martian source 10.85.0.32 from 10.85.0.32, on dev eth0
[ +0.000005] ll header: 00000000: ff ff ff ff ff ff e6 a3 62 db 9e 4a 08 06
[ +13.742386] IPv4: martian source 10.85.0.33 from 10.85.0.33, on dev eth0
[ +0.000005] ll header: 00000000: ff ff ff ff ff ff 4a 2d 33 68 1b 46 08 06
[ +13.053862] IPv4: martian source 10.85.0.34 from 10.85.0.34, on dev eth0
[ +0.000005] ll header: 00000000: ff ff ff ff ff ff 02 39 ec 3a a8 2e 08 06
[Feb 2 21:38] IPv4: martian source 10.85.0.2 from 10.85.0.2, on dev cni0
[ +0.000006] ll header: 00000000: ff ff ff ff ff ff 0a 08 10 7c e7 12 08 06
[ +0.000005] IPv4: martian source 10.85.0.2 from 10.85.0.2, on dev eth0
[ +0.000001] ll header: 00000000: ff ff ff ff ff ff 0a 08 10 7c e7 12 08 06
[ +17.683296] IPv4: martian source 10.85.0.3 from 10.85.0.3, on dev eth0
[ +0.000005] ll header: 00000000: ff ff ff ff ff ff 2e 53 42 ef 9d 27 08 06
*
* ==> etcd [01a90a21bd58] <==
* {"level":"info","ts":"2022-02-02T21:47:29.615Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"aec36adc501070cc became pre-candidate at term 1"}
{"level":"info","ts":"2022-02-02T21:47:29.615Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"aec36adc501070cc received MsgPreVoteResp from aec36adc501070cc at term 1"}
{"level":"info","ts":"2022-02-02T21:47:29.615Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"aec36adc501070cc became candidate at term 2"}
{"level":"info","ts":"2022-02-02T21:47:29.615Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"aec36adc501070cc received MsgVoteResp from aec36adc501070cc at term 2"}
{"level":"info","ts":"2022-02-02T21:47:29.615Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"aec36adc501070cc became leader at term 2"}
{"level":"info","ts":"2022-02-02T21:47:29.615Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"raft.node: aec36adc501070cc elected leader aec36adc501070cc at term 2"}
{"level":"info","ts":"2022-02-02T21:47:29.615Z","caller":"etcdserver/server.go:2027","msg":"published local member to cluster through raft","local-member-id":"aec36adc501070cc","local-member-attributes":"{Name:functional-20220202214710-386638 ClientURLs:[https://192.168.49.2:2379]}","request-path":"/0/members/aec36adc501070cc/attributes","cluster-id":"fa54960ea34d58be","publish-timeout":"7s"}
{"level":"info","ts":"2022-02-02T21:47:29.615Z","caller":"embed/serve.go:98","msg":"ready to serve client requests"}
{"level":"info","ts":"2022-02-02T21:47:29.616Z","caller":"embed/serve.go:98","msg":"ready to serve client requests"}
{"level":"info","ts":"2022-02-02T21:47:29.616Z","caller":"etcdserver/server.go:2476","msg":"setting up initial cluster version using v2 API","cluster-version":"3.5"}
{"level":"info","ts":"2022-02-02T21:47:29.616Z","caller":"etcdmain/main.go:47","msg":"notifying init daemon"}
{"level":"info","ts":"2022-02-02T21:47:29.616Z","caller":"etcdmain/main.go:53","msg":"successfully notified init daemon"}
{"level":"info","ts":"2022-02-02T21:47:29.616Z","caller":"membership/cluster.go:584","msg":"set initial cluster version","cluster-id":"fa54960ea34d58be","local-member-id":"aec36adc501070cc","cluster-version":"3.5"}
{"level":"info","ts":"2022-02-02T21:47:29.616Z","caller":"api/capability.go:75","msg":"enabled capabilities for version","cluster-version":"3.5"}
{"level":"info","ts":"2022-02-02T21:47:29.616Z","caller":"etcdserver/server.go:2500","msg":"cluster version is updated","cluster-version":"3.5"}
{"level":"info","ts":"2022-02-02T21:47:29.616Z","caller":"embed/serve.go:188","msg":"serving client traffic securely","address":"127.0.0.1:2379"}
{"level":"info","ts":"2022-02-02T21:47:29.617Z","caller":"embed/serve.go:188","msg":"serving client traffic securely","address":"192.168.49.2:2379"}
{"level":"info","ts":"2022-02-02T21:48:08.626Z","caller":"osutil/interrupt_unix.go:64","msg":"received signal; shutting down","signal":"terminated"}
{"level":"info","ts":"2022-02-02T21:48:08.626Z","caller":"embed/etcd.go:367","msg":"closing etcd server","name":"functional-20220202214710-386638","data-dir":"/var/lib/minikube/etcd","advertise-peer-urls":["https://192.168.49.2:2380"],"advertise-client-urls":["https://192.168.49.2:2379"]}
WARNING: 2022/02/02 21:48:08 [core] grpc: addrConn.createTransport failed to connect to {127.0.0.1:2379 127.0.0.1:2379 <nil> 0 <nil>}. Err: connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting...
WARNING: 2022/02/02 21:48:08 [core] grpc: addrConn.createTransport failed to connect to {192.168.49.2:2379 192.168.49.2:2379 <nil> 0 <nil>}. Err: connection error: desc = "transport: Error while dialing dial tcp 192.168.49.2:2379: connect: connection refused". Reconnecting...
{"level":"info","ts":"2022-02-02T21:48:08.637Z","caller":"etcdserver/server.go:1438","msg":"skipped leadership transfer for single voting member cluster","local-member-id":"aec36adc501070cc","current-leader-member-id":"aec36adc501070cc"}
{"level":"info","ts":"2022-02-02T21:48:08.638Z","caller":"embed/etcd.go:562","msg":"stopping serving peer traffic","address":"192.168.49.2:2380"}
{"level":"info","ts":"2022-02-02T21:48:08.706Z","caller":"embed/etcd.go:567","msg":"stopped serving peer traffic","address":"192.168.49.2:2380"}
{"level":"info","ts":"2022-02-02T21:48:08.706Z","caller":"embed/etcd.go:369","msg":"closed etcd server","name":"functional-20220202214710-386638","data-dir":"/var/lib/minikube/etcd","advertise-peer-urls":["https://192.168.49.2:2380"],"advertise-client-urls":["https://192.168.49.2:2379"]}
*
* ==> etcd [bf6976182868] <==
* {"level":"info","ts":"2022-02-02T21:48:10.536Z","caller":"etcdserver/server.go:843","msg":"starting etcd server","local-member-id":"aec36adc501070cc","local-server-version":"3.5.1","cluster-version":"to_be_decided"}
{"level":"info","ts":"2022-02-02T21:48:10.608Z","caller":"etcdserver/server.go:744","msg":"starting initial election tick advance","election-ticks":10}
{"level":"info","ts":"2022-02-02T21:48:10.608Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"aec36adc501070cc switched to configuration voters=(12593026477526642892)"}
{"level":"info","ts":"2022-02-02T21:48:10.608Z","caller":"membership/cluster.go:421","msg":"added member","cluster-id":"fa54960ea34d58be","local-member-id":"aec36adc501070cc","added-peer-id":"aec36adc501070cc","added-peer-peer-urls":["https://192.168.49.2:2380"]}
{"level":"info","ts":"2022-02-02T21:48:10.609Z","caller":"membership/cluster.go:584","msg":"set initial cluster version","cluster-id":"fa54960ea34d58be","local-member-id":"aec36adc501070cc","cluster-version":"3.5"}
{"level":"info","ts":"2022-02-02T21:48:10.609Z","caller":"api/capability.go:75","msg":"enabled capabilities for version","cluster-version":"3.5"}
{"level":"info","ts":"2022-02-02T21:48:10.610Z","caller":"embed/etcd.go:687","msg":"starting with client TLS","tls-info":"cert = /var/lib/minikube/certs/etcd/server.crt, key = /var/lib/minikube/certs/etcd/server.key, client-cert=, client-key=, trusted-ca = /var/lib/minikube/certs/etcd/ca.crt, client-cert-auth = true, crl-file = ","cipher-suites":[]}
{"level":"info","ts":"2022-02-02T21:48:10.610Z","caller":"embed/etcd.go:580","msg":"serving peer traffic","address":"192.168.49.2:2380"}
{"level":"info","ts":"2022-02-02T21:48:10.610Z","caller":"embed/etcd.go:552","msg":"cmux::serve","address":"192.168.49.2:2380"}
{"level":"info","ts":"2022-02-02T21:48:10.610Z","caller":"embed/etcd.go:276","msg":"now serving peer/client/metrics","local-member-id":"aec36adc501070cc","initial-advertise-peer-urls":["https://192.168.49.2:2380"],"listen-peer-urls":["https://192.168.49.2:2380"],"advertise-client-urls":["https://192.168.49.2:2379"],"listen-client-urls":["https://127.0.0.1:2379","https://192.168.49.2:2379"],"listen-metrics-urls":["http://127.0.0.1:2381"]}
{"level":"info","ts":"2022-02-02T21:48:10.610Z","caller":"embed/etcd.go:762","msg":"serving metrics","address":"http://127.0.0.1:2381"}
{"level":"info","ts":"2022-02-02T21:48:11.531Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"aec36adc501070cc is starting a new election at term 2"}
{"level":"info","ts":"2022-02-02T21:48:11.531Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"aec36adc501070cc became pre-candidate at term 2"}
{"level":"info","ts":"2022-02-02T21:48:11.531Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"aec36adc501070cc received MsgPreVoteResp from aec36adc501070cc at term 2"}
{"level":"info","ts":"2022-02-02T21:48:11.531Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"aec36adc501070cc became candidate at term 3"}
{"level":"info","ts":"2022-02-02T21:48:11.531Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"aec36adc501070cc received MsgVoteResp from aec36adc501070cc at term 3"}
{"level":"info","ts":"2022-02-02T21:48:11.531Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"aec36adc501070cc became leader at term 3"}
{"level":"info","ts":"2022-02-02T21:48:11.531Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"raft.node: aec36adc501070cc elected leader aec36adc501070cc at term 3"}
{"level":"info","ts":"2022-02-02T21:48:11.532Z","caller":"etcdserver/server.go:2027","msg":"published local member to cluster through raft","local-member-id":"aec36adc501070cc","local-member-attributes":"{Name:functional-20220202214710-386638 ClientURLs:[https://192.168.49.2:2379]}","request-path":"/0/members/aec36adc501070cc/attributes","cluster-id":"fa54960ea34d58be","publish-timeout":"7s"}
{"level":"info","ts":"2022-02-02T21:48:11.532Z","caller":"embed/serve.go:98","msg":"ready to serve client requests"}
{"level":"info","ts":"2022-02-02T21:48:11.532Z","caller":"embed/serve.go:98","msg":"ready to serve client requests"}
{"level":"info","ts":"2022-02-02T21:48:11.532Z","caller":"etcdmain/main.go:47","msg":"notifying init daemon"}
{"level":"info","ts":"2022-02-02T21:48:11.532Z","caller":"etcdmain/main.go:53","msg":"successfully notified init daemon"}
{"level":"info","ts":"2022-02-02T21:48:11.533Z","caller":"embed/serve.go:188","msg":"serving client traffic securely","address":"192.168.49.2:2379"}
{"level":"info","ts":"2022-02-02T21:48:11.533Z","caller":"embed/serve.go:188","msg":"serving client traffic securely","address":"127.0.0.1:2379"}
*
* ==> kernel <==
* 21:48:31 up 5:31, 0 users, load average: 1.26, 1.39, 1.01
Linux functional-20220202214710-386638 5.11.0-1029-gcp #33~20.04.3-Ubuntu SMP Tue Jan 18 12:03:29 UTC 2022 x86_64 x86_64 x86_64 GNU/Linux
PRETTY_NAME="Ubuntu 20.04.2 LTS"
*
* ==> kube-apiserver [1251a66fdbb5] <==
* I0202 21:48:17.229597 1 server.go:565] external host was not specified, using 192.168.49.2
I0202 21:48:17.230081 1 server.go:172] Version: v1.23.2
E0202 21:48:17.230398 1 run.go:74] "command failed" err="failed to create listener: failed to listen on 0.0.0.0:8441: listen tcp 0.0.0.0:8441: bind: address already in use"
*
* ==> kube-apiserver [16f643730c8f] <==
* I0202 21:48:21.768644 1 nonstructuralschema_controller.go:192] Starting NonStructuralSchemaConditionController
I0202 21:48:21.768669 1 apiapproval_controller.go:186] Starting KubernetesAPIApprovalPolicyConformantConditionController
I0202 21:48:21.768690 1 crd_finalizer.go:266] Starting CRDFinalizer
I0202 21:48:21.806641 1 apiservice_controller.go:97] Starting APIServiceRegistrationController
I0202 21:48:21.806677 1 cache.go:32] Waiting for caches to sync for APIServiceRegistrationController controller
I0202 21:48:21.806711 1 autoregister_controller.go:141] Starting autoregister controller
I0202 21:48:21.806716 1 cache.go:32] Waiting for caches to sync for autoregister controller
I0202 21:48:21.806976 1 crdregistration_controller.go:111] Starting crd-autoregister controller
I0202 21:48:21.806995 1 shared_informer.go:240] Waiting for caches to sync for crd-autoregister
I0202 21:48:21.811356 1 dynamic_cafile_content.go:156] "Starting controller" name="client-ca-bundle::/var/lib/minikube/certs/ca.crt"
I0202 21:48:21.822692 1 dynamic_cafile_content.go:156] "Starting controller" name="request-header::/var/lib/minikube/certs/front-proxy-ca.crt"
E0202 21:48:21.829256 1 controller.go:157] Error removing old endpoints from kubernetes service: no master IPs were listed in storage, refusing to erase all endpoints for the kubernetes service
I0202 21:48:21.909180 1 shared_informer.go:247] Caches are synced for cluster_authentication_trust_controller
I0202 21:48:21.918794 1 shared_informer.go:247] Caches are synced for crd-autoregister
I0202 21:48:21.918893 1 cache.go:39] Caches are synced for APIServiceRegistrationController controller
I0202 21:48:21.920449 1 cache.go:39] Caches are synced for autoregister controller
I0202 21:48:22.007031 1 apf_controller.go:322] Running API Priority and Fairness config worker
I0202 21:48:22.007064 1 shared_informer.go:247] Caches are synced for node_authorizer
I0202 21:48:22.007030 1 cache.go:39] Caches are synced for AvailableConditionController controller
I0202 21:48:22.765603 1 controller.go:132] OpenAPI AggregationController: action for item : Nothing (removed from the queue).
I0202 21:48:22.765631 1 controller.go:132] OpenAPI AggregationController: action for item k8s_internal_local_delegation_chain_0000000000: Nothing (removed from the queue).
I0202 21:48:22.810807 1 storage_scheduling.go:109] all system priority classes are created successfully or already exist.
I0202 21:48:25.918114 1 controller.go:611] quota admission added evaluator for: endpointslices.discovery.k8s.io
I0202 21:48:25.922555 1 controller.go:611] quota admission added evaluator for: endpoints
I0202 21:48:26.120735 1 controller.go:611] quota admission added evaluator for: leases.coordination.k8s.io
*
* ==> kube-controller-manager [0e9f9533ff72] <==
* I0202 21:48:25.949140 1 node_lifecycle_controller.go:1213] Controller detected that zone is now in state Normal.
I0202 21:48:25.949142 1 event.go:294] "Event occurred" object="functional-20220202214710-386638" kind="Node" apiVersion="v1" type="Normal" reason="RegisteredNode" message="Node functional-20220202214710-386638 event: Registered Node functional-20220202214710-386638 in Controller"
I0202 21:48:25.950076 1 shared_informer.go:247] Caches are synced for TTL after finished
I0202 21:48:25.951282 1 shared_informer.go:247] Caches are synced for node
I0202 21:48:25.951311 1 range_allocator.go:173] Starting range CIDR allocator
I0202 21:48:25.951317 1 shared_informer.go:240] Waiting for caches to sync for cidrallocator
I0202 21:48:25.951326 1 shared_informer.go:247] Caches are synced for cidrallocator
I0202 21:48:25.955571 1 shared_informer.go:247] Caches are synced for HPA
I0202 21:48:25.955600 1 shared_informer.go:247] Caches are synced for PVC protection
I0202 21:48:25.958994 1 shared_informer.go:247] Caches are synced for ReplicationController
I0202 21:48:25.960133 1 shared_informer.go:247] Caches are synced for GC
I0202 21:48:25.962349 1 shared_informer.go:247] Caches are synced for service account
I0202 21:48:25.963481 1 shared_informer.go:247] Caches are synced for endpoint_slice_mirroring
I0202 21:48:25.969390 1 shared_informer.go:247] Caches are synced for crt configmap
I0202 21:48:26.036076 1 shared_informer.go:247] Caches are synced for ClusterRoleAggregator
I0202 21:48:26.125682 1 shared_informer.go:247] Caches are synced for resource quota
I0202 21:48:26.142130 1 shared_informer.go:247] Caches are synced for attach detach
I0202 21:48:26.143033 1 shared_informer.go:247] Caches are synced for disruption
I0202 21:48:26.143053 1 disruption.go:371] Sending events to api server.
I0202 21:48:26.157464 1 shared_informer.go:247] Caches are synced for ReplicaSet
I0202 21:48:26.162455 1 shared_informer.go:247] Caches are synced for deployment
I0202 21:48:26.173671 1 shared_informer.go:247] Caches are synced for resource quota
I0202 21:48:26.585704 1 shared_informer.go:247] Caches are synced for garbage collector
I0202 21:48:26.659811 1 shared_informer.go:247] Caches are synced for garbage collector
I0202 21:48:26.659833 1 garbagecollector.go:155] Garbage collector: all resource monitors have synced. Proceeding to collect garbage
*
* ==> kube-controller-manager [66081e6ef128] <==
* I0202 21:47:47.242471 1 node_lifecycle_controller.go:1213] Controller detected that zone is now in state Normal.
I0202 21:47:47.242525 1 event.go:294] "Event occurred" object="functional-20220202214710-386638" kind="Node" apiVersion="v1" type="Normal" reason="RegisteredNode" message="Node functional-20220202214710-386638 event: Registered Node functional-20220202214710-386638 in Controller"
I0202 21:47:47.242539 1 shared_informer.go:247] Caches are synced for certificate-csrapproving
I0202 21:47:47.242714 1 shared_informer.go:247] Caches are synced for ephemeral
I0202 21:47:47.242736 1 shared_informer.go:247] Caches are synced for GC
I0202 21:47:47.243338 1 shared_informer.go:247] Caches are synced for ReplicationController
I0202 21:47:47.244394 1 shared_informer.go:247] Caches are synced for stateful set
I0202 21:47:47.250897 1 event.go:294] "Event occurred" object="kube-system/kube-proxy" kind="DaemonSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: kube-proxy-c2lnh"
I0202 21:47:47.259157 1 shared_informer.go:247] Caches are synced for PV protection
I0202 21:47:47.294089 1 shared_informer.go:247] Caches are synced for expand
I0202 21:47:47.342871 1 shared_informer.go:247] Caches are synced for persistent volume
I0202 21:47:47.347139 1 shared_informer.go:247] Caches are synced for resource quota
I0202 21:47:47.353350 1 shared_informer.go:247] Caches are synced for resource quota
I0202 21:47:47.363520 1 shared_informer.go:247] Caches are synced for TTL after finished
I0202 21:47:47.392398 1 shared_informer.go:247] Caches are synced for job
I0202 21:47:47.393291 1 shared_informer.go:247] Caches are synced for cronjob
I0202 21:47:47.454039 1 shared_informer.go:247] Caches are synced for attach detach
I0202 21:47:47.866458 1 shared_informer.go:247] Caches are synced for garbage collector
I0202 21:47:47.878620 1 shared_informer.go:247] Caches are synced for garbage collector
I0202 21:47:47.878643 1 garbagecollector.go:155] Garbage collector: all resource monitors have synced. Proceeding to collect garbage
I0202 21:47:47.997401 1 event.go:294] "Event occurred" object="kube-system/coredns" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled up replica set coredns-64897985d to 2"
I0202 21:47:48.115331 1 event.go:294] "Event occurred" object="kube-system/coredns" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled down replica set coredns-64897985d to 1"
I0202 21:47:48.248767 1 event.go:294] "Event occurred" object="kube-system/coredns-64897985d" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: coredns-64897985d-gh6d4"
I0202 21:47:48.252723 1 event.go:294] "Event occurred" object="kube-system/coredns-64897985d" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: coredns-64897985d-qmcxc"
I0202 21:47:48.269673 1 event.go:294] "Event occurred" object="kube-system/coredns-64897985d" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulDelete" message="Deleted pod: coredns-64897985d-gh6d4"
*
* ==> kube-proxy [8d4ef263a0de] <==
* I0202 21:47:48.556611 1 node.go:163] Successfully retrieved node IP: 192.168.49.2
I0202 21:47:48.556670 1 server_others.go:138] "Detected node IP" address="192.168.49.2"
I0202 21:47:48.556703 1 server_others.go:561] "Unknown proxy mode, assuming iptables proxy" proxyMode=""
I0202 21:47:48.630950 1 server_others.go:206] "Using iptables Proxier"
I0202 21:47:48.630989 1 server_others.go:213] "kube-proxy running in dual-stack mode" ipFamily=IPv4
I0202 21:47:48.631000 1 server_others.go:214] "Creating dualStackProxier for iptables"
I0202 21:47:48.631016 1 server_others.go:491] "Detect-local-mode set to ClusterCIDR, but no IPv6 cluster CIDR defined, , defaulting to no-op detect-local for IPv6"
I0202 21:47:48.631420 1 server.go:656] "Version info" version="v1.23.2"
I0202 21:47:48.632277 1 config.go:226] "Starting endpoint slice config controller"
I0202 21:47:48.632315 1 shared_informer.go:240] Waiting for caches to sync for endpoint slice config
I0202 21:47:48.632352 1 config.go:317] "Starting service config controller"
I0202 21:47:48.632362 1 shared_informer.go:240] Waiting for caches to sync for service config
I0202 21:47:48.733340 1 shared_informer.go:247] Caches are synced for endpoint slice config
I0202 21:47:48.733450 1 shared_informer.go:247] Caches are synced for service config
*
* ==> kube-proxy [fab9e8c7dd3f] <==
* E0202 21:48:10.512293 1 node.go:152] Failed to retrieve node info: Get "https://control-plane.minikube.internal:8441/api/v1/nodes/functional-20220202214710-386638": dial tcp 192.168.49.2:8441: connect: connection refused
E0202 21:48:13.218466 1 node.go:152] Failed to retrieve node info: nodes "functional-20220202214710-386638" is forbidden: User "system:serviceaccount:kube-system:kube-proxy" cannot get resource "nodes" in API group "" at the cluster scope
I0202 21:48:15.310051 1 node.go:163] Successfully retrieved node IP: 192.168.49.2
I0202 21:48:15.310286 1 server_others.go:138] "Detected node IP" address="192.168.49.2"
I0202 21:48:15.310479 1 server_others.go:561] "Unknown proxy mode, assuming iptables proxy" proxyMode=""
I0202 21:48:15.340977 1 server_others.go:206] "Using iptables Proxier"
I0202 21:48:15.341007 1 server_others.go:213] "kube-proxy running in dual-stack mode" ipFamily=IPv4
I0202 21:48:15.341016 1 server_others.go:214] "Creating dualStackProxier for iptables"
I0202 21:48:15.341034 1 server_others.go:491] "Detect-local-mode set to ClusterCIDR, but no IPv6 cluster CIDR defined, , defaulting to no-op detect-local for IPv6"
I0202 21:48:15.341462 1 server.go:656] "Version info" version="v1.23.2"
I0202 21:48:15.407833 1 config.go:317] "Starting service config controller"
I0202 21:48:15.407857 1 shared_informer.go:240] Waiting for caches to sync for service config
I0202 21:48:15.408089 1 config.go:226] "Starting endpoint slice config controller"
I0202 21:48:15.408150 1 shared_informer.go:240] Waiting for caches to sync for endpoint slice config
I0202 21:48:15.508920 1 shared_informer.go:247] Caches are synced for endpoint slice config
I0202 21:48:15.509044 1 shared_informer.go:247] Caches are synced for service config
*
* ==> kube-scheduler [161c307a7ab3] <==
* W0202 21:48:13.128791 1 authentication.go:346] Continuing without authentication configuration. This may treat all requests as anonymous.
W0202 21:48:13.128800 1 authentication.go:347] To require authentication configuration lookup to succeed, set --authentication-tolerate-lookup-failure=false
I0202 21:48:13.217797 1 server.go:139] "Starting Kubernetes Scheduler" version="v1.23.2"
I0202 21:48:13.220150 1 secure_serving.go:200] Serving securely on 127.0.0.1:10259
I0202 21:48:13.224382 1 tlsconfig.go:240] "Starting DynamicServingCertificateController"
I0202 21:48:13.224493 1 configmap_cafile_content.go:201] "Starting controller" name="client-ca::kube-system::extension-apiserver-authentication::client-ca-file"
I0202 21:48:13.224527 1 shared_informer.go:240] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file
E0202 21:48:13.236086 1 plugin.go:138] "getting namespace, assuming empty set of namespace labels" err="namespace \"kube-system\" not found" namespace="kube-system"
E0202 21:48:13.236124 1 plugin.go:138] "getting namespace, assuming empty set of namespace labels" err="namespace \"kube-system\" not found" namespace="kube-system"
I0202 21:48:13.324848 1 shared_informer.go:247] Caches are synced for client-ca::kube-system::extension-apiserver-authentication::client-ca-file
E0202 21:48:21.822002 1 reflector.go:138] k8s.io/apiserver/pkg/server/dynamiccertificates/configmap_cafile_content.go:205: Failed to watch *v1.ConfigMap: unknown (get configmaps)
E0202 21:48:21.822739 1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.StatefulSet: unknown (get statefulsets.apps)
E0202 21:48:21.822815 1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.ReplicationController: unknown (get replicationcontrollers)
E0202 21:48:21.822844 1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.StorageClass: unknown (get storageclasses.storage.k8s.io)
E0202 21:48:21.822873 1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.Pod: unknown (get pods)
E0202 21:48:21.822892 1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.Namespace: unknown (get namespaces)
E0202 21:48:21.822911 1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.ReplicaSet: unknown (get replicasets.apps)
E0202 21:48:21.822936 1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.PodDisruptionBudget: unknown (get poddisruptionbudgets.policy)
E0202 21:48:21.822964 1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.PersistentVolumeClaim: unknown (get persistentvolumeclaims)
E0202 21:48:21.822984 1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1beta1.CSIStorageCapacity: unknown (get csistoragecapacities.storage.k8s.io)
E0202 21:48:21.823010 1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.Node: unknown (get nodes)
E0202 21:48:21.823055 1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.CSINode: unknown (get csinodes.storage.k8s.io)
E0202 21:48:21.823080 1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.CSIDriver: unknown (get csidrivers.storage.k8s.io)
E0202 21:48:21.823120 1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.PersistentVolume: unknown (get persistentvolumes)
E0202 21:48:21.823148 1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.Service: unknown (get services)
*
* ==> kube-scheduler [7f57939fd732] <==
* E0202 21:47:32.223941 1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.Namespace: failed to list *v1.Namespace: namespaces is forbidden: User "system:kube-scheduler" cannot list resource "namespaces" in API group "" at the cluster scope
W0202 21:47:32.224017 1 reflector.go:324] k8s.io/client-go/informers/factory.go:134: failed to list *v1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User "system:kube-scheduler" cannot list resource "poddisruptionbudgets" in API group "policy" at the cluster scope
E0202 21:47:32.224039 1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.PodDisruptionBudget: failed to list *v1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User "system:kube-scheduler" cannot list resource "poddisruptionbudgets" in API group "policy" at the cluster scope
W0202 21:47:32.224109 1 reflector.go:324] k8s.io/client-go/informers/factory.go:134: failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumeclaims" in API group "" at the cluster scope
E0202 21:47:32.224151 1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.PersistentVolumeClaim: failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumeclaims" in API group "" at the cluster scope
W0202 21:47:32.224264 1 reflector.go:324] k8s.io/client-go/informers/factory.go:134: failed to list *v1.CSINode: csinodes.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csinodes" in API group "storage.k8s.io" at the cluster scope
E0202 21:47:32.224282 1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.CSINode: failed to list *v1.CSINode: csinodes.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csinodes" in API group "storage.k8s.io" at the cluster scope
W0202 21:47:33.145949 1 reflector.go:324] k8s.io/client-go/informers/factory.go:134: failed to list *v1.Pod: pods is forbidden: User "system:kube-scheduler" cannot list resource "pods" in API group "" at the cluster scope
E0202 21:47:33.145987 1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.Pod: failed to list *v1.Pod: pods is forbidden: User "system:kube-scheduler" cannot list resource "pods" in API group "" at the cluster scope
W0202 21:47:33.158906 1 reflector.go:324] k8s.io/client-go/informers/factory.go:134: failed to list *v1beta1.CSIStorageCapacity: csistoragecapacities.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csistoragecapacities" in API group "storage.k8s.io" at the cluster scope
E0202 21:47:33.158947 1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1beta1.CSIStorageCapacity: failed to list *v1beta1.CSIStorageCapacity: csistoragecapacities.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csistoragecapacities" in API group "storage.k8s.io" at the cluster scope
W0202 21:47:33.163812 1 reflector.go:324] k8s.io/client-go/informers/factory.go:134: failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumeclaims" in API group "" at the cluster scope
E0202 21:47:33.163842 1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.PersistentVolumeClaim: failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumeclaims" in API group "" at the cluster scope
W0202 21:47:33.164491 1 reflector.go:324] k8s.io/client-go/informers/factory.go:134: failed to list *v1.Namespace: namespaces is forbidden: User "system:kube-scheduler" cannot list resource "namespaces" in API group "" at the cluster scope
E0202 21:47:33.164531 1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.Namespace: failed to list *v1.Namespace: namespaces is forbidden: User "system:kube-scheduler" cannot list resource "namespaces" in API group "" at the cluster scope
W0202 21:47:33.194774 1 reflector.go:324] k8s.io/client-go/informers/factory.go:134: failed to list *v1.ReplicaSet: replicasets.apps is forbidden: User "system:kube-scheduler" cannot list resource "replicasets" in API group "apps" at the cluster scope
E0202 21:47:33.194807 1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.ReplicaSet: failed to list *v1.ReplicaSet: replicasets.apps is forbidden: User "system:kube-scheduler" cannot list resource "replicasets" in API group "apps" at the cluster scope
W0202 21:47:33.209773 1 reflector.go:324] k8s.io/client-go/informers/factory.go:134: failed to list *v1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User "system:kube-scheduler" cannot list resource "poddisruptionbudgets" in API group "policy" at the cluster scope
E0202 21:47:33.209797 1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.PodDisruptionBudget: failed to list *v1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User "system:kube-scheduler" cannot list resource "poddisruptionbudgets" in API group "policy" at the cluster scope
W0202 21:47:33.408342 1 reflector.go:324] k8s.io/client-go/informers/factory.go:134: failed to list *v1.StatefulSet: statefulsets.apps is forbidden: User "system:kube-scheduler" cannot list resource "statefulsets" in API group "apps" at the cluster scope
E0202 21:47:33.408371 1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.StatefulSet: failed to list *v1.StatefulSet: statefulsets.apps is forbidden: User "system:kube-scheduler" cannot list resource "statefulsets" in API group "apps" at the cluster scope
I0202 21:47:33.920827 1 shared_informer.go:247] Caches are synced for client-ca::kube-system::extension-apiserver-authentication::client-ca-file
I0202 21:48:08.710725 1 configmap_cafile_content.go:222] "Shutting down controller" name="client-ca::kube-system::extension-apiserver-authentication::client-ca-file"
I0202 21:48:08.711166 1 secure_serving.go:311] Stopped listening on 127.0.0.1:10259
I0202 21:48:08.711762 1 tlsconfig.go:255] "Shutting down DynamicServingCertificateController"
*
* ==> kubelet <==
* -- Logs begin at Wed 2022-02-02 21:47:20 UTC, end at Wed 2022-02-02 21:48:31 UTC. --
Feb 02 21:48:19 functional-20220202214710-386638 kubelet[5621]: I0202 21:48:19.338400 5621 scope.go:110] "RemoveContainer" containerID="1251a66fdbb5f805a7e800f9eeabecfa04f9ee346e6a984c81966f441a7b4fd7"
Feb 02 21:48:19 functional-20220202214710-386638 kubelet[5621]: E0202 21:48:19.920081 5621 remote_runtime.go:479] "StopContainer from runtime service failed" err="rpc error: code = Unknown desc = Error response from daemon: No such container: 26bb951a2f1e150bc66a6d600f18eb919d6a3a49f67b3dc307cb5515e06348ec" containerID="26bb951a2f1e150bc66a6d600f18eb919d6a3a49f67b3dc307cb5515e06348ec"
Feb 02 21:48:19 functional-20220202214710-386638 kubelet[5621]: E0202 21:48:19.920147 5621 kuberuntime_container.go:719] "Container termination failed with gracePeriod" err="rpc error: code = Unknown desc = Error response from daemon: No such container: 26bb951a2f1e150bc66a6d600f18eb919d6a3a49f67b3dc307cb5515e06348ec" pod="kube-system/kube-apiserver-functional-20220202214710-386638" podUID=ecd64b85c03f75ef813989b5d080682a containerName="kube-apiserver" containerID="docker://26bb951a2f1e150bc66a6d600f18eb919d6a3a49f67b3dc307cb5515e06348ec" gracePeriod=1
Feb 02 21:48:19 functional-20220202214710-386638 kubelet[5621]: E0202 21:48:19.920175 5621 kuberuntime_container.go:744] "Kill container failed" err="rpc error: code = Unknown desc = Error response from daemon: No such container: 26bb951a2f1e150bc66a6d600f18eb919d6a3a49f67b3dc307cb5515e06348ec" pod="kube-system/kube-apiserver-functional-20220202214710-386638" podUID=ecd64b85c03f75ef813989b5d080682a containerName="kube-apiserver" containerID={Type:docker ID:26bb951a2f1e150bc66a6d600f18eb919d6a3a49f67b3dc307cb5515e06348ec}
Feb 02 21:48:19 functional-20220202214710-386638 kubelet[5621]: E0202 21:48:19.921470 5621 kubelet.go:1777] failed to "KillContainer" for "kube-apiserver" with KillContainerError: "rpc error: code = Unknown desc = Error response from daemon: No such container: 26bb951a2f1e150bc66a6d600f18eb919d6a3a49f67b3dc307cb5515e06348ec"
Feb 02 21:48:19 functional-20220202214710-386638 kubelet[5621]: E0202 21:48:19.921522 5621 pod_workers.go:918] "Error syncing pod, skipping" err="failed to \"KillContainer\" for \"kube-apiserver\" with KillContainerError: \"rpc error: code = Unknown desc = Error response from daemon: No such container: 26bb951a2f1e150bc66a6d600f18eb919d6a3a49f67b3dc307cb5515e06348ec\"" pod="kube-system/kube-apiserver-functional-20220202214710-386638" podUID=ecd64b85c03f75ef813989b5d080682a
Feb 02 21:48:19 functional-20220202214710-386638 kubelet[5621]: I0202 21:48:19.922746 5621 kubelet_volumes.go:160] "Cleaned up orphaned pod volumes dir" podUID=ecd64b85c03f75ef813989b5d080682a path="/var/lib/kubelet/pods/ecd64b85c03f75ef813989b5d080682a/volumes"
Feb 02 21:48:20 functional-20220202214710-386638 kubelet[5621]: I0202 21:48:20.230722 5621 kubelet.go:1693] "Trying to delete pod" pod="kube-system/kube-apiserver-functional-20220202214710-386638" podUID=be7b8d8a-56b0-48f5-b841-e20b68886d3a
Feb 02 21:48:21 functional-20220202214710-386638 kubelet[5621]: W0202 21:48:21.822002 5621 reflector.go:324] object-"kube-system"/"coredns": failed to list *v1.ConfigMap: configmaps "coredns" is forbidden: User "system:node:functional-20220202214710-386638" cannot list resource "configmaps" in API group "" in the namespace "kube-system": no relationship found between node 'functional-20220202214710-386638' and this object
Feb 02 21:48:21 functional-20220202214710-386638 kubelet[5621]: E0202 21:48:21.822047 5621 reflector.go:138] object-"kube-system"/"coredns": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps "coredns" is forbidden: User "system:node:functional-20220202214710-386638" cannot list resource "configmaps" in API group "" in the namespace "kube-system": no relationship found between node 'functional-20220202214710-386638' and this object
Feb 02 21:48:21 functional-20220202214710-386638 kubelet[5621]: E0202 21:48:21.822957 5621 projected.go:199] Error preparing data for projected volume kube-api-access-lt99n for pod kube-system/coredns-64897985d-qmcxc: failed to fetch token: serviceaccounts "coredns" is forbidden: User "system:node:functional-20220202214710-386638" cannot create resource "serviceaccounts/token" in API group "" in the namespace "kube-system": no relationship found between node 'functional-20220202214710-386638' and this object
Feb 02 21:48:21 functional-20220202214710-386638 kubelet[5621]: E0202 21:48:21.823049 5621 nestedpendingoperations.go:335] Operation for "{volumeName:kubernetes.io/projected/d0448ec2-8c41-4697-9419-17bc3267ec06-kube-api-access-lt99n podName:d0448ec2-8c41-4697-9419-17bc3267ec06 nodeName:}" failed. No retries permitted until 2022-02-02 21:48:22.823023056 +0000 UTC m=+7.381277125 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-lt99n" (UniqueName: "kubernetes.io/projected/d0448ec2-8c41-4697-9419-17bc3267ec06-kube-api-access-lt99n") pod "coredns-64897985d-qmcxc" (UID: "d0448ec2-8c41-4697-9419-17bc3267ec06") : failed to fetch token: serviceaccounts "coredns" is forbidden: User "system:node:functional-20220202214710-386638" cannot create resource "serviceaccounts/token" in API group "" in the namespace "kube-system": no relationship found between node 'functional-20220202214710-386638' and this object
Feb 02 21:48:21 functional-20220202214710-386638 kubelet[5621]: E0202 21:48:21.823119 5621 projected.go:199] Error preparing data for projected volume kube-api-access-xcpd7 for pod kube-system/kube-proxy-c2lnh: failed to fetch token: serviceaccounts "kube-proxy" is forbidden: User "system:node:functional-20220202214710-386638" cannot create resource "serviceaccounts/token" in API group "" in the namespace "kube-system": no relationship found between node 'functional-20220202214710-386638' and this object
Feb 02 21:48:21 functional-20220202214710-386638 kubelet[5621]: E0202 21:48:21.823157 5621 nestedpendingoperations.go:335] Operation for "{volumeName:kubernetes.io/projected/2e835a4a-ce75-4ea4-93dd-0473663c28e1-kube-api-access-xcpd7 podName:2e835a4a-ce75-4ea4-93dd-0473663c28e1 nodeName:}" failed. No retries permitted until 2022-02-02 21:48:22.823144662 +0000 UTC m=+7.381398724 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-xcpd7" (UniqueName: "kubernetes.io/projected/2e835a4a-ce75-4ea4-93dd-0473663c28e1-kube-api-access-xcpd7") pod "kube-proxy-c2lnh" (UID: "2e835a4a-ce75-4ea4-93dd-0473663c28e1") : failed to fetch token: serviceaccounts "kube-proxy" is forbidden: User "system:node:functional-20220202214710-386638" cannot create resource "serviceaccounts/token" in API group "" in the namespace "kube-system": no relationship found between node 'functional-20220202214710-386638' and this object
Feb 02 21:48:21 functional-20220202214710-386638 kubelet[5621]: W0202 21:48:21.824500 5621 reflector.go:324] object-"kube-system"/"kube-root-ca.crt": failed to list *v1.ConfigMap: configmaps "kube-root-ca.crt" is forbidden: User "system:node:functional-20220202214710-386638" cannot list resource "configmaps" in API group "" in the namespace "kube-system": no relationship found between node 'functional-20220202214710-386638' and this object
Feb 02 21:48:21 functional-20220202214710-386638 kubelet[5621]: E0202 21:48:21.824559 5621 reflector.go:138] object-"kube-system"/"kube-root-ca.crt": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps "kube-root-ca.crt" is forbidden: User "system:node:functional-20220202214710-386638" cannot list resource "configmaps" in API group "" in the namespace "kube-system": no relationship found between node 'functional-20220202214710-386638' and this object
Feb 02 21:48:21 functional-20220202214710-386638 kubelet[5621]: E0202 21:48:21.824629 5621 projected.go:199] Error preparing data for projected volume kube-api-access-zq69t for pod kube-system/storage-provisioner: failed to fetch token: serviceaccounts "storage-provisioner" is forbidden: User "system:node:functional-20220202214710-386638" cannot create resource "serviceaccounts/token" in API group "" in the namespace "kube-system": no relationship found between node 'functional-20220202214710-386638' and this object
Feb 02 21:48:21 functional-20220202214710-386638 kubelet[5621]: E0202 21:48:21.824694 5621 nestedpendingoperations.go:335] Operation for "{volumeName:kubernetes.io/projected/4660a715-0b0d-419a-a7b1-650bf4a8466f-kube-api-access-zq69t podName:4660a715-0b0d-419a-a7b1-650bf4a8466f nodeName:}" failed. No retries permitted until 2022-02-02 21:48:22.824675098 +0000 UTC m=+7.382929159 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-zq69t" (UniqueName: "kubernetes.io/projected/4660a715-0b0d-419a-a7b1-650bf4a8466f-kube-api-access-zq69t") pod "storage-provisioner" (UID: "4660a715-0b0d-419a-a7b1-650bf4a8466f") : failed to fetch token: serviceaccounts "storage-provisioner" is forbidden: User "system:node:functional-20220202214710-386638" cannot create resource "serviceaccounts/token" in API group "" in the namespace "kube-system": no relationship found between node 'functional-20220202214710-386638' and this object
Feb 02 21:48:21 functional-20220202214710-386638 kubelet[5621]: W0202 21:48:21.824788 5621 reflector.go:324] object-"kube-system"/"kube-proxy": failed to list *v1.ConfigMap: configmaps "kube-proxy" is forbidden: User "system:node:functional-20220202214710-386638" cannot list resource "configmaps" in API group "" in the namespace "kube-system": no relationship found between node 'functional-20220202214710-386638' and this object
Feb 02 21:48:21 functional-20220202214710-386638 kubelet[5621]: E0202 21:48:21.824829 5621 reflector.go:138] object-"kube-system"/"kube-proxy": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps "kube-proxy" is forbidden: User "system:node:functional-20220202214710-386638" cannot list resource "configmaps" in API group "" in the namespace "kube-system": no relationship found between node 'functional-20220202214710-386638' and this object
Feb 02 21:48:22 functional-20220202214710-386638 kubelet[5621]: I0202 21:48:22.018560 5621 kubelet.go:1698] "Deleted mirror pod because it is outdated" pod="kube-system/kube-apiserver-functional-20220202214710-386638"
Feb 02 21:48:23 functional-20220202214710-386638 kubelet[5621]: I0202 21:48:23.248325 5621 kubelet.go:1693] "Trying to delete pod" pod="kube-system/kube-apiserver-functional-20220202214710-386638" podUID=be7b8d8a-56b0-48f5-b841-e20b68886d3a
Feb 02 21:48:23 functional-20220202214710-386638 kubelet[5621]: I0202 21:48:23.711928 5621 docker_sandbox.go:402] "Failed to read pod IP from plugin/docker" err="Couldn't find network status for kube-system/coredns-64897985d-qmcxc through plugin: invalid network status for"
Feb 02 21:48:23 functional-20220202214710-386638 kubelet[5621]: I0202 21:48:23.742026 5621 scope.go:110] "RemoveContainer" containerID="4e9cce7a10fc55f2dbbdc4177b6341887968004fb441bca08dc777522255a5c8"
Feb 02 21:48:24 functional-20220202214710-386638 kubelet[5621]: I0202 21:48:24.262101 5621 docker_sandbox.go:402] "Failed to read pod IP from plugin/docker" err="Couldn't find network status for kube-system/coredns-64897985d-qmcxc through plugin: invalid network status for"
*
* ==> storage-provisioner [4e9cce7a10fc] <==
* I0202 21:48:10.327653 1 storage_provisioner.go:116] Initializing the minikube storage provisioner...
F0202 21:48:10.330434 1 main.go:39] error getting server version: Get "https://10.96.0.1:443/version?timeout=32s": dial tcp 10.96.0.1:443: connect: connection refused
*
* ==> storage-provisioner [eebcc13f80f2] <==
* I0202 21:48:23.851838 1 storage_provisioner.go:116] Initializing the minikube storage provisioner...
I0202 21:48:23.858861 1 storage_provisioner.go:141] Storage provisioner initialized, now starting service!
I0202 21:48:23.858897 1 leaderelection.go:243] attempting to acquire leader lease kube-system/k8s.io-minikube-hostpath...
-- /stdout --
helpers_test.go:255: (dbg) Run: out/minikube-linux-amd64 status --format={{.APIServer}} -p functional-20220202214710-386638 -n functional-20220202214710-386638
helpers_test.go:262: (dbg) Run: kubectl --context functional-20220202214710-386638 get po -o=jsonpath={.items[*].metadata.name} -A --field-selector=status.phase!=Running
helpers_test.go:271: non-running pods:
helpers_test.go:273: ======> post-mortem[TestFunctional/serial/ComponentHealth]: describe non-running pods <======
helpers_test.go:276: (dbg) Run: kubectl --context functional-20220202214710-386638 describe pod
helpers_test.go:276: (dbg) Non-zero exit: kubectl --context functional-20220202214710-386638 describe pod : exit status 1 (39.003493ms)
** stderr **
error: resource name may not be empty
** /stderr **
helpers_test.go:278: kubectl --context functional-20220202214710-386638 describe pod : exit status 1
--- FAIL: TestFunctional/serial/ComponentHealth (2.23s)