=== RUN TestMultiNode/serial/DeployApp2Nodes
multinode_test.go:479: (dbg) Run: out/minikube-linux-amd64 kubectl -p multinode-541903 -- apply -f ./testdata/multinodes/multinode-pod-dns-test.yaml
multinode_test.go:484: (dbg) Run: out/minikube-linux-amd64 kubectl -p multinode-541903 -- rollout status deployment/busybox
multinode_test.go:484: (dbg) Done: out/minikube-linux-amd64 kubectl -p multinode-541903 -- rollout status deployment/busybox: (2.056477551s)
multinode_test.go:490: (dbg) Run: out/minikube-linux-amd64 kubectl -p multinode-541903 -- get pods -o jsonpath='{.items[*].status.podIP}'
multinode_test.go:496: expected 2 Pod IPs but got 1
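[note: the jsonpath above returns one space-separated podIP per pod, so getting 1 with two replicas means the second busybox pod's status.podIP was either empty or identical to the first at query time. A suggested manual check, not part of the harness, that shows IP, readiness, and node placement in one view:
    # list each pod's IP and the node it was scheduled on
    out/minikube-linux-amd64 kubectl -p multinode-541903 -- get pods -o wide
]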
multinode_test.go:503: (dbg) Run: out/minikube-linux-amd64 kubectl -p multinode-541903 -- get pods -o jsonpath='{.items[*].metadata.name}'
multinode_test.go:511: (dbg) Run: out/minikube-linux-amd64 kubectl -p multinode-541903 -- exec busybox-6b86dd6d48-dtw6s -- nslookup kubernetes.io
multinode_test.go:511: (dbg) Non-zero exit: out/minikube-linux-amd64 kubectl -p multinode-541903 -- exec busybox-6b86dd6d48-dtw6s -- nslookup kubernetes.io: exit status 1 (182.333306ms)
-- stdout --
Server: 10.96.0.10
Address 1: 10.96.0.10
-- /stdout --
** stderr **
nslookup: can't resolve 'kubernetes.io'
command terminated with exit code 1
** /stderr **
multinode_test.go:513: Pod busybox-6b86dd6d48-dtw6s could not resolve 'kubernetes.io': exit status 1
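[note: the Server/Address lines in stdout only echo the nameserver from the pod's /etc/resolv.conf (the kube-dns ClusterIP 10.96.0.10); the stderr "can't resolve" means the query to that server returned no usable answer. Hedged manual follow-ups; <coredns-pod-ip> is a placeholder taken from "kubectl -n kube-system get pods -o wide":
    # confirm the failing pod is actually pointed at cluster DNS
    out/minikube-linux-amd64 kubectl -p multinode-541903 -- exec busybox-6b86dd6d48-dtw6s -- cat /etc/resolv.conf
    # query the CoreDNS pod directly by IP to separate service-VIP problems from pod-network problems
    out/minikube-linux-amd64 kubectl -p multinode-541903 -- exec busybox-6b86dd6d48-dtw6s -- nslookup kubernetes.io <coredns-pod-ip>
]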
multinode_test.go:511: (dbg) Run: out/minikube-linux-amd64 kubectl -p multinode-541903 -- exec busybox-6b86dd6d48-x6jgm -- nslookup kubernetes.io
multinode_test.go:521: (dbg) Run: out/minikube-linux-amd64 kubectl -p multinode-541903 -- exec busybox-6b86dd6d48-dtw6s -- nslookup kubernetes.default
multinode_test.go:521: (dbg) Non-zero exit: out/minikube-linux-amd64 kubectl -p multinode-541903 -- exec busybox-6b86dd6d48-dtw6s -- nslookup kubernetes.default: exit status 1 (171.559867ms)
-- stdout --
Server: 10.96.0.10
Address 1: 10.96.0.10
-- /stdout --
** stderr **
nslookup: can't resolve 'kubernetes.default'
command terminated with exit code 1
** /stderr **
multinode_test.go:523: Pod busybox-6b86dd6d48-dtw6s could not resolve 'kubernetes.default': exit status 1
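[note: kubernetes.default needs no upstream forwarding; CoreDNS answers it authoritatively, so its failure from the same pod rules out external DNS and points at the path between this pod and CoreDNS itself. Useful state to capture at this point (a suggested check, not part of the harness):
    # CoreDNS pods, and whether the kube-dns service actually has endpoints behind 10.96.0.10
    out/minikube-linux-amd64 kubectl -p multinode-541903 -- get pods -n kube-system -l k8s-app=kube-dns -o wide
    out/minikube-linux-amd64 kubectl -p multinode-541903 -- get endpoints kube-dns -n kube-system
]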
multinode_test.go:521: (dbg) Run: out/minikube-linux-amd64 kubectl -p multinode-541903 -- exec busybox-6b86dd6d48-x6jgm -- nslookup kubernetes.default
multinode_test.go:529: (dbg) Run: out/minikube-linux-amd64 kubectl -p multinode-541903 -- exec busybox-6b86dd6d48-dtw6s -- nslookup kubernetes.default.svc.cluster.local
multinode_test.go:529: (dbg) Non-zero exit: out/minikube-linux-amd64 kubectl -p multinode-541903 -- exec busybox-6b86dd6d48-dtw6s -- nslookup kubernetes.default.svc.cluster.local: exit status 1 (185.915793ms)
-- stdout --
Server: 10.96.0.10
Address 1: 10.96.0.10
-- /stdout --
** stderr **
nslookup: can't resolve 'kubernetes.default.svc.cluster.local'
command terminated with exit code 1
** /stderr **
multinode_test.go:531: Pod busybox-6b86dd6d48-dtw6s could not resolve local service (kubernetes.default.svc.cluster.local): exit status 1
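[note: all three lookups fail from busybox-6b86dd6d48-dtw6s while the paired runs against busybox-6b86dd6d48-x6jgm succeed, so the breakage is confined to one pod, most plausibly the one scheduled on the second node. This profile uses kindnet as its CNI (see "recommending kindnet" in the start log below), so a manual triage would next look at the per-node CNI pods and raw cross-node pod connectivity; a sketch, with <other-pod-ip> as a placeholder for the healthy pod's IP:
    # every node should have a Ready CNI daemonset pod
    out/minikube-linux-amd64 kubectl -p multinode-541903 -- get daemonsets -n kube-system -o wide
    # bypass DNS entirely: can the failing pod reach a pod on the other node at all?
    out/minikube-linux-amd64 kubectl -p multinode-541903 -- exec busybox-6b86dd6d48-dtw6s -- ping -c 1 <other-pod-ip>
]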
multinode_test.go:529: (dbg) Run: out/minikube-linux-amd64 kubectl -p multinode-541903 -- exec busybox-6b86dd6d48-x6jgm -- nslookup kubernetes.default.svc.cluster.local
helpers_test.go:222: -----------------------post-mortem--------------------------------
helpers_test.go:230: ======> post-mortem[TestMultiNode/serial/DeployApp2Nodes]: docker inspect <======
helpers_test.go:231: (dbg) Run: docker inspect multinode-541903
helpers_test.go:235: (dbg) docker inspect multinode-541903:
-- stdout --
[
{
"Id": "1d27206ca39a65e916c323d4d71d0c3b28802eaddd46ac07bd396d621b9add88",
"Created": "2023-02-23T04:37:12.560902337Z",
"Path": "/usr/local/bin/entrypoint",
"Args": [
"/sbin/init"
],
"State": {
"Status": "running",
"Running": true,
"Paused": false,
"Restarting": false,
"OOMKilled": false,
"Dead": false,
"Pid": 155066,
"ExitCode": 0,
"Error": "",
"StartedAt": "2023-02-23T04:37:12.910247722Z",
"FinishedAt": "0001-01-01T00:00:00Z"
},
"Image": "sha256:b74f629d1852fc20b6085123d98944654faddf1d7e642b41aa2866d7a48081ea",
"ResolvConfPath": "/var/lib/docker/containers/1d27206ca39a65e916c323d4d71d0c3b28802eaddd46ac07bd396d621b9add88/resolv.conf",
"HostnamePath": "/var/lib/docker/containers/1d27206ca39a65e916c323d4d71d0c3b28802eaddd46ac07bd396d621b9add88/hostname",
"HostsPath": "/var/lib/docker/containers/1d27206ca39a65e916c323d4d71d0c3b28802eaddd46ac07bd396d621b9add88/hosts",
"LogPath": "/var/lib/docker/containers/1d27206ca39a65e916c323d4d71d0c3b28802eaddd46ac07bd396d621b9add88/1d27206ca39a65e916c323d4d71d0c3b28802eaddd46ac07bd396d621b9add88-json.log",
"Name": "/multinode-541903",
"RestartCount": 0,
"Driver": "overlay2",
"Platform": "linux",
"MountLabel": "",
"ProcessLabel": "",
"AppArmorProfile": "unconfined",
"ExecIDs": null,
"HostConfig": {
"Binds": [
"/lib/modules:/lib/modules:ro",
"multinode-541903:/var"
],
"ContainerIDFile": "",
"LogConfig": {
"Type": "json-file",
"Config": {
"max-size": "100m"
}
},
"NetworkMode": "multinode-541903",
"PortBindings": {
"22/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"2376/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"32443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"5000/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"8443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
]
},
"RestartPolicy": {
"Name": "no",
"MaximumRetryCount": 0
},
"AutoRemove": false,
"VolumeDriver": "",
"VolumesFrom": null,
"ConsoleSize": [
0,
0
],
"CapAdd": null,
"CapDrop": null,
"CgroupnsMode": "host",
"Dns": [],
"DnsOptions": [],
"DnsSearch": [],
"ExtraHosts": null,
"GroupAdd": null,
"IpcMode": "private",
"Cgroup": "",
"Links": null,
"OomScoreAdj": 0,
"PidMode": "",
"Privileged": true,
"PublishAllPorts": false,
"ReadonlyRootfs": false,
"SecurityOpt": [
"seccomp=unconfined",
"apparmor=unconfined",
"label=disable"
],
"Tmpfs": {
"/run": "",
"/tmp": ""
},
"UTSMode": "",
"UsernsMode": "",
"ShmSize": 67108864,
"Runtime": "runc",
"Isolation": "",
"CpuShares": 0,
"Memory": 2306867200,
"NanoCpus": 2000000000,
"CgroupParent": "",
"BlkioWeight": 0,
"BlkioWeightDevice": [],
"BlkioDeviceReadBps": [],
"BlkioDeviceWriteBps": [],
"BlkioDeviceReadIOps": [],
"BlkioDeviceWriteIOps": [],
"CpuPeriod": 0,
"CpuQuota": 0,
"CpuRealtimePeriod": 0,
"CpuRealtimeRuntime": 0,
"CpusetCpus": "",
"CpusetMems": "",
"Devices": [],
"DeviceCgroupRules": null,
"DeviceRequests": null,
"MemoryReservation": 0,
"MemorySwap": 4613734400,
"MemorySwappiness": null,
"OomKillDisable": false,
"PidsLimit": null,
"Ulimits": null,
"CpuCount": 0,
"CpuPercent": 0,
"IOMaximumIOps": 0,
"IOMaximumBandwidth": 0,
"MaskedPaths": null,
"ReadonlyPaths": null
},
"GraphDriver": {
"Data": {
"LowerDir": "/var/lib/docker/overlay2/29664ba0c052debe94b33bc6597e58752c12a6a4be15d08d58ffea3a75ef443e-init/diff:/var/lib/docker/overlay2/ce11df4114e66bfbd48e5a1426c46fba069f3da991837cf803c2e8b8c3eda6d0/diff:/var/lib/docker/overlay2/67867042f6e8afdc18342af2eb336ad4c5c0a7401fc637e1fc5d8e5fb5da9c54/diff:/var/lib/docker/overlay2/6f60a7f51cd5411c4b238cbec1d9eaf9e9d0fe2c0cede5c8b6dda2cc1c1a4dae/diff:/var/lib/docker/overlay2/f16b6a02a409b03e0dd8e4132898a088c958a65daa5c41d68382933cb25cbf89/diff:/var/lib/docker/overlay2/c67a6477fb334471c2250630b01e7737dbdbff49c8042fe89c46c91015b4ca93/diff:/var/lib/docker/overlay2/759946ae41bd3dfc914e66cfa75b5a8235623c706571b593d11de851063660ff/diff:/var/lib/docker/overlay2/76350694ed8c71793018ca57024aa570f2dd716677a27e7c63c0c1ace7f86863/diff:/var/lib/docker/overlay2/1256271676dd13740fdc61178553e1b8bb86e0764d6cb0782b8dae00f36507b3/diff:/var/lib/docker/overlay2/0e69b6a8005382927f8aa3da9209cecfff85e70cf6c18e74dfa33cefe1654f7c/diff:/var/lib/docker/overlay2/7c4343
3470d3082f60e71ef482b5abf15e302185346f4b6f0ec09fa138072a58/diff:/var/lib/docker/overlay2/62703523372d3e1e2225086f063de172bef4faaed6984fe6e5419b67d77def10/diff:/var/lib/docker/overlay2/ebcaa0ba65daf5b3e2080d1a0fbaed6f24b4a233c7e891c7b81b1cee6bf2cedc/diff:/var/lib/docker/overlay2/be200cbd5f8a9354199dd45994f24c87e0411dc5155990180f5a0704ddbd6a7f/diff:/var/lib/docker/overlay2/2c8e96b711f711507975a3691795adb22a0be94ca49e9049e478be931313ad11/diff:/var/lib/docker/overlay2/cd8d07503ff6d83a45883d455890d1654107762b732dcc1f3bfceb562be85e70/diff:/var/lib/docker/overlay2/764371ec9e5bf3b70ce35ff48af07492ecb15f74bdbf45c71cdc9517fc728053/diff:/var/lib/docker/overlay2/fa3a703bdaeb2188baa9e3f51895dd0f37fb9408833a39b0e5ce2b999d8b5e3b/diff:/var/lib/docker/overlay2/9d3047008e6db82dd0f3755797a6985a805a80f21d7181cb7a834b6bec6c4236/diff:/var/lib/docker/overlay2/7f9dd8c5ff7351af1c2030799512d6a9195eb3fcaa437b669d090f95add1b538/diff:/var/lib/docker/overlay2/9874bcc9104691555a13f4807b8645aa337bb605cfe7c4c3d999c352bf33108a/diff:/var/lib/d
ocker/overlay2/379a0c4086fa34fac106ef4ac3a24efe6cf22f571ebc29fb7bb8b6970482138c/diff:/var/lib/docker/overlay2/ae6542469629e23a09e75f9a4078816c27f5e5c78355e26882429e82a14b0032/diff:/var/lib/docker/overlay2/2fa972d6ec076c136c58b91e721951ccb586489a4b63ad19cd86315f551aa876/diff:/var/lib/docker/overlay2/01989adcef7287b715d2b4b23904420d3387d0a8658574c0f8e5c07fe4ccb7e5/diff:/var/lib/docker/overlay2/410a2d9031c54c0ba9264f4dadbec3ac1b59268474102e484b2e8818be9b4fe4/diff:/var/lib/docker/overlay2/b97341c23172694f1cbf5fa161c877a5dd5f975ee1862633f23cbfa5af81e27d/diff:/var/lib/docker/overlay2/e9ca6b413e94904f8d1bc476869b0d2ada9a8033d6ccba23949fc34fd91a6055/diff:/var/lib/docker/overlay2/de4ce7e1e4781e9531c31405f53bbe4ab986ac8b16b2d27e86ac990551cf87ba/diff:/var/lib/docker/overlay2/6e6aa1c7b0b1a4aa5b95564435bac895514f2b12a5d4f51516163f0210f41429/diff:/var/lib/docker/overlay2/c39263178aa7f1210d56bc7f166c3b432086405849c4cdc45771a67f3d909d59/diff:/var/lib/docker/overlay2/55dc6afe612178216a18ed545066ceb72dd839cbb03a5111900c0c5cd4c
6355f/diff:/var/lib/docker/overlay2/f817395bf92f200738853c3b96425a94d2d199a514d7a748c518b354a6ccfb8c/diff:/var/lib/docker/overlay2/fe5b47e6a1ae2c874177508e870ea98506ae220a6aa83d02fe28129bcedf0d55/diff:/var/lib/docker/overlay2/5000c9c08db27bab6b64b973d49c1852269aeb8d541ceaaea8e827b672682f06/diff:/var/lib/docker/overlay2/4b2e0970175e214d30c9de79481841f7d8d0047489d760ac1586aec0da18504b/diff:/var/lib/docker/overlay2/2cbfc460cc7a2e3fcef0b98680fd5c35cffb968718b6734a5a4623f6319115d0/diff:/var/lib/docker/overlay2/83115e92a6884a211247210828b926f7406434498585dc4c63a72ba551840d6c/diff:/var/lib/docker/overlay2/f054795703db72e95af1cbc1f0b521fdcab36ce052796c8ad0d7c0b3768736fb/diff:/var/lib/docker/overlay2/98a79adf5fc52ffb68cd8949ead8e06275312936c8b0af96acdf26d2b1faae37/diff:/var/lib/docker/overlay2/a6e8e3f2f2fb2d4d3853e3eac262e60e60df39d4f02d7bfb4a6d0108e969ed28/diff:/var/lib/docker/overlay2/38ceddf66b01c4b1744a41eee4a1b08cbd12f555385fd69aab17e8ed0014cb41/diff:/var/lib/docker/overlay2/a36e0f01a73fe397a0d178156c4afc47a2a50e
d52e77fcded872bcb15009f6ac/diff",
"MergedDir": "/var/lib/docker/overlay2/29664ba0c052debe94b33bc6597e58752c12a6a4be15d08d58ffea3a75ef443e/merged",
"UpperDir": "/var/lib/docker/overlay2/29664ba0c052debe94b33bc6597e58752c12a6a4be15d08d58ffea3a75ef443e/diff",
"WorkDir": "/var/lib/docker/overlay2/29664ba0c052debe94b33bc6597e58752c12a6a4be15d08d58ffea3a75ef443e/work"
},
"Name": "overlay2"
},
"Mounts": [
{
"Type": "bind",
"Source": "/lib/modules",
"Destination": "/lib/modules",
"Mode": "ro",
"RW": false,
"Propagation": "rprivate"
},
{
"Type": "volume",
"Name": "multinode-541903",
"Source": "/var/lib/docker/volumes/multinode-541903/_data",
"Destination": "/var",
"Driver": "local",
"Mode": "z",
"RW": true,
"Propagation": ""
}
],
"Config": {
"Hostname": "multinode-541903",
"Domainname": "",
"User": "root",
"AttachStdin": false,
"AttachStdout": false,
"AttachStderr": false,
"ExposedPorts": {
"22/tcp": {},
"2376/tcp": {},
"32443/tcp": {},
"5000/tcp": {},
"8443/tcp": {}
},
"Tty": true,
"OpenStdin": false,
"StdinOnce": false,
"Env": [
"container=docker",
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
],
"Cmd": null,
"Image": "gcr.io/k8s-minikube/kicbase-builds:v0.0.37-1676506612-15768@sha256:cc1cb283879fedae93096946a6953a50075ed680d467a47cbf669e0ed7d3aebc",
"Volumes": null,
"WorkingDir": "",
"Entrypoint": [
"/usr/local/bin/entrypoint",
"/sbin/init"
],
"OnBuild": null,
"Labels": {
"created_by.minikube.sigs.k8s.io": "true",
"mode.minikube.sigs.k8s.io": "multinode-541903",
"name.minikube.sigs.k8s.io": "multinode-541903",
"org.opencontainers.image.ref.name": "ubuntu",
"org.opencontainers.image.version": "20.04",
"role.minikube.sigs.k8s.io": ""
},
"StopSignal": "SIGRTMIN+3"
},
"NetworkSettings": {
"Bridge": "",
"SandboxID": "f6e8fc67c8c4007c994ebacfc394b80b66f967fdeb9aa2478324a8f271306312",
"HairpinMode": false,
"LinkLocalIPv6Address": "",
"LinkLocalIPv6PrefixLen": 0,
"Ports": {
"22/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "32855"
}
],
"2376/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "32854"
}
],
"32443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "32851"
}
],
"5000/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "32853"
}
],
"8443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "32852"
}
]
},
"SandboxKey": "/var/run/docker/netns/f6e8fc67c8c4",
"SecondaryIPAddresses": null,
"SecondaryIPv6Addresses": null,
"EndpointID": "",
"Gateway": "",
"GlobalIPv6Address": "",
"GlobalIPv6PrefixLen": 0,
"IPAddress": "",
"IPPrefixLen": 0,
"IPv6Gateway": "",
"MacAddress": "",
"Networks": {
"multinode-541903": {
"IPAMConfig": {
"IPv4Address": "192.168.58.2"
},
"Links": null,
"Aliases": [
"1d27206ca39a",
"multinode-541903"
],
"NetworkID": "c17496e5bdb69195742044cbb625d4109bf78bab9793fcb10ffae192eb76a11a",
"EndpointID": "0316122e1323bbcdcbfa45570de3ab755db5d2147c7ac3cd9270081a89a992c7",
"Gateway": "192.168.58.1",
"IPAddress": "192.168.58.2",
"IPPrefixLen": 24,
"IPv6Gateway": "",
"GlobalIPv6Address": "",
"GlobalIPv6PrefixLen": 0,
"MacAddress": "02:42:c0:a8:3a:02",
"DriverOpts": null
}
}
}
}
]
-- /stdout --
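[note: most of the inspect dump above is boilerplate; the actionable parts are NetworkSettings (the container holds 192.168.58.2 on the dedicated multinode-541903 bridge network) and Ports (the API server's 8443/tcp is published at 127.0.0.1:32852). The same Go-template trick the harness itself uses for 22/tcp further down extracts just those fields; a minimal sketch:
    # container IP on the cluster network, and the host port mapped to the API server
    docker inspect -f '{{(index .NetworkSettings.Networks "multinode-541903").IPAddress}}' multinode-541903
    docker inspect -f '{{(index (index .NetworkSettings.Ports "8443/tcp") 0).HostPort}}' multinode-541903
]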
helpers_test.go:239: (dbg) Run: out/minikube-linux-amd64 status --format={{.Host}} -p multinode-541903 -n multinode-541903
helpers_test.go:244: <<< TestMultiNode/serial/DeployApp2Nodes FAILED: start of post-mortem logs <<<
helpers_test.go:245: ======> post-mortem[TestMultiNode/serial/DeployApp2Nodes]: minikube logs <======
helpers_test.go:247: (dbg) Run: out/minikube-linux-amd64 -p multinode-541903 logs -n 25
helpers_test.go:247: (dbg) Done: out/minikube-linux-amd64 -p multinode-541903 logs -n 25: (1.025144136s)
helpers_test.go:252: TestMultiNode/serial/DeployApp2Nodes logs:
-- stdout --
*
* ==> Audit <==
* |---------|---------------------------------------------------|----------------------|---------|---------|---------------------|---------------------|
| Command | Args | Profile | User | Version | Start Time | End Time |
|---------|---------------------------------------------------|----------------------|---------|---------|---------------------|---------------------|
| start | -p second-766844 | second-766844 | jenkins | v1.29.0 | 23 Feb 23 04:36 UTC | 23 Feb 23 04:36 UTC |
| | --driver=docker | | | | | |
| | --container-runtime=docker | | | | | |
| delete | -p second-766844 | second-766844 | jenkins | v1.29.0 | 23 Feb 23 04:36 UTC | 23 Feb 23 04:36 UTC |
| delete | -p first-763826 | first-763826 | jenkins | v1.29.0 | 23 Feb 23 04:36 UTC | 23 Feb 23 04:36 UTC |
| start | -p mount-start-1-844680 | mount-start-1-844680 | jenkins | v1.29.0 | 23 Feb 23 04:36 UTC | 23 Feb 23 04:36 UTC |
| | --memory=2048 --mount | | | | | |
| | --mount-gid 0 --mount-msize | | | | | |
| | 6543 --mount-port 46464 | | | | | |
| | --mount-uid 0 --no-kubernetes | | | | | |
| | --driver=docker | | | | | |
| | --container-runtime=docker | | | | | |
| ssh | mount-start-1-844680 ssh -- ls | mount-start-1-844680 | jenkins | v1.29.0 | 23 Feb 23 04:36 UTC | 23 Feb 23 04:36 UTC |
| | /minikube-host | | | | | |
| start | -p mount-start-2-855962 | mount-start-2-855962 | jenkins | v1.29.0 | 23 Feb 23 04:36 UTC | 23 Feb 23 04:36 UTC |
| | --memory=2048 --mount | | | | | |
| | --mount-gid 0 --mount-msize | | | | | |
| | 6543 --mount-port 46465 | | | | | |
| | --mount-uid 0 --no-kubernetes | | | | | |
| | --driver=docker | | | | | |
| | --container-runtime=docker | | | | | |
| ssh | mount-start-2-855962 ssh -- ls | mount-start-2-855962 | jenkins | v1.29.0 | 23 Feb 23 04:36 UTC | 23 Feb 23 04:36 UTC |
| | /minikube-host | | | | | |
| delete | -p mount-start-1-844680 | mount-start-1-844680 | jenkins | v1.29.0 | 23 Feb 23 04:36 UTC | 23 Feb 23 04:36 UTC |
| | --alsologtostderr -v=5 | | | | | |
| ssh | mount-start-2-855962 ssh -- ls | mount-start-2-855962 | jenkins | v1.29.0 | 23 Feb 23 04:36 UTC | 23 Feb 23 04:36 UTC |
| | /minikube-host | | | | | |
| stop | -p mount-start-2-855962 | mount-start-2-855962 | jenkins | v1.29.0 | 23 Feb 23 04:36 UTC | 23 Feb 23 04:36 UTC |
| start | -p mount-start-2-855962 | mount-start-2-855962 | jenkins | v1.29.0 | 23 Feb 23 04:36 UTC | 23 Feb 23 04:37 UTC |
| ssh | mount-start-2-855962 ssh -- ls | mount-start-2-855962 | jenkins | v1.29.0 | 23 Feb 23 04:37 UTC | 23 Feb 23 04:37 UTC |
| | /minikube-host | | | | | |
| delete | -p mount-start-2-855962 | mount-start-2-855962 | jenkins | v1.29.0 | 23 Feb 23 04:37 UTC | 23 Feb 23 04:37 UTC |
| delete | -p mount-start-1-844680 | mount-start-1-844680 | jenkins | v1.29.0 | 23 Feb 23 04:37 UTC | 23 Feb 23 04:37 UTC |
| start | -p multinode-541903 | multinode-541903 | jenkins | v1.29.0 | 23 Feb 23 04:37 UTC | 23 Feb 23 04:38 UTC |
| | --wait=true --memory=2200 | | | | | |
| | --nodes=2 -v=8 | | | | | |
| | --alsologtostderr | | | | | |
| | --driver=docker | | | | | |
| | --container-runtime=docker | | | | | |
| kubectl | -p multinode-541903 -- apply -f | multinode-541903 | jenkins | v1.29.0 | 23 Feb 23 04:38 UTC | 23 Feb 23 04:38 UTC |
| | ./testdata/multinodes/multinode-pod-dns-test.yaml | | | | | |
| kubectl | -p multinode-541903 -- rollout | multinode-541903 | jenkins | v1.29.0 | 23 Feb 23 04:38 UTC | 23 Feb 23 04:38 UTC |
| | status deployment/busybox | | | | | |
| kubectl | -p multinode-541903 -- get pods -o | multinode-541903 | jenkins | v1.29.0 | 23 Feb 23 04:38 UTC | 23 Feb 23 04:38 UTC |
| | jsonpath='{.items[*].status.podIP}' | | | | | |
| kubectl | -p multinode-541903 -- get pods -o | multinode-541903 | jenkins | v1.29.0 | 23 Feb 23 04:38 UTC | 23 Feb 23 04:38 UTC |
| | jsonpath='{.items[*].metadata.name}' | | | | | |
| kubectl | -p multinode-541903 -- exec | multinode-541903 | jenkins | v1.29.0 | 23 Feb 23 04:38 UTC | |
| | busybox-6b86dd6d48-dtw6s -- | | | | | |
| | nslookup kubernetes.io | | | | | |
| kubectl | -p multinode-541903 -- exec | multinode-541903 | jenkins | v1.29.0 | 23 Feb 23 04:38 UTC | 23 Feb 23 04:38 UTC |
| | busybox-6b86dd6d48-x6jgm -- | | | | | |
| | nslookup kubernetes.io | | | | | |
| kubectl | -p multinode-541903 -- exec | multinode-541903 | jenkins | v1.29.0 | 23 Feb 23 04:38 UTC | |
| | busybox-6b86dd6d48-dtw6s -- | | | | | |
| | nslookup kubernetes.default | | | | | |
| kubectl | -p multinode-541903 -- exec | multinode-541903 | jenkins | v1.29.0 | 23 Feb 23 04:38 UTC | 23 Feb 23 04:38 UTC |
| | busybox-6b86dd6d48-x6jgm -- | | | | | |
| | nslookup kubernetes.default | | | | | |
| kubectl | -p multinode-541903 -- exec | multinode-541903 | jenkins | v1.29.0 | 23 Feb 23 04:38 UTC | |
| | busybox-6b86dd6d48-dtw6s -- nslookup | | | | | |
| | kubernetes.default.svc.cluster.local | | | | | |
| kubectl | -p multinode-541903 -- exec | multinode-541903 | jenkins | v1.29.0 | 23 Feb 23 04:38 UTC | 23 Feb 23 04:38 UTC |
| | busybox-6b86dd6d48-x6jgm -- nslookup | | | | | |
| | kubernetes.default.svc.cluster.local | | | | | |
|---------|---------------------------------------------------|----------------------|---------|---------|---------------------|---------------------|
*
* ==> Last Start <==
* Log file created at: 2023/02/23 04:37:06
Running on machine: ubuntu-20-agent-14
Binary: Built with gc go1.20.1 for linux/amd64
Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg
I0223 04:37:06.199106 154064 out.go:296] Setting OutFile to fd 1 ...
I0223 04:37:06.199276 154064 out.go:343] TERM=,COLORTERM=, which probably does not support color
I0223 04:37:06.199284 154064 out.go:309] Setting ErrFile to fd 2...
I0223 04:37:06.199289 154064 out.go:343] TERM=,COLORTERM=, which probably does not support color
I0223 04:37:06.199383 154064 root.go:336] Updating PATH: /home/jenkins/minikube-integration/15909-3701/.minikube/bin
I0223 04:37:06.199895 154064 out.go:303] Setting JSON to false
I0223 04:37:06.201113 154064 start.go:125] hostinfo: {"hostname":"ubuntu-20-agent-14","uptime":1178,"bootTime":1677125848,"procs":767,"os":"linux","platform":"ubuntu","platformFamily":"debian","platformVersion":"20.04","kernelVersion":"5.15.0-1029-gcp","kernelArch":"x86_64","virtualizationSystem":"kvm","virtualizationRole":"guest","hostId":"591c9f12-2938-3743-e2bf-c56a050d43d1"}
I0223 04:37:06.201167 154064 start.go:135] virtualization: kvm guest
I0223 04:37:06.204245 154064 out.go:177] * [multinode-541903] minikube v1.29.0 on Ubuntu 20.04 (kvm/amd64)
I0223 04:37:06.205598 154064 out.go:177] - MINIKUBE_LOCATION=15909
I0223 04:37:06.205598 154064 notify.go:220] Checking for updates...
I0223 04:37:06.207006 154064 out.go:177] - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
I0223 04:37:06.208332 154064 out.go:177] - KUBECONFIG=/home/jenkins/minikube-integration/15909-3701/kubeconfig
I0223 04:37:06.209658 154064 out.go:177] - MINIKUBE_HOME=/home/jenkins/minikube-integration/15909-3701/.minikube
I0223 04:37:06.211157 154064 out.go:177] - MINIKUBE_BIN=out/minikube-linux-amd64
I0223 04:37:06.212508 154064 out.go:177] - MINIKUBE_FORCE_SYSTEMD=
I0223 04:37:06.213873 154064 driver.go:365] Setting default libvirt URI to qemu:///system
I0223 04:37:06.281507 154064 docker.go:121] docker version: linux-23.0.1:Docker Engine - Community
I0223 04:37:06.281616 154064 cli_runner.go:164] Run: docker system info --format "{{json .}}"
I0223 04:37:06.392794 154064 info.go:266] docker info: {ID:TS6T:UINC:MIYS:RZPA:KS6T:4JQK:7JHN:D6RA:LDP2:MHAE:G32M:C5NQ Containers:0 ContainersRunning:0 ContainersPaused:0 ContainersStopped:0 Images:3 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local logentries splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:true BridgeNfIP6Tables:true Debug:false NFd:25 OomKillDisable:true NGoroutines:32 SystemTime:2023-02-23 04:37:06.384819398 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1029-gcp OperatingSystem:Ubuntu 20.04.5 LTS OSType:linux Architecture:x86_64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:8 MemTotal:33660661760 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ubuntu-20-agent-14 Labels:[] ExperimentalBuild:false ServerVersion:23.0.1 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:2456e983eb9e37e47538f59ea18f2043c9a73640 Expected:2456e983eb9e37e47538f59ea18f2043c9a73640} RuncCommit:{ID:v1.1.4-0-g5fd4c4d Expected:v1.1.4-0-g5fd4c4d} InitCommit:{ID:de40ad0 Expected:de40ad0} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.10.2] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.16.0] map[Name:scan Path:/usr/libexec/docker/cli-plugins/docker-scan SchemaVersion:0.1.0 ShortDescription:Docker Scan Vendor:Docker Inc. Version:v0.23.0]] Warnings:<nil>}}
I0223 04:37:06.392885 154064 docker.go:294] overlay module found
I0223 04:37:06.394805 154064 out.go:177] * Using the docker driver based on user configuration
I0223 04:37:06.396103 154064 start.go:296] selected driver: docker
I0223 04:37:06.396113 154064 start.go:857] validating driver "docker" against <nil>
I0223 04:37:06.396123 154064 start.go:868] status for docker: {Installed:true Healthy:true Running:false NeedsImprovement:false Error:<nil> Reason: Fix: Doc: Version:}
I0223 04:37:06.396790 154064 cli_runner.go:164] Run: docker system info --format "{{json .}}"
I0223 04:37:06.508592 154064 info.go:266] docker info: {ID:TS6T:UINC:MIYS:RZPA:KS6T:4JQK:7JHN:D6RA:LDP2:MHAE:G32M:C5NQ Containers:0 ContainersRunning:0 ContainersPaused:0 ContainersStopped:0 Images:3 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local logentries splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:true BridgeNfIP6Tables:true Debug:false NFd:25 OomKillDisable:true NGoroutines:32 SystemTime:2023-02-23 04:37:06.501128319 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1029-gcp OperatingSystem:Ubuntu 20.04.5 LTS OSType:linux Architecture:x86_64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:8 MemTotal:33660661760 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ubuntu-20-agent-14 Labels:[] ExperimentalBuild:false ServerVersion:23.0.1 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:2456e983eb9e37e47538f59ea18f2043c9a73640 Expected:2456e983eb9e37e47538f59ea18f2043c9a73640} RuncCommit:{ID:v1.1.4-0-g5fd4c4d Expected:v1.1.4-0-g5fd4c4d} InitCommit:{ID:de40ad0 Expected:de40ad0} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.10.2] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.16.0] map[Name:scan Path:/usr/libexec/docker/cli-plugins/docker-scan SchemaVersion:0.1.0 ShortDescription:Docker Scan Vendor:Docker Inc. Version:v0.23.0]] Warnings:<nil>}}
I0223 04:37:06.508687 154064 start_flags.go:305] no existing cluster config was found, will generate one from the flags
I0223 04:37:06.508880 154064 start_flags.go:919] Waiting for all components: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
I0223 04:37:06.510502 154064 out.go:177] * Using Docker driver with root privileges
I0223 04:37:06.511787 154064 cni.go:84] Creating CNI manager for ""
I0223 04:37:06.511801 154064 cni.go:136] 0 nodes found, recommending kindnet
I0223 04:37:06.511807 154064 start_flags.go:314] Found "CNI" CNI - setting NetworkPlugin=cni
I0223 04:37:06.511827 154064 start_flags.go:319] config:
{Name:multinode-541903 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.37-1676506612-15768@sha256:cc1cb283879fedae93096946a6953a50075ed680d467a47cbf669e0ed7d3aebc Memory:2200 CPUs:2 DiskSize:20000 VMDriver: Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:0 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.26.1 ClusterName:multinode-541903 Namespace:default APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI: NodeIP: NodePort:8443 NodeName:} Nodes:[] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:true ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP:}
I0223 04:37:06.513498 154064 out.go:177] * Starting control plane node multinode-541903 in cluster multinode-541903
I0223 04:37:06.514818 154064 cache.go:120] Beginning downloading kic base image for docker with docker
I0223 04:37:06.516079 154064 out.go:177] * Pulling base image ...
I0223 04:37:06.517314 154064 preload.go:132] Checking if preload exists for k8s version v1.26.1 and runtime docker
I0223 04:37:06.517341 154064 preload.go:148] Found local preload: /home/jenkins/minikube-integration/15909-3701/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.26.1-docker-overlay2-amd64.tar.lz4
I0223 04:37:06.517347 154064 image.go:77] Checking for gcr.io/k8s-minikube/kicbase-builds:v0.0.37-1676506612-15768@sha256:cc1cb283879fedae93096946a6953a50075ed680d467a47cbf669e0ed7d3aebc in local docker daemon
I0223 04:37:06.517351 154064 cache.go:57] Caching tarball of preloaded images
I0223 04:37:06.517484 154064 preload.go:174] Found /home/jenkins/minikube-integration/15909-3701/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.26.1-docker-overlay2-amd64.tar.lz4 in cache, skipping download
I0223 04:37:06.517498 154064 cache.go:60] Finished verifying existence of preloaded tar for v1.26.1 on docker
I0223 04:37:06.517778 154064 profile.go:148] Saving config to /home/jenkins/minikube-integration/15909-3701/.minikube/profiles/multinode-541903/config.json ...
I0223 04:37:06.517800 154064 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/15909-3701/.minikube/profiles/multinode-541903/config.json: {Name:mk856b2305ed49d6dea7d3a9bc0b566667b8c095 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0223 04:37:06.580154 154064 image.go:81] Found gcr.io/k8s-minikube/kicbase-builds:v0.0.37-1676506612-15768@sha256:cc1cb283879fedae93096946a6953a50075ed680d467a47cbf669e0ed7d3aebc in local docker daemon, skipping pull
I0223 04:37:06.580178 154064 cache.go:143] gcr.io/k8s-minikube/kicbase-builds:v0.0.37-1676506612-15768@sha256:cc1cb283879fedae93096946a6953a50075ed680d467a47cbf669e0ed7d3aebc exists in daemon, skipping load
I0223 04:37:06.580196 154064 cache.go:193] Successfully downloaded all kic artifacts
I0223 04:37:06.580229 154064 start.go:364] acquiring machines lock for multinode-541903: {Name:mk6b341dcb12ac869834450f5d5bc79d7a95fa3a Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I0223 04:37:06.580322 154064 start.go:368] acquired machines lock for "multinode-541903" in 74.108µs
I0223 04:37:06.580343 154064 start.go:93] Provisioning new machine with config: &{Name:multinode-541903 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.37-1676506612-15768@sha256:cc1cb283879fedae93096946a6953a50075ed680d467a47cbf669e0ed7d3aebc Memory:2200 CPUs:2 DiskSize:20000 VMDriver: Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:0 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.26.1 ClusterName:multinode-541903 Namespace:default APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI: NodeIP: NodePort:8443 NodeName:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.26.1 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:true ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP:} &{Name: IP: Port:8443 KubernetesVersion:v1.26.1 ContainerRuntime:docker ControlPlane:true Worker:true}
I0223 04:37:06.580425 154064 start.go:125] createHost starting for "" (driver="docker")
I0223 04:37:06.582519 154064 out.go:204] * Creating docker container (CPUs=2, Memory=2200MB) ...
I0223 04:37:06.582742 154064 start.go:159] libmachine.API.Create for "multinode-541903" (driver="docker")
I0223 04:37:06.582770 154064 client.go:168] LocalClient.Create starting
I0223 04:37:06.582827 154064 main.go:141] libmachine: Reading certificate data from /home/jenkins/minikube-integration/15909-3701/.minikube/certs/ca.pem
I0223 04:37:06.582863 154064 main.go:141] libmachine: Decoding PEM data...
I0223 04:37:06.582880 154064 main.go:141] libmachine: Parsing certificate...
I0223 04:37:06.582945 154064 main.go:141] libmachine: Reading certificate data from /home/jenkins/minikube-integration/15909-3701/.minikube/certs/cert.pem
I0223 04:37:06.582964 154064 main.go:141] libmachine: Decoding PEM data...
I0223 04:37:06.582972 154064 main.go:141] libmachine: Parsing certificate...
I0223 04:37:06.583267 154064 cli_runner.go:164] Run: docker network inspect multinode-541903 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
W0223 04:37:06.646780 154064 cli_runner.go:211] docker network inspect multinode-541903 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}" returned with exit code 1
I0223 04:37:06.646835 154064 network_create.go:281] running [docker network inspect multinode-541903] to gather additional debugging logs...
I0223 04:37:06.646849 154064 cli_runner.go:164] Run: docker network inspect multinode-541903
W0223 04:37:06.709129 154064 cli_runner.go:211] docker network inspect multinode-541903 returned with exit code 1
I0223 04:37:06.709159 154064 network_create.go:284] error running [docker network inspect multinode-541903]: docker network inspect multinode-541903: exit status 1
stdout:
[]
stderr:
Error response from daemon: network multinode-541903 not found
I0223 04:37:06.709169 154064 network_create.go:286] output of [docker network inspect multinode-541903]: -- stdout --
[]
-- /stdout --
** stderr **
Error response from daemon: network multinode-541903 not found
** /stderr **
I0223 04:37:06.709217 154064 cli_runner.go:164] Run: docker network inspect bridge --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I0223 04:37:06.772030 154064 network.go:214] skipping subnet 192.168.49.0/24 that is taken: &{IP:192.168.49.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.49.0/24 Gateway:192.168.49.1 ClientMin:192.168.49.2 ClientMax:192.168.49.254 Broadcast:192.168.49.255 IsPrivate:true Interface:{IfaceName:br-a36995994bb4 IfaceIPv4:192.168.49.1 IfaceMTU:1500 IfaceMAC:02:42:cf:40:7f:47} reservation:<nil>}
I0223 04:37:06.772477 154064 network.go:209] using free private subnet 192.168.58.0/24: &{IP:192.168.58.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.58.0/24 Gateway:192.168.58.1 ClientMin:192.168.58.2 ClientMax:192.168.58.254 Broadcast:192.168.58.255 IsPrivate:true Interface:{IfaceName: IfaceIPv4: IfaceMTU:0 IfaceMAC:} reservation:0xc001528d10}
I0223 04:37:06.772501 154064 network_create.go:123] attempt to create docker network multinode-541903 192.168.58.0/24 with gateway 192.168.58.1 and MTU of 1500 ...
I0223 04:37:06.772544 154064 cli_runner.go:164] Run: docker network create --driver=bridge --subnet=192.168.58.0/24 --gateway=192.168.58.1 -o --ip-masq -o --icc -o com.docker.network.driver.mtu=1500 --label=created_by.minikube.sigs.k8s.io=true --label=name.minikube.sigs.k8s.io=multinode-541903 multinode-541903
I0223 04:37:06.870184 154064 network_create.go:107] docker network multinode-541903 192.168.58.0/24 created
I0223 04:37:06.870210 154064 kic.go:117] calculated static IP "192.168.58.2" for the "multinode-541903" container
I0223 04:37:06.870261 154064 cli_runner.go:164] Run: docker ps -a --format {{.Names}}
I0223 04:37:06.932092 154064 cli_runner.go:164] Run: docker volume create multinode-541903 --label name.minikube.sigs.k8s.io=multinode-541903 --label created_by.minikube.sigs.k8s.io=true
I0223 04:37:06.994004 154064 oci.go:103] Successfully created a docker volume multinode-541903
I0223 04:37:06.994092 154064 cli_runner.go:164] Run: docker run --rm --name multinode-541903-preload-sidecar --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=multinode-541903 --entrypoint /usr/bin/test -v multinode-541903:/var gcr.io/k8s-minikube/kicbase-builds:v0.0.37-1676506612-15768@sha256:cc1cb283879fedae93096946a6953a50075ed680d467a47cbf669e0ed7d3aebc -d /var/lib
I0223 04:37:07.597899 154064 oci.go:107] Successfully prepared a docker volume multinode-541903
I0223 04:37:07.597935 154064 preload.go:132] Checking if preload exists for k8s version v1.26.1 and runtime docker
I0223 04:37:07.597957 154064 kic.go:190] Starting extracting preloaded images to volume ...
I0223 04:37:07.598011 154064 cli_runner.go:164] Run: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/15909-3701/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.26.1-docker-overlay2-amd64.tar.lz4:/preloaded.tar:ro -v multinode-541903:/extractDir gcr.io/k8s-minikube/kicbase-builds:v0.0.37-1676506612-15768@sha256:cc1cb283879fedae93096946a6953a50075ed680d467a47cbf669e0ed7d3aebc -I lz4 -xf /preloaded.tar -C /extractDir
I0223 04:37:12.381573 154064 cli_runner.go:217] Completed: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/15909-3701/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.26.1-docker-overlay2-amd64.tar.lz4:/preloaded.tar:ro -v multinode-541903:/extractDir gcr.io/k8s-minikube/kicbase-builds:v0.0.37-1676506612-15768@sha256:cc1cb283879fedae93096946a6953a50075ed680d467a47cbf669e0ed7d3aebc -I lz4 -xf /preloaded.tar -C /extractDir: (4.783485989s)
I0223 04:37:12.381606 154064 kic.go:199] duration metric: took 4.783647 seconds to extract preloaded images to volume
W0223 04:37:12.381720 154064 cgroups_linux.go:77] Your kernel does not support swap limit capabilities or the cgroup is not mounted.
I0223 04:37:12.381817 154064 cli_runner.go:164] Run: docker info --format "'{{json .SecurityOptions}}'"
I0223 04:37:12.497825 154064 cli_runner.go:164] Run: docker run -d -t --privileged --security-opt seccomp=unconfined --tmpfs /tmp --tmpfs /run -v /lib/modules:/lib/modules:ro --hostname multinode-541903 --name multinode-541903 --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=multinode-541903 --label role.minikube.sigs.k8s.io= --label mode.minikube.sigs.k8s.io=multinode-541903 --network multinode-541903 --ip 192.168.58.2 --volume multinode-541903:/var --security-opt apparmor=unconfined --memory=2200mb --cpus=2 -e container=docker --expose 8443 --publish=127.0.0.1::8443 --publish=127.0.0.1::22 --publish=127.0.0.1::2376 --publish=127.0.0.1::5000 --publish=127.0.0.1::32443 gcr.io/k8s-minikube/kicbase-builds:v0.0.37-1676506612-15768@sha256:cc1cb283879fedae93096946a6953a50075ed680d467a47cbf669e0ed7d3aebc
I0223 04:37:12.918331 154064 cli_runner.go:164] Run: docker container inspect multinode-541903 --format={{.State.Running}}
I0223 04:37:12.984456 154064 cli_runner.go:164] Run: docker container inspect multinode-541903 --format={{.State.Status}}
I0223 04:37:13.051481 154064 cli_runner.go:164] Run: docker exec multinode-541903 stat /var/lib/dpkg/alternatives/iptables
I0223 04:37:13.167609 154064 oci.go:144] the created container "multinode-541903" has a running status.
I0223 04:37:13.167637 154064 kic.go:221] Creating ssh key for kic: /home/jenkins/minikube-integration/15909-3701/.minikube/machines/multinode-541903/id_rsa...
I0223 04:37:13.310270 154064 vm_assets.go:163] NewFileAsset: /home/jenkins/minikube-integration/15909-3701/.minikube/machines/multinode-541903/id_rsa.pub -> /home/docker/.ssh/authorized_keys
I0223 04:37:13.310313 154064 kic_runner.go:191] docker (temp): /home/jenkins/minikube-integration/15909-3701/.minikube/machines/multinode-541903/id_rsa.pub --> /home/docker/.ssh/authorized_keys (381 bytes)
I0223 04:37:13.436692 154064 cli_runner.go:164] Run: docker container inspect multinode-541903 --format={{.State.Status}}
I0223 04:37:13.503783 154064 kic_runner.go:93] Run: chown docker:docker /home/docker/.ssh/authorized_keys
I0223 04:37:13.503802 154064 kic_runner.go:114] Args: [docker exec --privileged multinode-541903 chown docker:docker /home/docker/.ssh/authorized_keys]
I0223 04:37:13.615686 154064 cli_runner.go:164] Run: docker container inspect multinode-541903 --format={{.State.Status}}
I0223 04:37:13.679313 154064 machine.go:88] provisioning docker machine ...
I0223 04:37:13.679351 154064 ubuntu.go:169] provisioning hostname "multinode-541903"
I0223 04:37:13.679405 154064 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" multinode-541903
I0223 04:37:13.739580 154064 main.go:141] libmachine: Using SSH client type: native
I0223 04:37:13.740046 154064 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x17560a0] 0x1759120 <nil> [] 0s} 127.0.0.1 32855 <nil> <nil>}
I0223 04:37:13.740069 154064 main.go:141] libmachine: About to run SSH command:
sudo hostname multinode-541903 && echo "multinode-541903" | sudo tee /etc/hostname
I0223 04:37:13.875619 154064 main.go:141] libmachine: SSH cmd err, output: <nil>: multinode-541903
I0223 04:37:13.875685 154064 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" multinode-541903
I0223 04:37:13.940433 154064 main.go:141] libmachine: Using SSH client type: native
I0223 04:37:13.940844 154064 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x17560a0] 0x1759120 <nil> [] 0s} 127.0.0.1 32855 <nil> <nil>}
I0223 04:37:13.940876 154064 main.go:141] libmachine: About to run SSH command:
if ! grep -xq '.*\smultinode-541903' /etc/hosts; then
if grep -xq '127.0.1.1\s.*' /etc/hosts; then
sudo sed -i 's/^127.0.1.1\s.*/127.0.1.1 multinode-541903/g' /etc/hosts;
else
echo '127.0.1.1 multinode-541903' | sudo tee -a /etc/hosts;
fi
fi
I0223 04:37:14.067132 154064 main.go:141] libmachine: SSH cmd err, output: <nil>:
I0223 04:37:14.067163 154064 ubuntu.go:175] set auth options {CertDir:/home/jenkins/minikube-integration/15909-3701/.minikube CaCertPath:/home/jenkins/minikube-integration/15909-3701/.minikube/certs/ca.pem CaPrivateKeyPath:/home/jenkins/minikube-integration/15909-3701/.minikube/certs/ca-key.pem CaCertRemotePath:/etc/docker/ca.pem ServerCertPath:/home/jenkins/minikube-integration/15909-3701/.minikube/machines/server.pem ServerKeyPath:/home/jenkins/minikube-integration/15909-3701/.minikube/machines/server-key.pem ClientKeyPath:/home/jenkins/minikube-integration/15909-3701/.minikube/certs/key.pem ServerCertRemotePath:/etc/docker/server.pem ServerKeyRemotePath:/etc/docker/server-key.pem ClientCertPath:/home/jenkins/minikube-integration/15909-3701/.minikube/certs/cert.pem ServerCertSANs:[] StorePath:/home/jenkins/minikube-integration/15909-3701/.minikube}
I0223 04:37:14.067185 154064 ubuntu.go:177] setting up certificates
I0223 04:37:14.067195 154064 provision.go:83] configureAuth start
I0223 04:37:14.067247 154064 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" multinode-541903
I0223 04:37:14.128731 154064 provision.go:138] copyHostCerts
I0223 04:37:14.128770 154064 vm_assets.go:163] NewFileAsset: /home/jenkins/minikube-integration/15909-3701/.minikube/certs/ca.pem -> /home/jenkins/minikube-integration/15909-3701/.minikube/ca.pem
I0223 04:37:14.128802 154064 exec_runner.go:144] found /home/jenkins/minikube-integration/15909-3701/.minikube/ca.pem, removing ...
I0223 04:37:14.128810 154064 exec_runner.go:207] rm: /home/jenkins/minikube-integration/15909-3701/.minikube/ca.pem
I0223 04:37:14.128882 154064 exec_runner.go:151] cp: /home/jenkins/minikube-integration/15909-3701/.minikube/certs/ca.pem --> /home/jenkins/minikube-integration/15909-3701/.minikube/ca.pem (1082 bytes)
I0223 04:37:14.128961 154064 vm_assets.go:163] NewFileAsset: /home/jenkins/minikube-integration/15909-3701/.minikube/certs/cert.pem -> /home/jenkins/minikube-integration/15909-3701/.minikube/cert.pem
I0223 04:37:14.128987 154064 exec_runner.go:144] found /home/jenkins/minikube-integration/15909-3701/.minikube/cert.pem, removing ...
I0223 04:37:14.128997 154064 exec_runner.go:207] rm: /home/jenkins/minikube-integration/15909-3701/.minikube/cert.pem
I0223 04:37:14.129032 154064 exec_runner.go:151] cp: /home/jenkins/minikube-integration/15909-3701/.minikube/certs/cert.pem --> /home/jenkins/minikube-integration/15909-3701/.minikube/cert.pem (1123 bytes)
I0223 04:37:14.129086 154064 vm_assets.go:163] NewFileAsset: /home/jenkins/minikube-integration/15909-3701/.minikube/certs/key.pem -> /home/jenkins/minikube-integration/15909-3701/.minikube/key.pem
I0223 04:37:14.129113 154064 exec_runner.go:144] found /home/jenkins/minikube-integration/15909-3701/.minikube/key.pem, removing ...
I0223 04:37:14.129120 154064 exec_runner.go:207] rm: /home/jenkins/minikube-integration/15909-3701/.minikube/key.pem
I0223 04:37:14.129155 154064 exec_runner.go:151] cp: /home/jenkins/minikube-integration/15909-3701/.minikube/certs/key.pem --> /home/jenkins/minikube-integration/15909-3701/.minikube/key.pem (1675 bytes)
I0223 04:37:14.129217 154064 provision.go:112] generating server cert: /home/jenkins/minikube-integration/15909-3701/.minikube/machines/server.pem ca-key=/home/jenkins/minikube-integration/15909-3701/.minikube/certs/ca.pem private-key=/home/jenkins/minikube-integration/15909-3701/.minikube/certs/ca-key.pem org=jenkins.multinode-541903 san=[192.168.58.2 127.0.0.1 localhost 127.0.0.1 minikube multinode-541903]
I0223 04:37:14.423115 154064 provision.go:172] copyRemoteCerts
I0223 04:37:14.423176 154064 ssh_runner.go:195] Run: sudo mkdir -p /etc/docker /etc/docker /etc/docker
I0223 04:37:14.423216 154064 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" multinode-541903
I0223 04:37:14.486505 154064 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32855 SSHKeyPath:/home/jenkins/minikube-integration/15909-3701/.minikube/machines/multinode-541903/id_rsa Username:docker}
I0223 04:37:14.582913 154064 vm_assets.go:163] NewFileAsset: /home/jenkins/minikube-integration/15909-3701/.minikube/machines/server-key.pem -> /etc/docker/server-key.pem
I0223 04:37:14.582970 154064 ssh_runner.go:362] scp /home/jenkins/minikube-integration/15909-3701/.minikube/machines/server-key.pem --> /etc/docker/server-key.pem (1675 bytes)
I0223 04:37:14.599531 154064 vm_assets.go:163] NewFileAsset: /home/jenkins/minikube-integration/15909-3701/.minikube/certs/ca.pem -> /etc/docker/ca.pem
I0223 04:37:14.599583 154064 ssh_runner.go:362] scp /home/jenkins/minikube-integration/15909-3701/.minikube/certs/ca.pem --> /etc/docker/ca.pem (1082 bytes)
I0223 04:37:14.615567 154064 vm_assets.go:163] NewFileAsset: /home/jenkins/minikube-integration/15909-3701/.minikube/machines/server.pem -> /etc/docker/server.pem
I0223 04:37:14.615610 154064 ssh_runner.go:362] scp /home/jenkins/minikube-integration/15909-3701/.minikube/machines/server.pem --> /etc/docker/server.pem (1224 bytes)
I0223 04:37:14.630972 154064 provision.go:86] duration metric: configureAuth took 563.76766ms
I0223 04:37:14.630992 154064 ubuntu.go:193] setting minikube options for container-runtime
I0223 04:37:14.631126 154064 config.go:182] Loaded profile config "multinode-541903": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.26.1
I0223 04:37:14.631165 154064 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" multinode-541903
I0223 04:37:14.694263 154064 main.go:141] libmachine: Using SSH client type: native
I0223 04:37:14.694651 154064 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x17560a0] 0x1759120 <nil> [] 0s} 127.0.0.1 32855 <nil> <nil>}
I0223 04:37:14.694666 154064 main.go:141] libmachine: About to run SSH command:
df --output=fstype / | tail -n 1
I0223 04:37:14.823243 154064 main.go:141] libmachine: SSH cmd err, output: <nil>: overlay
I0223 04:37:14.823266 154064 ubuntu.go:71] root file system type: overlay
I0223 04:37:14.823393 154064 provision.go:309] Updating docker unit: /lib/systemd/system/docker.service ...
I0223 04:37:14.823454 154064 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" multinode-541903
I0223 04:37:14.886878 154064 main.go:141] libmachine: Using SSH client type: native
I0223 04:37:14.887289 154064 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x17560a0] 0x1759120 <nil> [] 0s} 127.0.0.1 32855 <nil> <nil>}
I0223 04:37:14.887347 154064 main.go:141] libmachine: About to run SSH command:
sudo mkdir -p /lib/systemd/system && printf %!s(MISSING) "[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
BindsTo=containerd.service
After=network-online.target firewalld.service containerd.service
Wants=network-online.target
Requires=docker.socket
StartLimitBurst=3
StartLimitIntervalSec=60
[Service]
Type=notify
Restart=on-failure
# This file is a systemd drop-in unit that inherits from the base dockerd configuration.
# The base configuration already specifies an 'ExecStart=...' command. The first directive
# here is to clear out that command inherited from the base configuration. Without this,
# the command from the base configuration and the command specified here are treated as
# a sequence of commands, which is not the desired behavior, nor is it valid -- systemd
# will catch this invalid input and refuse to start the service with an error like:
# Service has more than one ExecStart= setting, which is only allowed for Type=oneshot services.
# NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other
# container runtimes. If left unlimited, it may result in OOM issues with MySQL.
ExecStart=
ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=docker --insecure-registry 10.96.0.0/12
ExecReload=/bin/kill -s HUP \$MAINPID
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
# Uncomment TasksMax if your systemd version supports it.
# Only systemd 226 and above support this version.
TasksMax=infinity
TimeoutStartSec=0
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process
[Install]
WantedBy=multi-user.target
" | sudo tee /lib/systemd/system/docker.service.new
I0223 04:37:15.023768 154064 main.go:141] libmachine: SSH cmd err, output: <nil>: [Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
BindsTo=containerd.service
After=network-online.target firewalld.service containerd.service
Wants=network-online.target
Requires=docker.socket
StartLimitBurst=3
StartLimitIntervalSec=60
[Service]
Type=notify
Restart=on-failure
# This file is a systemd drop-in unit that inherits from the base dockerd configuration.
# The base configuration already specifies an 'ExecStart=...' command. The first directive
# here is to clear out that command inherited from the base configuration. Without this,
# the command from the base configuration and the command specified here are treated as
# a sequence of commands, which is not the desired behavior, nor is it valid -- systemd
# will catch this invalid input and refuse to start the service with an error like:
# Service has more than one ExecStart= setting, which is only allowed for Type=oneshot services.
# NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other
# container runtimes. If left unlimited, it may result in OOM issues with MySQL.
ExecStart=
ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=docker --insecure-registry 10.96.0.0/12
ExecReload=/bin/kill -s HUP $MAINPID
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
# Uncomment TasksMax if your systemd version supports it.
# Only systemd 226 and above support this version.
TasksMax=infinity
TimeoutStartSec=0
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process
[Install]
WantedBy=multi-user.target
I0223 04:37:15.023837 154064 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" multinode-541903
I0223 04:37:15.085479 154064 main.go:141] libmachine: Using SSH client type: native
I0223 04:37:15.085904 154064 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x17560a0] 0x1759120 <nil> [] 0s} 127.0.0.1 32855 <nil> <nil>}
I0223 04:37:15.085923 154064 main.go:141] libmachine: About to run SSH command:
sudo diff -u /lib/systemd/system/docker.service /lib/systemd/system/docker.service.new || { sudo mv /lib/systemd/system/docker.service.new /lib/systemd/system/docker.service; sudo systemctl -f daemon-reload && sudo systemctl -f enable docker && sudo systemctl -f restart docker; }
I0223 04:37:15.704883 154064 main.go:141] libmachine: SSH cmd err, output: <nil>: --- /lib/systemd/system/docker.service 2023-02-09 19:46:56.000000000 +0000
+++ /lib/systemd/system/docker.service.new 2023-02-23 04:37:15.016544142 +0000
@@ -1,30 +1,32 @@
[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
-After=network-online.target docker.socket firewalld.service containerd.service time-set.target
-Wants=network-online.target containerd.service
+BindsTo=containerd.service
+After=network-online.target firewalld.service containerd.service
+Wants=network-online.target
Requires=docker.socket
+StartLimitBurst=3
+StartLimitIntervalSec=60
[Service]
Type=notify
-# the default is not to use systemd for cgroups because the delegate issues still
-# exists and systemd currently does not support the cgroup feature set required
-# for containers run by docker
-ExecStart=/usr/bin/dockerd -H fd:// --containerd=/run/containerd/containerd.sock
-ExecReload=/bin/kill -s HUP $MAINPID
-TimeoutStartSec=0
-RestartSec=2
-Restart=always
+Restart=on-failure
-# Note that StartLimit* options were moved from "Service" to "Unit" in systemd 229.
-# Both the old, and new location are accepted by systemd 229 and up, so using the old location
-# to make them work for either version of systemd.
-StartLimitBurst=3
-# Note that StartLimitInterval was renamed to StartLimitIntervalSec in systemd 230.
-# Both the old, and new name are accepted by systemd 230 and up, so using the old name to make
-# this option work for either version of systemd.
-StartLimitInterval=60s
+
+# This file is a systemd drop-in unit that inherits from the base dockerd configuration.
+# The base configuration already specifies an 'ExecStart=...' command. The first directive
+# here is to clear out that command inherited from the base configuration. Without this,
+# the command from the base configuration and the command specified here are treated as
+# a sequence of commands, which is not the desired behavior, nor is it valid -- systemd
+# will catch this invalid input and refuse to start the service with an error like:
+# Service has more than one ExecStart= setting, which is only allowed for Type=oneshot services.
+
+# NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other
+# container runtimes. If left unlimited, it may result in OOM issues with MySQL.
+ExecStart=
+ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=docker --insecure-registry 10.96.0.0/12
+ExecReload=/bin/kill -s HUP $MAINPID
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
@@ -32,16 +34,16 @@
LimitNPROC=infinity
LimitCORE=infinity
-# Comment TasksMax if your systemd version does not support it.
-# Only systemd 226 and above support this option.
+# Uncomment TasksMax if your systemd version supports it.
+# Only systemd 226 and above support this option.
TasksMax=infinity
+TimeoutStartSec=0
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process
-OOMScoreAdjust=-500
[Install]
WantedBy=multi-user.target
Synchronizing state of docker.service with SysV service script with /lib/systemd/systemd-sysv-install.
Executing: /lib/systemd/systemd-sysv-install enable docker
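The `diff -u old new || { mv; daemon-reload; restart; }` construct run over SSH above is an idempotency guard: diff exits 0 when the rendered unit already matches the installed one, so the replace-and-restart branch only fires on a real change, and the unified diff doubles as the log of what changed. A sketch of the same pattern for an arbitrary config file (the generator, file, and service names are placeholders):

# Install a new config and restart only when the content actually changed.
generate_config > /tmp/example.conf.new        # placeholder generator
if ! sudo diff -u /etc/example.conf /tmp/example.conf.new; then
  sudo mv /tmp/example.conf.new /etc/example.conf
  sudo systemctl daemon-reload
  sudo systemctl restart example.service
fi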
I0223 04:37:15.704912 154064 machine.go:91] provisioned docker machine in 2.025579191s
I0223 04:37:15.704920 154064 client.go:171] LocalClient.Create took 9.122144632s
I0223 04:37:15.704934 154064 start.go:167] duration metric: libmachine.API.Create for "multinode-541903" took 9.122193128s
I0223 04:37:15.704940 154064 start.go:300] post-start starting for "multinode-541903" (driver="docker")
I0223 04:37:15.704947 154064 start.go:328] creating required directories: [/etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs]
I0223 04:37:15.705005 154064 ssh_runner.go:195] Run: sudo mkdir -p /etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs
I0223 04:37:15.705046 154064 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" multinode-541903
I0223 04:37:15.770748 154064 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32855 SSHKeyPath:/home/jenkins/minikube-integration/15909-3701/.minikube/machines/multinode-541903/id_rsa Username:docker}
I0223 04:37:15.862873 154064 ssh_runner.go:195] Run: cat /etc/os-release
I0223 04:37:15.865274 154064 command_runner.go:130] > NAME="Ubuntu"
I0223 04:37:15.865292 154064 command_runner.go:130] > VERSION="20.04.5 LTS (Focal Fossa)"
I0223 04:37:15.865305 154064 command_runner.go:130] > ID=ubuntu
I0223 04:37:15.865313 154064 command_runner.go:130] > ID_LIKE=debian
I0223 04:37:15.865321 154064 command_runner.go:130] > PRETTY_NAME="Ubuntu 20.04.5 LTS"
I0223 04:37:15.865328 154064 command_runner.go:130] > VERSION_ID="20.04"
I0223 04:37:15.865340 154064 command_runner.go:130] > HOME_URL="https://www.ubuntu.com/"
I0223 04:37:15.865346 154064 command_runner.go:130] > SUPPORT_URL="https://help.ubuntu.com/"
I0223 04:37:15.865354 154064 command_runner.go:130] > BUG_REPORT_URL="https://bugs.launchpad.net/ubuntu/"
I0223 04:37:15.865361 154064 command_runner.go:130] > PRIVACY_POLICY_URL="https://www.ubuntu.com/legal/terms-and-policies/privacy-policy"
I0223 04:37:15.865368 154064 command_runner.go:130] > VERSION_CODENAME=focal
I0223 04:37:15.865372 154064 command_runner.go:130] > UBUNTU_CODENAME=focal
I0223 04:37:15.865452 154064 main.go:141] libmachine: Couldn't set key PRIVACY_POLICY_URL, no corresponding struct field found
I0223 04:37:15.865468 154064 main.go:141] libmachine: Couldn't set key VERSION_CODENAME, no corresponding struct field found
I0223 04:37:15.865477 154064 main.go:141] libmachine: Couldn't set key UBUNTU_CODENAME, no corresponding struct field found
I0223 04:37:15.865487 154064 info.go:137] Remote host: Ubuntu 20.04.5 LTS
I0223 04:37:15.865499 154064 filesync.go:126] Scanning /home/jenkins/minikube-integration/15909-3701/.minikube/addons for local assets ...
I0223 04:37:15.865558 154064 filesync.go:126] Scanning /home/jenkins/minikube-integration/15909-3701/.minikube/files for local assets ...
I0223 04:37:15.865645 154064 filesync.go:149] local asset: /home/jenkins/minikube-integration/15909-3701/.minikube/files/etc/ssl/certs/103762.pem -> 103762.pem in /etc/ssl/certs
I0223 04:37:15.865662 154064 vm_assets.go:163] NewFileAsset: /home/jenkins/minikube-integration/15909-3701/.minikube/files/etc/ssl/certs/103762.pem -> /etc/ssl/certs/103762.pem
I0223 04:37:15.865758 154064 ssh_runner.go:195] Run: sudo mkdir -p /etc/ssl/certs
I0223 04:37:15.871947 154064 ssh_runner.go:362] scp /home/jenkins/minikube-integration/15909-3701/.minikube/files/etc/ssl/certs/103762.pem --> /etc/ssl/certs/103762.pem (1708 bytes)
I0223 04:37:15.888066 154064 start.go:303] post-start completed in 183.114184ms
I0223 04:37:15.888393 154064 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" multinode-541903
I0223 04:37:15.952886 154064 profile.go:148] Saving config to /home/jenkins/minikube-integration/15909-3701/.minikube/profiles/multinode-541903/config.json ...
I0223 04:37:15.953109 154064 ssh_runner.go:195] Run: sh -c "df -h /var | awk 'NR==2{print $5}'"
I0223 04:37:15.953149 154064 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" multinode-541903
I0223 04:37:16.014692 154064 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32855 SSHKeyPath:/home/jenkins/minikube-integration/15909-3701/.minikube/machines/multinode-541903/id_rsa Username:docker}
I0223 04:37:16.103480 154064 command_runner.go:130] > 16%
I0223 04:37:16.103668 154064 ssh_runner.go:195] Run: sh -c "df -BG /var | awk 'NR==2{print $4}'"
I0223 04:37:16.109422 154064 command_runner.go:130] > 245G
I0223 04:37:16.109451 154064 start.go:128] duration metric: createHost completed in 9.529019059s
I0223 04:37:16.109461 154064 start.go:83] releasing machines lock for "multinode-541903", held for 9.529129599s
I0223 04:37:16.109532 154064 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" multinode-541903
I0223 04:37:16.173354 154064 ssh_runner.go:195] Run: cat /version.json
I0223 04:37:16.173403 154064 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" multinode-541903
I0223 04:37:16.173450 154064 ssh_runner.go:195] Run: curl -sS -m 2 https://registry.k8s.io/
I0223 04:37:16.173509 154064 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" multinode-541903
I0223 04:37:16.242904 154064 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32855 SSHKeyPath:/home/jenkins/minikube-integration/15909-3701/.minikube/machines/multinode-541903/id_rsa Username:docker}
I0223 04:37:16.244812 154064 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32855 SSHKeyPath:/home/jenkins/minikube-integration/15909-3701/.minikube/machines/multinode-541903/id_rsa Username:docker}
I0223 04:37:16.330533 154064 command_runner.go:130] > {"iso_version": "v1.29.0-1676397967-15752", "kicbase_version": "v0.0.37-1676506612-15768", "minikube_version": "v1.29.0", "commit": "1ecebb4330bc6283999d4ca9b3c62a9eeee8c692"}
I0223 04:37:16.357648 154064 command_runner.go:130] > <a href="https://github.com/kubernetes/registry.k8s.io">Temporary Redirect</a>.
I0223 04:37:16.358918 154064 ssh_runner.go:195] Run: systemctl --version
I0223 04:37:16.362192 154064 command_runner.go:130] > systemd 245 (245.4-4ubuntu3.19)
I0223 04:37:16.362214 154064 command_runner.go:130] > +PAM +AUDIT +SELINUX +IMA +APPARMOR +SMACK +SYSVINIT +UTMP +LIBCRYPTSETUP +GCRYPT +GNUTLS +ACL +XZ +LZ4 +SECCOMP +BLKID +ELFUTILS +KMOD +IDN2 -IDN +PCRE2 default-hierarchy=hybrid
I0223 04:37:16.362268 154064 ssh_runner.go:195] Run: sh -c "stat /etc/cni/net.d/*loopback.conf*"
I0223 04:37:16.365569 154064 command_runner.go:130] > File: /etc/cni/net.d/200-loopback.conf
I0223 04:37:16.365584 154064 command_runner.go:130] > Size: 54 Blocks: 8 IO Block: 4096 regular file
I0223 04:37:16.365595 154064 command_runner.go:130] > Device: 36h/54d Inode: 1319768 Links: 1
I0223 04:37:16.365601 154064 command_runner.go:130] > Access: (0644/-rw-r--r--) Uid: ( 0/ root) Gid: ( 0/ root)
I0223 04:37:16.365611 154064 command_runner.go:130] > Access: 2023-01-10 16:48:19.000000000 +0000
I0223 04:37:16.365616 154064 command_runner.go:130] > Modify: 2023-01-10 16:48:19.000000000 +0000
I0223 04:37:16.365622 154064 command_runner.go:130] > Change: 2023-02-23 04:22:37.544324683 +0000
I0223 04:37:16.365626 154064 command_runner.go:130] > Birth: -
I0223 04:37:16.365671 154064 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f -name *loopback.conf* -not -name *.mk_disabled -exec sh -c "grep -q loopback {} && ( grep -q name {} || sudo sed -i '/"type": "loopback"/i \ \ \ \ "name": "loopback",' {} ) && sudo sed -i 's|"cniVersion": ".*"|"cniVersion": "1.0.0"|g' {}" ;
I0223 04:37:16.383330 154064 cni.go:229] loopback cni configuration patched: "/etc/cni/net.d/*loopback.conf*" found
I0223 04:37:16.383372 154064 ssh_runner.go:195] Run: which cri-dockerd
I0223 04:37:16.385739 154064 command_runner.go:130] > /usr/bin/cri-dockerd
I0223 04:37:16.385826 154064 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/cri-docker.service.d
I0223 04:37:16.391843 154064 ssh_runner.go:362] scp memory --> /etc/systemd/system/cri-docker.service.d/10-cni.conf (135 bytes)
I0223 04:37:16.402955 154064 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f ( ( -name *bridge* -or -name *podman* ) -and -not -name *.mk_disabled ) -printf "%p, " -exec sh -c "sudo mv {} {}.mk_disabled" ;
I0223 04:37:16.415876 154064 command_runner.go:139] > /etc/cni/net.d/100-crio-bridge.conf,
I0223 04:37:16.415925 154064 cni.go:261] disabled [/etc/cni/net.d/100-crio-bridge.conf] bridge cni config(s)
I0223 04:37:16.415940 154064 start.go:485] detecting cgroup driver to use...
I0223 04:37:16.415968 154064 detect.go:196] detected "cgroupfs" cgroup driver on host os
I0223 04:37:16.416065 154064 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///run/containerd/containerd.sock
image-endpoint: unix:///run/containerd/containerd.sock
" | sudo tee /etc/crictl.yaml"
I0223 04:37:16.426419 154064 command_runner.go:130] > runtime-endpoint: unix:///run/containerd/containerd.sock
I0223 04:37:16.426439 154064 command_runner.go:130] > image-endpoint: unix:///run/containerd/containerd.sock
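/etc/crictl.yaml, written above, tells the crictl debugging CLI which CRI socket to talk to; at this point it targets containerd, and it is rewritten to point at cri-dockerd a few steps later once Docker is selected as the runtime. With the file in place no endpoint flags are needed, though a flag still overrides it (a sketch):

# crictl reads /etc/crictl.yaml by default:
sudo crictl version
# An explicit endpoint flag overrides the config file:
sudo crictl --runtime-endpoint unix:///run/containerd/containerd.sock version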
I0223 04:37:16.427087 154064 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)sandbox_image = .*$|\1sandbox_image = "registry.k8s.io/pause:3.9"|' /etc/containerd/config.toml"
I0223 04:37:16.433912 154064 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)restrict_oom_score_adj = .*$|\1restrict_oom_score_adj = false|' /etc/containerd/config.toml"
I0223 04:37:16.440548 154064 containerd.go:145] configuring containerd to use "cgroupfs" as cgroup driver...
I0223 04:37:16.440584 154064 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)SystemdCgroup = .*$|\1SystemdCgroup = false|g' /etc/containerd/config.toml"
I0223 04:37:16.447697 154064 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runtime.v1.linux"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I0223 04:37:16.454624 154064 ssh_runner.go:195] Run: sh -c "sudo sed -i '/systemd_cgroup/d' /etc/containerd/config.toml"
I0223 04:37:16.461278 154064 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runc.v1"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I0223 04:37:16.468031 154064 ssh_runner.go:195] Run: sh -c "sudo rm -rf /etc/cni/net.mk"
I0223 04:37:16.474371 154064 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)conf_dir = .*$|\1conf_dir = "/etc/cni/net.d"|g' /etc/containerd/config.toml"
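The run of sed edits above rewrites /etc/containerd/config.toml in place so containerd uses the runc v2 shim, the detected "cgroupfs" driver (SystemdCgroup = false), the pause:3.9 sandbox image, and /etc/cni/net.d for CNI config. One quick way to confirm the edits in containerd's own merged view of its configuration (a sketch):

# 'containerd config dump' prints the effective config after all edits:
containerd config dump | grep -E 'SystemdCgroup|sandbox_image|conf_dir'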
I0223 04:37:16.481973 154064 ssh_runner.go:195] Run: sudo sysctl net.bridge.bridge-nf-call-iptables
I0223 04:37:16.487952 154064 command_runner.go:130] > net.bridge.bridge-nf-call-iptables = 1
I0223 04:37:16.488461 154064 ssh_runner.go:195] Run: sudo sh -c "echo 1 > /proc/sys/net/ipv4/ip_forward"
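The two sysctl steps above are the kernel networking prerequisites kubeadm checks for: bridged traffic must traverse iptables and IPv4 forwarding must be on, or pod-to-pod traffic silently stops at the node. This run sets them ad hoc through /proc; the persistent equivalent would be (a sketch):

# Persist the settings across reboots instead of echoing into /proc:
cat <<'EOF' | sudo tee /etc/sysctl.d/99-kubernetes.conf
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
EOF
sudo sysctl --system   # reload all sysctl configuration files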
I0223 04:37:16.494264 154064 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0223 04:37:16.566533 154064 ssh_runner.go:195] Run: sudo systemctl restart containerd
I0223 04:37:16.640696 154064 start.go:485] detecting cgroup driver to use...
I0223 04:37:16.640741 154064 detect.go:196] detected "cgroupfs" cgroup driver on host os
I0223 04:37:16.640790 154064 ssh_runner.go:195] Run: sudo systemctl cat docker.service
I0223 04:37:16.649192 154064 command_runner.go:130] > # /lib/systemd/system/docker.service
I0223 04:37:16.649211 154064 command_runner.go:130] > [Unit]
I0223 04:37:16.649225 154064 command_runner.go:130] > Description=Docker Application Container Engine
I0223 04:37:16.649234 154064 command_runner.go:130] > Documentation=https://docs.docker.com
I0223 04:37:16.649242 154064 command_runner.go:130] > BindsTo=containerd.service
I0223 04:37:16.649251 154064 command_runner.go:130] > After=network-online.target firewalld.service containerd.service
I0223 04:37:16.649261 154064 command_runner.go:130] > Wants=network-online.target
I0223 04:37:16.649270 154064 command_runner.go:130] > Requires=docker.socket
I0223 04:37:16.649279 154064 command_runner.go:130] > StartLimitBurst=3
I0223 04:37:16.649286 154064 command_runner.go:130] > StartLimitIntervalSec=60
I0223 04:37:16.649293 154064 command_runner.go:130] > [Service]
I0223 04:37:16.649297 154064 command_runner.go:130] > Type=notify
I0223 04:37:16.649303 154064 command_runner.go:130] > Restart=on-failure
I0223 04:37:16.649318 154064 command_runner.go:130] > # This file is a systemd drop-in unit that inherits from the base dockerd configuration.
I0223 04:37:16.649333 154064 command_runner.go:130] > # The base configuration already specifies an 'ExecStart=...' command. The first directive
I0223 04:37:16.649347 154064 command_runner.go:130] > # here is to clear out that command inherited from the base configuration. Without this,
I0223 04:37:16.649360 154064 command_runner.go:130] > # the command from the base configuration and the command specified here are treated as
I0223 04:37:16.649373 154064 command_runner.go:130] > # a sequence of commands, which is not the desired behavior, nor is it valid -- systemd
I0223 04:37:16.649383 154064 command_runner.go:130] > # will catch this invalid input and refuse to start the service with an error like:
I0223 04:37:16.649393 154064 command_runner.go:130] > # Service has more than one ExecStart= setting, which is only allowed for Type=oneshot services.
I0223 04:37:16.649417 154064 command_runner.go:130] > # NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other
I0223 04:37:16.649432 154064 command_runner.go:130] > # container runtimes. If left unlimited, it may result in OOM issues with MySQL.
I0223 04:37:16.649438 154064 command_runner.go:130] > ExecStart=
I0223 04:37:16.649463 154064 command_runner.go:130] > ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=docker --insecure-registry 10.96.0.0/12
I0223 04:37:16.649471 154064 command_runner.go:130] > ExecReload=/bin/kill -s HUP $MAINPID
I0223 04:37:16.649481 154064 command_runner.go:130] > # Having non-zero Limit*s causes performance problems due to accounting overhead
I0223 04:37:16.649494 154064 command_runner.go:130] > # in the kernel. We recommend using cgroups to do container-local accounting.
I0223 04:37:16.649503 154064 command_runner.go:130] > LimitNOFILE=infinity
I0223 04:37:16.649510 154064 command_runner.go:130] > LimitNPROC=infinity
I0223 04:37:16.649519 154064 command_runner.go:130] > LimitCORE=infinity
I0223 04:37:16.649528 154064 command_runner.go:130] > # Uncomment TasksMax if your systemd version supports it.
I0223 04:37:16.649539 154064 command_runner.go:130] > # Only systemd 226 and above support this option.
I0223 04:37:16.649547 154064 command_runner.go:130] > TasksMax=infinity
I0223 04:37:16.649551 154064 command_runner.go:130] > TimeoutStartSec=0
I0223 04:37:16.649562 154064 command_runner.go:130] > # set delegate yes so that systemd does not reset the cgroups of docker containers
I0223 04:37:16.649571 154064 command_runner.go:130] > Delegate=yes
I0223 04:37:16.649580 154064 command_runner.go:130] > # kill only the docker process, not all processes in the cgroup
I0223 04:37:16.649590 154064 command_runner.go:130] > KillMode=process
I0223 04:37:16.649605 154064 command_runner.go:130] > [Install]
I0223 04:37:16.649615 154064 command_runner.go:130] > WantedBy=multi-user.target
I0223 04:37:16.650082 154064 cruntime.go:273] skipping containerd shutdown because we are bound to it
I0223 04:37:16.650142 154064 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service crio
I0223 04:37:16.659386 154064 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///var/run/cri-dockerd.sock
image-endpoint: unix:///var/run/cri-dockerd.sock
" | sudo tee /etc/crictl.yaml"
I0223 04:37:16.671389 154064 command_runner.go:130] > runtime-endpoint: unix:///var/run/cri-dockerd.sock
I0223 04:37:16.671421 154064 command_runner.go:130] > image-endpoint: unix:///var/run/cri-dockerd.sock
I0223 04:37:16.672205 154064 ssh_runner.go:195] Run: sudo systemctl unmask docker.service
I0223 04:37:16.748475 154064 ssh_runner.go:195] Run: sudo systemctl enable docker.socket
I0223 04:37:16.835076 154064 docker.go:529] configuring docker to use "cgroupfs" as cgroup driver...
I0223 04:37:16.835102 154064 ssh_runner.go:362] scp memory --> /etc/docker/daemon.json (144 bytes)
I0223 04:37:16.848446 154064 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0223 04:37:16.927598 154064 ssh_runner.go:195] Run: sudo systemctl restart docker
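The 144-byte /etc/docker/daemon.json pushed just above pins the engine's cgroup driver to "cgroupfs" so it matches the kubelet; kubeadm refuses mismatched drivers. After the restart the agreement can be checked directly (the same docker invocation also appears later in this log):

# The engine reports its active cgroup driver:
docker info --format '{{.CgroupDriver}}'    # expected here: cgroupfs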
I0223 04:37:17.124532 154064 ssh_runner.go:195] Run: sudo systemctl enable cri-docker.socket
I0223 04:37:17.134217 154064 command_runner.go:130] ! Created symlink /etc/systemd/system/sockets.target.wants/cri-docker.socket → /lib/systemd/system/cri-docker.socket.
I0223 04:37:17.202148 154064 ssh_runner.go:195] Run: sudo systemctl unmask cri-docker.socket
I0223 04:37:17.277472 154064 ssh_runner.go:195] Run: sudo systemctl enable cri-docker.socket
I0223 04:37:17.346030 154064 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0223 04:37:17.413972 154064 ssh_runner.go:195] Run: sudo systemctl restart cri-docker.socket
I0223 04:37:17.424132 154064 start.go:532] Will wait 60s for socket path /var/run/cri-dockerd.sock
I0223 04:37:17.424185 154064 ssh_runner.go:195] Run: stat /var/run/cri-dockerd.sock
I0223 04:37:17.426934 154064 command_runner.go:130] > File: /var/run/cri-dockerd.sock
I0223 04:37:17.426955 154064 command_runner.go:130] > Size: 0 Blocks: 0 IO Block: 4096 socket
I0223 04:37:17.426964 154064 command_runner.go:130] > Device: 3fh/63d Inode: 206 Links: 1
I0223 04:37:17.426974 154064 command_runner.go:130] > Access: (0660/srw-rw----) Uid: ( 0/ root) Gid: ( 999/ docker)
I0223 04:37:17.426984 154064 command_runner.go:130] > Access: 2023-02-23 04:37:17.416785464 +0000
I0223 04:37:17.426992 154064 command_runner.go:130] > Modify: 2023-02-23 04:37:17.416785464 +0000
I0223 04:37:17.427003 154064 command_runner.go:130] > Change: 2023-02-23 04:37:17.416785464 +0000
I0223 04:37:17.427010 154064 command_runner.go:130] > Birth: -
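cri-docker.socket is a systemd socket unit, which is why /var/run/cri-dockerd.sock can be stat'ed immediately after the socket restart even though cri-dockerd has not yet served a request: systemd binds the socket itself and starts the service on first connection. Inspecting the pairing by hand (a sketch):

# The socket unit owns the listener and triggers the service on demand:
systemctl status cri-docker.socket --no-pager   # shows Listen= and Triggers=
ss -xl | grep cri-dockerd                       # the unix socket is already bound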
I0223 04:37:17.427034 154064 start.go:553] Will wait 60s for crictl version
I0223 04:37:17.427075 154064 ssh_runner.go:195] Run: which crictl
I0223 04:37:17.429403 154064 command_runner.go:130] > /usr/bin/crictl
I0223 04:37:17.429486 154064 ssh_runner.go:195] Run: sudo /usr/bin/crictl version
I0223 04:37:17.501080 154064 command_runner.go:130] > Version: 0.1.0
I0223 04:37:17.501102 154064 command_runner.go:130] > RuntimeName: docker
I0223 04:37:17.501109 154064 command_runner.go:130] > RuntimeVersion: 23.0.1
I0223 04:37:17.501118 154064 command_runner.go:130] > RuntimeApiVersion: v1alpha2
I0223 04:37:17.502677 154064 start.go:569] Version: 0.1.0
RuntimeName: docker
RuntimeVersion: 23.0.1
RuntimeApiVersion: v1alpha2
I0223 04:37:17.502722 154064 ssh_runner.go:195] Run: docker version --format {{.Server.Version}}
I0223 04:37:17.523071 154064 command_runner.go:130] > 23.0.1
I0223 04:37:17.523143 154064 ssh_runner.go:195] Run: docker version --format {{.Server.Version}}
I0223 04:37:17.543232 154064 command_runner.go:130] > 23.0.1
I0223 04:37:17.547752 154064 out.go:204] * Preparing Kubernetes v1.26.1 on Docker 23.0.1 ...
I0223 04:37:17.547834 154064 cli_runner.go:164] Run: docker network inspect multinode-541903 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I0223 04:37:17.608731 154064 ssh_runner.go:195] Run: grep 192.168.58.1 host.minikube.internal$ /etc/hosts
I0223 04:37:17.611762 154064 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\thost.minikube.internal$' "/etc/hosts"; echo "192.168.58.1 host.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
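That /etc/hosts one-liner is a replace-or-add: grep -v strips any stale host.minikube.internal line, the echo appends the fresh mapping, the result lands in a temp file keyed by the shell PID, and cp (not mv) writes it back so the original inode survives, which matters because /etc/hosts is bind-mounted into the container. The same pattern with placeholder values:

# Replace-or-add a hosts entry; cp preserves the bind-mounted inode:
{ grep -v $'\texample.internal$' /etc/hosts; printf '10.0.0.5\texample.internal\n'; } > /tmp/h.$$
sudo cp /tmp/h.$$ /etc/hosts && rm -f /tmp/h.$$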
I0223 04:37:17.620653 154064 preload.go:132] Checking if preload exists for k8s version v1.26.1 and runtime docker
I0223 04:37:17.620718 154064 ssh_runner.go:195] Run: docker images --format {{.Repository}}:{{.Tag}}
I0223 04:37:17.636012 154064 command_runner.go:130] > registry.k8s.io/kube-apiserver:v1.26.1
I0223 04:37:17.636034 154064 command_runner.go:130] > registry.k8s.io/kube-controller-manager:v1.26.1
I0223 04:37:17.636039 154064 command_runner.go:130] > registry.k8s.io/kube-scheduler:v1.26.1
I0223 04:37:17.636045 154064 command_runner.go:130] > registry.k8s.io/kube-proxy:v1.26.1
I0223 04:37:17.636049 154064 command_runner.go:130] > registry.k8s.io/etcd:3.5.6-0
I0223 04:37:17.636053 154064 command_runner.go:130] > registry.k8s.io/pause:3.9
I0223 04:37:17.636058 154064 command_runner.go:130] > registry.k8s.io/coredns/coredns:v1.9.3
I0223 04:37:17.636068 154064 command_runner.go:130] > gcr.io/k8s-minikube/storage-provisioner:v5
I0223 04:37:17.636926 154064 docker.go:630] Got preloaded images: -- stdout --
registry.k8s.io/kube-apiserver:v1.26.1
registry.k8s.io/kube-controller-manager:v1.26.1
registry.k8s.io/kube-scheduler:v1.26.1
registry.k8s.io/kube-proxy:v1.26.1
registry.k8s.io/etcd:3.5.6-0
registry.k8s.io/pause:3.9
registry.k8s.io/coredns/coredns:v1.9.3
gcr.io/k8s-minikube/storage-provisioner:v5
-- /stdout --
I0223 04:37:17.636948 154064 docker.go:560] Images already preloaded, skipping extraction
I0223 04:37:17.636999 154064 ssh_runner.go:195] Run: docker images --format {{.Repository}}:{{.Tag}}
I0223 04:37:17.652356 154064 command_runner.go:130] > registry.k8s.io/kube-apiserver:v1.26.1
I0223 04:37:17.652376 154064 command_runner.go:130] > registry.k8s.io/kube-controller-manager:v1.26.1
I0223 04:37:17.652381 154064 command_runner.go:130] > registry.k8s.io/kube-scheduler:v1.26.1
I0223 04:37:17.652386 154064 command_runner.go:130] > registry.k8s.io/kube-proxy:v1.26.1
I0223 04:37:17.652391 154064 command_runner.go:130] > registry.k8s.io/etcd:3.5.6-0
I0223 04:37:17.652395 154064 command_runner.go:130] > registry.k8s.io/pause:3.9
I0223 04:37:17.652400 154064 command_runner.go:130] > registry.k8s.io/coredns/coredns:v1.9.3
I0223 04:37:17.652405 154064 command_runner.go:130] > gcr.io/k8s-minikube/storage-provisioner:v5
I0223 04:37:17.653358 154064 docker.go:630] Got preloaded images: -- stdout --
registry.k8s.io/kube-apiserver:v1.26.1
registry.k8s.io/kube-controller-manager:v1.26.1
registry.k8s.io/kube-scheduler:v1.26.1
registry.k8s.io/kube-proxy:v1.26.1
registry.k8s.io/etcd:3.5.6-0
registry.k8s.io/pause:3.9
registry.k8s.io/coredns/coredns:v1.9.3
gcr.io/k8s-minikube/storage-provisioner:v5
-- /stdout --
I0223 04:37:17.653375 154064 cache_images.go:84] Images are preloaded, skipping loading
I0223 04:37:17.653422 154064 ssh_runner.go:195] Run: docker info --format {{.CgroupDriver}}
I0223 04:37:17.673311 154064 command_runner.go:130] > cgroupfs
I0223 04:37:17.674457 154064 cni.go:84] Creating CNI manager for ""
I0223 04:37:17.674473 154064 cni.go:136] 1 nodes found, recommending kindnet
I0223 04:37:17.674488 154064 kubeadm.go:87] Using pod CIDR: 10.244.0.0/16
I0223 04:37:17.674510 154064 kubeadm.go:172] kubeadm options: {CertDir:/var/lib/minikube/certs ServiceCIDR:10.96.0.0/12 PodSubnet:10.244.0.0/16 AdvertiseAddress:192.168.58.2 APIServerPort:8443 KubernetesVersion:v1.26.1 EtcdDataDir:/var/lib/minikube/etcd EtcdExtraArgs:map[] ClusterName:multinode-541903 NodeName:multinode-541903 DNSDomain:cluster.local CRISocket:/var/run/cri-dockerd.sock ImageRepository: ComponentOptions:[{Component:apiServer ExtraArgs:map[enable-admission-plugins:NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota] Pairs:map[certSANs:["127.0.0.1", "localhost", "192.168.58.2"]]} {Component:controllerManager ExtraArgs:map[allocate-node-cidrs:true leader-elect:false] Pairs:map[]} {Component:scheduler ExtraArgs:map[leader-elect:false] Pairs:map[]}] FeatureArgs:map[] NodeIP:192.168.58.2 CgroupDriver:cgroupfs ClientCAFile:/var/lib/minikube/certs/ca.crt StaticPodPath:/etc/kubernetes/manifests ControlPlaneAddress:control-plane.minikube.internal KubeProxyOptions:map[] ResolvConfSearchRegression:false KubeletConfigOpts:map[hairpinMode:hairpin-veth runtimeRequestTimeout:15m]}
I0223 04:37:17.674651 154064 kubeadm.go:177] kubeadm config:
apiVersion: kubeadm.k8s.io/v1beta3
kind: InitConfiguration
localAPIEndpoint:
advertiseAddress: 192.168.58.2
bindPort: 8443
bootstrapTokens:
- groups:
- system:bootstrappers:kubeadm:default-node-token
ttl: 24h0m0s
usages:
- signing
- authentication
nodeRegistration:
criSocket: /var/run/cri-dockerd.sock
name: "multinode-541903"
kubeletExtraArgs:
node-ip: 192.168.58.2
taints: []
---
apiVersion: kubeadm.k8s.io/v1beta3
kind: ClusterConfiguration
apiServer:
certSANs: ["127.0.0.1", "localhost", "192.168.58.2"]
extraArgs:
enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
controllerManager:
extraArgs:
allocate-node-cidrs: "true"
leader-elect: "false"
scheduler:
extraArgs:
leader-elect: "false"
certificatesDir: /var/lib/minikube/certs
clusterName: mk
controlPlaneEndpoint: control-plane.minikube.internal:8443
etcd:
local:
dataDir: /var/lib/minikube/etcd
extraArgs:
proxy-refresh-interval: "70000"
kubernetesVersion: v1.26.1
networking:
dnsDomain: cluster.local
podSubnet: "10.244.0.0/16"
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
authentication:
x509:
clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: cgroupfs
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"
nodefs.inodesFree: "0%"
imagefs.available: "0%"
failSwapOn: false
staticPodPath: /etc/kubernetes/manifests
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
maxPerCore: 0
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_established"
tcpEstablishedTimeout: 0s
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_close"
tcpCloseWaitTimeout: 0s
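The kubeadm config rendered above stacks four documents in one file (InitConfiguration, ClusterConfiguration, KubeletConfiguration, KubeProxyConfiguration); kubeadm splits them by kind. On kubeadm v1.26 and later, such a file can be sanity-checked without touching the node (a sketch; the path is where this run uploads the file):

# Parse and validate the stacked config, reporting unknown fields:
sudo kubeadm config validate --config /var/tmp/minikube/kubeadm.yaml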
I0223 04:37:17.674725 154064 kubeadm.go:968] kubelet [Unit]
Wants=docker.socket
[Service]
ExecStart=
ExecStart=/var/lib/minikube/binaries/v1.26.1/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --config=/var/lib/kubelet/config.yaml --container-runtime=remote --container-runtime-endpoint=/var/run/cri-dockerd.sock --hostname-override=multinode-541903 --image-service-endpoint=/var/run/cri-dockerd.sock --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.58.2
[Install]
config:
{KubernetesVersion:v1.26.1 ClusterName:multinode-541903 Namespace:default APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI: NodeIP: NodePort:8443 NodeName:}
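The kubelet drop-in shown above uses the same clear-then-set ExecStart idiom as the docker unit, and wires the kubelet to cri-dockerd through --container-runtime-endpoint and --image-service-endpoint. Once the files are written, the merged unit can be read back from systemd (a sketch):

# Show the kubelet unit plus all drop-ins, and the effective ExecStart:
systemctl cat kubelet.service
systemctl show -p ExecStart kubelet.service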
I0223 04:37:17.674769 154064 ssh_runner.go:195] Run: sudo ls /var/lib/minikube/binaries/v1.26.1
I0223 04:37:17.681105 154064 command_runner.go:130] > kubeadm
I0223 04:37:17.681123 154064 command_runner.go:130] > kubectl
I0223 04:37:17.681127 154064 command_runner.go:130] > kubelet
I0223 04:37:17.681143 154064 binaries.go:44] Found k8s binaries, skipping transfer
I0223 04:37:17.681181 154064 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/kubelet.service.d /lib/systemd/system /var/tmp/minikube
I0223 04:37:17.687150 154064 ssh_runner.go:362] scp memory --> /etc/systemd/system/kubelet.service.d/10-kubeadm.conf (448 bytes)
I0223 04:37:17.698589 154064 ssh_runner.go:362] scp memory --> /lib/systemd/system/kubelet.service (352 bytes)
I0223 04:37:17.710454 154064 ssh_runner.go:362] scp memory --> /var/tmp/minikube/kubeadm.yaml.new (2092 bytes)
I0223 04:37:17.721734 154064 ssh_runner.go:195] Run: grep 192.168.58.2 control-plane.minikube.internal$ /etc/hosts
I0223 04:37:17.724238 154064 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\tcontrol-plane.minikube.internal$' "/etc/hosts"; echo "192.168.58.2 control-plane.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I0223 04:37:17.732191 154064 certs.go:56] Setting up /home/jenkins/minikube-integration/15909-3701/.minikube/profiles/multinode-541903 for IP: 192.168.58.2
I0223 04:37:17.732218 154064 certs.go:186] acquiring lock for shared ca certs: {Name:mk899ab74bdb751a75c201c69d3c66668a7f7f94 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0223 04:37:17.732353 154064 certs.go:195] skipping minikubeCA CA generation: /home/jenkins/minikube-integration/15909-3701/.minikube/ca.key
I0223 04:37:17.732400 154064 certs.go:195] skipping proxyClientCA CA generation: /home/jenkins/minikube-integration/15909-3701/.minikube/proxy-client-ca.key
I0223 04:37:17.732454 154064 certs.go:315] generating minikube-user signed cert: /home/jenkins/minikube-integration/15909-3701/.minikube/profiles/multinode-541903/client.key
I0223 04:37:17.732475 154064 crypto.go:68] Generating cert /home/jenkins/minikube-integration/15909-3701/.minikube/profiles/multinode-541903/client.crt with IP's: []
I0223 04:37:17.968720 154064 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/15909-3701/.minikube/profiles/multinode-541903/client.crt ...
I0223 04:37:17.968747 154064 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/15909-3701/.minikube/profiles/multinode-541903/client.crt: {Name:mkaa40cc67a3f331fcb42f9537e9d553e6a6a804 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0223 04:37:17.968940 154064 crypto.go:164] Writing key to /home/jenkins/minikube-integration/15909-3701/.minikube/profiles/multinode-541903/client.key ...
I0223 04:37:17.968957 154064 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/15909-3701/.minikube/profiles/multinode-541903/client.key: {Name:mk593b764ae7f3c0a244309405a2f8e97b1be1a8 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0223 04:37:17.969062 154064 certs.go:315] generating minikube signed cert: /home/jenkins/minikube-integration/15909-3701/.minikube/profiles/multinode-541903/apiserver.key.cee25041
I0223 04:37:17.969082 154064 crypto.go:68] Generating cert /home/jenkins/minikube-integration/15909-3701/.minikube/profiles/multinode-541903/apiserver.crt.cee25041 with IP's: [192.168.58.2 10.96.0.1 127.0.0.1 10.0.0.1]
I0223 04:37:18.116421 154064 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/15909-3701/.minikube/profiles/multinode-541903/apiserver.crt.cee25041 ...
I0223 04:37:18.116446 154064 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/15909-3701/.minikube/profiles/multinode-541903/apiserver.crt.cee25041: {Name:mk0b0d8dba876a76936eab1cfdc6102734f6b762 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0223 04:37:18.116592 154064 crypto.go:164] Writing key to /home/jenkins/minikube-integration/15909-3701/.minikube/profiles/multinode-541903/apiserver.key.cee25041 ...
I0223 04:37:18.116603 154064 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/15909-3701/.minikube/profiles/multinode-541903/apiserver.key.cee25041: {Name:mk3df25e78e53e71aa6abe4b156d8ea557091aac Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0223 04:37:18.116664 154064 certs.go:333] copying /home/jenkins/minikube-integration/15909-3701/.minikube/profiles/multinode-541903/apiserver.crt.cee25041 -> /home/jenkins/minikube-integration/15909-3701/.minikube/profiles/multinode-541903/apiserver.crt
I0223 04:37:18.116732 154064 certs.go:337] copying /home/jenkins/minikube-integration/15909-3701/.minikube/profiles/multinode-541903/apiserver.key.cee25041 -> /home/jenkins/minikube-integration/15909-3701/.minikube/profiles/multinode-541903/apiserver.key
I0223 04:37:18.116779 154064 certs.go:315] generating aggregator signed cert: /home/jenkins/minikube-integration/15909-3701/.minikube/profiles/multinode-541903/proxy-client.key
I0223 04:37:18.116791 154064 crypto.go:68] Generating cert /home/jenkins/minikube-integration/15909-3701/.minikube/profiles/multinode-541903/proxy-client.crt with IP's: []
I0223 04:37:18.209342 154064 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/15909-3701/.minikube/profiles/multinode-541903/proxy-client.crt ...
I0223 04:37:18.209366 154064 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/15909-3701/.minikube/profiles/multinode-541903/proxy-client.crt: {Name:mkde504abc6aa6c7446c3b7c3e2463efce8178a3 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0223 04:37:18.209526 154064 crypto.go:164] Writing key to /home/jenkins/minikube-integration/15909-3701/.minikube/profiles/multinode-541903/proxy-client.key ...
I0223 04:37:18.209542 154064 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/15909-3701/.minikube/profiles/multinode-541903/proxy-client.key: {Name:mkacc7d59858b9c27b7029c7db3602ca38432d86 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0223 04:37:18.209629 154064 vm_assets.go:163] NewFileAsset: /home/jenkins/minikube-integration/15909-3701/.minikube/profiles/multinode-541903/apiserver.crt -> /var/lib/minikube/certs/apiserver.crt
I0223 04:37:18.209650 154064 vm_assets.go:163] NewFileAsset: /home/jenkins/minikube-integration/15909-3701/.minikube/profiles/multinode-541903/apiserver.key -> /var/lib/minikube/certs/apiserver.key
I0223 04:37:18.209670 154064 vm_assets.go:163] NewFileAsset: /home/jenkins/minikube-integration/15909-3701/.minikube/profiles/multinode-541903/proxy-client.crt -> /var/lib/minikube/certs/proxy-client.crt
I0223 04:37:18.209687 154064 vm_assets.go:163] NewFileAsset: /home/jenkins/minikube-integration/15909-3701/.minikube/profiles/multinode-541903/proxy-client.key -> /var/lib/minikube/certs/proxy-client.key
I0223 04:37:18.209704 154064 vm_assets.go:163] NewFileAsset: /home/jenkins/minikube-integration/15909-3701/.minikube/ca.crt -> /var/lib/minikube/certs/ca.crt
I0223 04:37:18.209720 154064 vm_assets.go:163] NewFileAsset: /home/jenkins/minikube-integration/15909-3701/.minikube/ca.key -> /var/lib/minikube/certs/ca.key
I0223 04:37:18.209732 154064 vm_assets.go:163] NewFileAsset: /home/jenkins/minikube-integration/15909-3701/.minikube/proxy-client-ca.crt -> /var/lib/minikube/certs/proxy-client-ca.crt
I0223 04:37:18.209751 154064 vm_assets.go:163] NewFileAsset: /home/jenkins/minikube-integration/15909-3701/.minikube/proxy-client-ca.key -> /var/lib/minikube/certs/proxy-client-ca.key
I0223 04:37:18.209820 154064 certs.go:401] found cert: /home/jenkins/minikube-integration/15909-3701/.minikube/certs/home/jenkins/minikube-integration/15909-3701/.minikube/certs/10376.pem (1338 bytes)
W0223 04:37:18.209865 154064 certs.go:397] ignoring /home/jenkins/minikube-integration/15909-3701/.minikube/certs/home/jenkins/minikube-integration/15909-3701/.minikube/certs/10376_empty.pem, impossibly tiny 0 bytes
I0223 04:37:18.209880 154064 certs.go:401] found cert: /home/jenkins/minikube-integration/15909-3701/.minikube/certs/home/jenkins/minikube-integration/15909-3701/.minikube/certs/ca-key.pem (1675 bytes)
I0223 04:37:18.209916 154064 certs.go:401] found cert: /home/jenkins/minikube-integration/15909-3701/.minikube/certs/home/jenkins/minikube-integration/15909-3701/.minikube/certs/ca.pem (1082 bytes)
I0223 04:37:18.209947 154064 certs.go:401] found cert: /home/jenkins/minikube-integration/15909-3701/.minikube/certs/home/jenkins/minikube-integration/15909-3701/.minikube/certs/cert.pem (1123 bytes)
I0223 04:37:18.209978 154064 certs.go:401] found cert: /home/jenkins/minikube-integration/15909-3701/.minikube/certs/home/jenkins/minikube-integration/15909-3701/.minikube/certs/key.pem (1675 bytes)
I0223 04:37:18.210040 154064 certs.go:401] found cert: /home/jenkins/minikube-integration/15909-3701/.minikube/files/etc/ssl/certs/home/jenkins/minikube-integration/15909-3701/.minikube/files/etc/ssl/certs/103762.pem (1708 bytes)
I0223 04:37:18.210075 154064 vm_assets.go:163] NewFileAsset: /home/jenkins/minikube-integration/15909-3701/.minikube/certs/10376.pem -> /usr/share/ca-certificates/10376.pem
I0223 04:37:18.210095 154064 vm_assets.go:163] NewFileAsset: /home/jenkins/minikube-integration/15909-3701/.minikube/files/etc/ssl/certs/103762.pem -> /usr/share/ca-certificates/103762.pem
I0223 04:37:18.210113 154064 vm_assets.go:163] NewFileAsset: /home/jenkins/minikube-integration/15909-3701/.minikube/ca.crt -> /usr/share/ca-certificates/minikubeCA.pem
I0223 04:37:18.210592 154064 ssh_runner.go:362] scp /home/jenkins/minikube-integration/15909-3701/.minikube/profiles/multinode-541903/apiserver.crt --> /var/lib/minikube/certs/apiserver.crt (1399 bytes)
I0223 04:37:18.227918 154064 ssh_runner.go:362] scp /home/jenkins/minikube-integration/15909-3701/.minikube/profiles/multinode-541903/apiserver.key --> /var/lib/minikube/certs/apiserver.key (1679 bytes)
I0223 04:37:18.243619 154064 ssh_runner.go:362] scp /home/jenkins/minikube-integration/15909-3701/.minikube/profiles/multinode-541903/proxy-client.crt --> /var/lib/minikube/certs/proxy-client.crt (1147 bytes)
I0223 04:37:18.259340 154064 ssh_runner.go:362] scp /home/jenkins/minikube-integration/15909-3701/.minikube/profiles/multinode-541903/proxy-client.key --> /var/lib/minikube/certs/proxy-client.key (1679 bytes)
I0223 04:37:18.275120 154064 ssh_runner.go:362] scp /home/jenkins/minikube-integration/15909-3701/.minikube/ca.crt --> /var/lib/minikube/certs/ca.crt (1111 bytes)
I0223 04:37:18.290371 154064 ssh_runner.go:362] scp /home/jenkins/minikube-integration/15909-3701/.minikube/ca.key --> /var/lib/minikube/certs/ca.key (1675 bytes)
I0223 04:37:18.305722 154064 ssh_runner.go:362] scp /home/jenkins/minikube-integration/15909-3701/.minikube/proxy-client-ca.crt --> /var/lib/minikube/certs/proxy-client-ca.crt (1119 bytes)
I0223 04:37:18.321104 154064 ssh_runner.go:362] scp /home/jenkins/minikube-integration/15909-3701/.minikube/proxy-client-ca.key --> /var/lib/minikube/certs/proxy-client-ca.key (1679 bytes)
I0223 04:37:18.336475 154064 ssh_runner.go:362] scp /home/jenkins/minikube-integration/15909-3701/.minikube/certs/10376.pem --> /usr/share/ca-certificates/10376.pem (1338 bytes)
I0223 04:37:18.351717 154064 ssh_runner.go:362] scp /home/jenkins/minikube-integration/15909-3701/.minikube/files/etc/ssl/certs/103762.pem --> /usr/share/ca-certificates/103762.pem (1708 bytes)
I0223 04:37:18.366648 154064 ssh_runner.go:362] scp /home/jenkins/minikube-integration/15909-3701/.minikube/ca.crt --> /usr/share/ca-certificates/minikubeCA.pem (1111 bytes)
I0223 04:37:18.381709 154064 ssh_runner.go:362] scp memory --> /var/lib/minikube/kubeconfig (738 bytes)
I0223 04:37:18.393039 154064 ssh_runner.go:195] Run: openssl version
I0223 04:37:18.397104 154064 command_runner.go:130] > OpenSSL 1.1.1f 31 Mar 2020
I0223 04:37:18.397286 154064 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/minikubeCA.pem && ln -fs /usr/share/ca-certificates/minikubeCA.pem /etc/ssl/certs/minikubeCA.pem"
I0223 04:37:18.403637 154064 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/minikubeCA.pem
I0223 04:37:18.406173 154064 command_runner.go:130] > -rw-r--r-- 1 root root 1111 Feb 23 04:22 /usr/share/ca-certificates/minikubeCA.pem
I0223 04:37:18.406297 154064 certs.go:444] hashing: -rw-r--r-- 1 root root 1111 Feb 23 04:22 /usr/share/ca-certificates/minikubeCA.pem
I0223 04:37:18.406343 154064 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem
I0223 04:37:18.410450 154064 command_runner.go:130] > b5213941
I0223 04:37:18.410588 154064 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/b5213941.0 || ln -fs /etc/ssl/certs/minikubeCA.pem /etc/ssl/certs/b5213941.0"
I0223 04:37:18.416961 154064 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/10376.pem && ln -fs /usr/share/ca-certificates/10376.pem /etc/ssl/certs/10376.pem"
I0223 04:37:18.423236 154064 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/10376.pem
I0223 04:37:18.425791 154064 command_runner.go:130] > -rw-r--r-- 1 root root 1338 Feb 23 04:26 /usr/share/ca-certificates/10376.pem
I0223 04:37:18.425944 154064 certs.go:444] hashing: -rw-r--r-- 1 root root 1338 Feb 23 04:26 /usr/share/ca-certificates/10376.pem
I0223 04:37:18.425980 154064 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/10376.pem
I0223 04:37:18.429944 154064 command_runner.go:130] > 51391683
I0223 04:37:18.430098 154064 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/51391683.0 || ln -fs /etc/ssl/certs/10376.pem /etc/ssl/certs/51391683.0"
I0223 04:37:18.436337 154064 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/103762.pem && ln -fs /usr/share/ca-certificates/103762.pem /etc/ssl/certs/103762.pem"
I0223 04:37:18.442648 154064 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/103762.pem
I0223 04:37:18.445173 154064 command_runner.go:130] > -rw-r--r-- 1 root root 1708 Feb 23 04:26 /usr/share/ca-certificates/103762.pem
I0223 04:37:18.445228 154064 certs.go:444] hashing: -rw-r--r-- 1 root root 1708 Feb 23 04:26 /usr/share/ca-certificates/103762.pem
I0223 04:37:18.445270 154064 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/103762.pem
I0223 04:37:18.449243 154064 command_runner.go:130] > 3ec20f2e
I0223 04:37:18.449296 154064 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/3ec20f2e.0 || ln -fs /etc/ssl/certs/103762.pem /etc/ssl/certs/3ec20f2e.0"
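The hash-then-symlink sequence above is OpenSSL's hashed CApath convention: every CA in /etc/ssl/certs is reachable via a symlink named <subject-hash>.0, which is how the `openssl x509 -hash` output turns into names like b5213941.0. Done by hand for a single certificate (the file name is a placeholder):

# Index one CA for hashed-directory lookup, then verify through it:
h=$(openssl x509 -hash -noout -in /usr/share/ca-certificates/exampleCA.pem)
sudo ln -fs /usr/share/ca-certificates/exampleCA.pem "/etc/ssl/certs/${h}.0"
openssl verify -CApath /etc/ssl/certs /usr/share/ca-certificates/exampleCA.pem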
I0223 04:37:18.455562 154064 kubeadm.go:401] StartCluster: {Name:multinode-541903 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.37-1676506612-15768@sha256:cc1cb283879fedae93096946a6953a50075ed680d467a47cbf669e0ed7d3aebc Memory:2200 CPUs:2 DiskSize:20000 VMDriver: Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:0 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.26.1 ClusterName:multinode-541903 Namespace:default APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI: NodeIP: NodePort:8443 NodeName:} Nodes:[{Name: IP:192.168.58.2 Port:8443 KubernetesVersion:v1.26.1 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:true ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP:}
I0223 04:37:18.455661 154064 ssh_runner.go:195] Run: docker ps --filter status=paused --filter=name=k8s_.*_(kube-system)_ --format={{.ID}}
I0223 04:37:18.470923 154064 ssh_runner.go:195] Run: sudo ls /var/lib/kubelet/kubeadm-flags.env /var/lib/kubelet/config.yaml /var/lib/minikube/etcd
I0223 04:37:18.476927 154064 command_runner.go:130] ! ls: cannot access '/var/lib/kubelet/kubeadm-flags.env': No such file or directory
I0223 04:37:18.476950 154064 command_runner.go:130] ! ls: cannot access '/var/lib/kubelet/config.yaml': No such file or directory
I0223 04:37:18.476961 154064 command_runner.go:130] ! ls: cannot access '/var/lib/minikube/etcd': No such file or directory
I0223 04:37:18.477016 154064 ssh_runner.go:195] Run: sudo cp /var/tmp/minikube/kubeadm.yaml.new /var/tmp/minikube/kubeadm.yaml
I0223 04:37:18.483035 154064 kubeadm.go:226] ignoring SystemVerification for kubeadm because of docker driver
I0223 04:37:18.483084 154064 ssh_runner.go:195] Run: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf
I0223 04:37:18.489026 154064 command_runner.go:130] ! ls: cannot access '/etc/kubernetes/admin.conf': No such file or directory
I0223 04:37:18.489049 154064 command_runner.go:130] ! ls: cannot access '/etc/kubernetes/kubelet.conf': No such file or directory
I0223 04:37:18.489060 154064 command_runner.go:130] ! ls: cannot access '/etc/kubernetes/controller-manager.conf': No such file or directory
I0223 04:37:18.489072 154064 command_runner.go:130] ! ls: cannot access '/etc/kubernetes/scheduler.conf': No such file or directory
I0223 04:37:18.489107 154064 kubeadm.go:152] config check failed, skipping stale config cleanup: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
ls: cannot access '/etc/kubernetes/admin.conf': No such file or directory
ls: cannot access '/etc/kubernetes/kubelet.conf': No such file or directory
ls: cannot access '/etc/kubernetes/controller-manager.conf': No such file or directory
ls: cannot access '/etc/kubernetes/scheduler.conf': No such file or directory
I0223 04:37:18.489136 154064 ssh_runner.go:286] Start: /bin/bash -c "sudo env PATH="/var/lib/minikube/binaries/v1.26.1:$PATH" kubeadm init --config /var/tmp/minikube/kubeadm.yaml --ignore-preflight-errors=DirAvailable--etc-kubernetes-manifests,DirAvailable--var-lib-minikube,DirAvailable--var-lib-minikube-etcd,FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml,FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml,FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml,FileAvailable--etc-kubernetes-manifests-etcd.yaml,Port-10250,Swap,NumCPU,Mem,SystemVerification,FileContent--proc-sys-net-bridge-bridge-nf-call-iptables"
I0223 04:37:18.526229 154064 kubeadm.go:322] W0223 04:37:18.525595 1405 initconfiguration.go:119] Usage of CRI endpoints without URL scheme is deprecated and can cause kubelet errors in the future. Automatically prepending scheme "unix" to the "criSocket" with value "/var/run/cri-dockerd.sock". Please update your configuration!
I0223 04:37:18.526252 154064 command_runner.go:130] ! W0223 04:37:18.525595 1405 initconfiguration.go:119] Usage of CRI endpoints without URL scheme is deprecated and can cause kubelet errors in the future. Automatically prepending scheme "unix" to the "criSocket" with value "/var/run/cri-dockerd.sock". Please update your configuration!
I0223 04:37:18.564071 154064 kubeadm.go:322] [WARNING SystemVerification]: failed to parse kernel config: unable to load kernel module: "configs", output: "modprobe: FATAL: Module configs not found in directory /lib/modules/5.15.0-1029-gcp\n", err: exit status 1
I0223 04:37:18.564100 154064 command_runner.go:130] ! [WARNING SystemVerification]: failed to parse kernel config: unable to load kernel module: "configs", output: "modprobe: FATAL: Module configs not found in directory /lib/modules/5.15.0-1029-gcp\n", err: exit status 1
I0223 04:37:18.625254 154064 kubeadm.go:322] [WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'
I0223 04:37:18.625277 154064 command_runner.go:130] ! [WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'
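kubeadm init is launched with a long --ignore-preflight-errors list because several checks (swap, CPU and memory minimums, SystemVerification) are expected to trip inside a Docker-in-Docker node; warnings like the SystemVerification and Service-Kubelet lines above are suppressed checks reporting rather than aborting. The preflight phase can also be run on its own to audit a host (a sketch, using the same uploaded config path):

# Run only the preflight checks, downgrading every failure to a warning:
sudo kubeadm init phase preflight --config /var/tmp/minikube/kubeadm.yaml --ignore-preflight-errors=all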
I0223 04:37:31.900363 154064 kubeadm.go:322] [init] Using Kubernetes version: v1.26.1
I0223 04:37:31.900391 154064 command_runner.go:130] > [init] Using Kubernetes version: v1.26.1
I0223 04:37:31.900449 154064 kubeadm.go:322] [preflight] Running pre-flight checks
I0223 04:37:31.900459 154064 command_runner.go:130] > [preflight] Running pre-flight checks
I0223 04:37:31.900557 154064 kubeadm.go:322] [preflight] The system verification failed. Printing the output from the verification:
I0223 04:37:31.900567 154064 command_runner.go:130] > [preflight] The system verification failed. Printing the output from the verification:
I0223 04:37:31.900671 154064 kubeadm.go:322] KERNEL_VERSION: 5.15.0-1029-gcp
I0223 04:37:31.900694 154064 command_runner.go:130] > KERNEL_VERSION: 5.15.0-1029-gcp
I0223 04:37:31.900743 154064 kubeadm.go:322] OS: Linux
I0223 04:37:31.900755 154064 command_runner.go:130] > OS: Linux
I0223 04:37:31.900814 154064 kubeadm.go:322] CGROUPS_CPU: enabled
I0223 04:37:31.900824 154064 command_runner.go:130] > CGROUPS_CPU: enabled
I0223 04:37:31.900901 154064 kubeadm.go:322] CGROUPS_CPUACCT: enabled
I0223 04:37:31.900912 154064 command_runner.go:130] > CGROUPS_CPUACCT: enabled
I0223 04:37:31.900970 154064 kubeadm.go:322] CGROUPS_CPUSET: enabled
I0223 04:37:31.900980 154064 command_runner.go:130] > CGROUPS_CPUSET: enabled
I0223 04:37:31.901050 154064 kubeadm.go:322] CGROUPS_DEVICES: enabled
I0223 04:37:31.901061 154064 command_runner.go:130] > CGROUPS_DEVICES: enabled
I0223 04:37:31.901127 154064 kubeadm.go:322] CGROUPS_FREEZER: enabled
I0223 04:37:31.901138 154064 command_runner.go:130] > CGROUPS_FREEZER: enabled
I0223 04:37:31.901195 154064 kubeadm.go:322] CGROUPS_MEMORY: enabled
I0223 04:37:31.901205 154064 command_runner.go:130] > CGROUPS_MEMORY: enabled
I0223 04:37:31.901262 154064 kubeadm.go:322] CGROUPS_PIDS: enabled
I0223 04:37:31.901272 154064 command_runner.go:130] > CGROUPS_PIDS: enabled
I0223 04:37:31.901332 154064 kubeadm.go:322] CGROUPS_HUGETLB: enabled
I0223 04:37:31.901342 154064 command_runner.go:130] > CGROUPS_HUGETLB: enabled
I0223 04:37:31.901396 154064 kubeadm.go:322] CGROUPS_BLKIO: enabled
I0223 04:37:31.901408 154064 command_runner.go:130] > CGROUPS_BLKIO: enabled
I0223 04:37:31.901504 154064 kubeadm.go:322] [preflight] Pulling images required for setting up a Kubernetes cluster
I0223 04:37:31.901519 154064 command_runner.go:130] > [preflight] Pulling images required for setting up a Kubernetes cluster
I0223 04:37:31.901620 154064 kubeadm.go:322] [preflight] This might take a minute or two, depending on the speed of your internet connection
I0223 04:37:31.901635 154064 command_runner.go:130] > [preflight] This might take a minute or two, depending on the speed of your internet connection
I0223 04:37:31.901736 154064 kubeadm.go:322] [preflight] You can also perform this action beforehand using 'kubeadm config images pull'
I0223 04:37:31.901748 154064 command_runner.go:130] > [preflight] You can also perform this action beforehand using 'kubeadm config images pull'
I0223 04:37:31.901819 154064 kubeadm.go:322] [certs] Using certificateDir folder "/var/lib/minikube/certs"
I0223 04:37:31.903547 154064 out.go:204] - Generating certificates and keys ...
I0223 04:37:31.901868 154064 command_runner.go:130] > [certs] Using certificateDir folder "/var/lib/minikube/certs"
I0223 04:37:31.903650 154064 command_runner.go:130] > [certs] Using existing ca certificate authority
I0223 04:37:31.903664 154064 kubeadm.go:322] [certs] Using existing ca certificate authority
I0223 04:37:31.903731 154064 command_runner.go:130] > [certs] Using existing apiserver certificate and key on disk
I0223 04:37:31.903740 154064 kubeadm.go:322] [certs] Using existing apiserver certificate and key on disk
I0223 04:37:31.903834 154064 command_runner.go:130] > [certs] Generating "apiserver-kubelet-client" certificate and key
I0223 04:37:31.903844 154064 kubeadm.go:322] [certs] Generating "apiserver-kubelet-client" certificate and key
I0223 04:37:31.903979 154064 command_runner.go:130] > [certs] Generating "front-proxy-ca" certificate and key
I0223 04:37:31.903991 154064 kubeadm.go:322] [certs] Generating "front-proxy-ca" certificate and key
I0223 04:37:31.904071 154064 command_runner.go:130] > [certs] Generating "front-proxy-client" certificate and key
I0223 04:37:31.904083 154064 kubeadm.go:322] [certs] Generating "front-proxy-client" certificate and key
I0223 04:37:31.904150 154064 command_runner.go:130] > [certs] Generating "etcd/ca" certificate and key
I0223 04:37:31.904159 154064 kubeadm.go:322] [certs] Generating "etcd/ca" certificate and key
I0223 04:37:31.904220 154064 command_runner.go:130] > [certs] Generating "etcd/server" certificate and key
I0223 04:37:31.904230 154064 kubeadm.go:322] [certs] Generating "etcd/server" certificate and key
I0223 04:37:31.904351 154064 command_runner.go:130] > [certs] etcd/server serving cert is signed for DNS names [localhost multinode-541903] and IPs [192.168.58.2 127.0.0.1 ::1]
I0223 04:37:31.904373 154064 kubeadm.go:322] [certs] etcd/server serving cert is signed for DNS names [localhost multinode-541903] and IPs [192.168.58.2 127.0.0.1 ::1]
I0223 04:37:31.904463 154064 command_runner.go:130] > [certs] Generating "etcd/peer" certificate and key
I0223 04:37:31.904477 154064 kubeadm.go:322] [certs] Generating "etcd/peer" certificate and key
I0223 04:37:31.904641 154064 command_runner.go:130] > [certs] etcd/peer serving cert is signed for DNS names [localhost multinode-541903] and IPs [192.168.58.2 127.0.0.1 ::1]
I0223 04:37:31.904652 154064 kubeadm.go:322] [certs] etcd/peer serving cert is signed for DNS names [localhost multinode-541903] and IPs [192.168.58.2 127.0.0.1 ::1]
I0223 04:37:31.904764 154064 command_runner.go:130] > [certs] Generating "etcd/healthcheck-client" certificate and key
I0223 04:37:31.904781 154064 kubeadm.go:322] [certs] Generating "etcd/healthcheck-client" certificate and key
I0223 04:37:31.904873 154064 command_runner.go:130] > [certs] Generating "apiserver-etcd-client" certificate and key
I0223 04:37:31.904884 154064 kubeadm.go:322] [certs] Generating "apiserver-etcd-client" certificate and key
I0223 04:37:31.904936 154064 command_runner.go:130] > [certs] Generating "sa" key and public key
I0223 04:37:31.904945 154064 kubeadm.go:322] [certs] Generating "sa" key and public key
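At this point kubeadm's [certs] phase is complete: it reused the existing CA and apiserver certificates and generated the remaining key pairs under /var/lib/minikube/certs. The SANs logged for the etcd serving cert can be double-checked on the node with openssl; a minimal sketch, assuming kubeadm's standard layout where etcd certs live in an etcd/ subdirectory of the certificateDir:

sudo openssl x509 -noout -text -in /var/lib/minikube/certs/etcd/server.crt | grep -A1 'Subject Alternative Name'
# expected per the log: DNS:localhost, DNS:multinode-541903, IP:192.168.58.2, IP:127.0.0.1, IP:::1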
I0223 04:37:31.905016 154064 command_runner.go:130] > [kubeconfig] Using kubeconfig folder "/etc/kubernetes"
I0223 04:37:31.905025 154064 kubeadm.go:322] [kubeconfig] Using kubeconfig folder "/etc/kubernetes"
I0223 04:37:31.905088 154064 command_runner.go:130] > [kubeconfig] Writing "admin.conf" kubeconfig file
I0223 04:37:31.905094 154064 kubeadm.go:322] [kubeconfig] Writing "admin.conf" kubeconfig file
I0223 04:37:31.905164 154064 command_runner.go:130] > [kubeconfig] Writing "kubelet.conf" kubeconfig file
I0223 04:37:31.905173 154064 kubeadm.go:322] [kubeconfig] Writing "kubelet.conf" kubeconfig file
I0223 04:37:31.905258 154064 command_runner.go:130] > [kubeconfig] Writing "controller-manager.conf" kubeconfig file
I0223 04:37:31.905269 154064 kubeadm.go:322] [kubeconfig] Writing "controller-manager.conf" kubeconfig file
I0223 04:37:31.905342 154064 command_runner.go:130] > [kubeconfig] Writing "scheduler.conf" kubeconfig file
I0223 04:37:31.905352 154064 kubeadm.go:322] [kubeconfig] Writing "scheduler.conf" kubeconfig file
I0223 04:37:31.905484 154064 command_runner.go:130] > [kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
I0223 04:37:31.905494 154064 kubeadm.go:322] [kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
I0223 04:37:31.905605 154064 command_runner.go:130] > [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
I0223 04:37:31.905615 154064 kubeadm.go:322] [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
I0223 04:37:31.905668 154064 command_runner.go:130] > [kubelet-start] Starting the kubelet
I0223 04:37:31.905680 154064 kubeadm.go:322] [kubelet-start] Starting the kubelet
I0223 04:37:31.905769 154064 command_runner.go:130] > [control-plane] Using manifest folder "/etc/kubernetes/manifests"
I0223 04:37:31.905784 154064 kubeadm.go:322] [control-plane] Using manifest folder "/etc/kubernetes/manifests"
I0223 04:37:31.907417 154064 out.go:204] - Booting up control plane ...
I0223 04:37:31.907528 154064 command_runner.go:130] > [control-plane] Creating static Pod manifest for "kube-apiserver"
I0223 04:37:31.907540 154064 kubeadm.go:322] [control-plane] Creating static Pod manifest for "kube-apiserver"
I0223 04:37:31.907639 154064 command_runner.go:130] > [control-plane] Creating static Pod manifest for "kube-controller-manager"
I0223 04:37:31.907658 154064 kubeadm.go:322] [control-plane] Creating static Pod manifest for "kube-controller-manager"
I0223 04:37:31.907748 154064 command_runner.go:130] > [control-plane] Creating static Pod manifest for "kube-scheduler"
I0223 04:37:31.907757 154064 kubeadm.go:322] [control-plane] Creating static Pod manifest for "kube-scheduler"
I0223 04:37:31.907853 154064 command_runner.go:130] > [etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
I0223 04:37:31.907859 154064 kubeadm.go:322] [etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
I0223 04:37:31.908115 154064 command_runner.go:130] > [wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
I0223 04:37:31.908124 154064 kubeadm.go:322] [wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
I0223 04:37:31.908221 154064 command_runner.go:130] > [apiclient] All control plane components are healthy after 9.502535 seconds
I0223 04:37:31.908227 154064 kubeadm.go:322] [apiclient] All control plane components are healthy after 9.502535 seconds
I0223 04:37:31.908364 154064 command_runner.go:130] > [upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
I0223 04:37:31.908369 154064 kubeadm.go:322] [upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
I0223 04:37:31.908529 154064 command_runner.go:130] > [kubelet] Creating a ConfigMap "kubelet-config" in namespace kube-system with the configuration for the kubelets in the cluster
I0223 04:37:31.908536 154064 kubeadm.go:322] [kubelet] Creating a ConfigMap "kubelet-config" in namespace kube-system with the configuration for the kubelets in the cluster
I0223 04:37:31.908622 154064 command_runner.go:130] > [upload-certs] Skipping phase. Please see --upload-certs
I0223 04:37:31.908628 154064 kubeadm.go:322] [upload-certs] Skipping phase. Please see --upload-certs
I0223 04:37:31.908858 154064 command_runner.go:130] > [mark-control-plane] Marking the node multinode-541903 as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]
I0223 04:37:31.908865 154064 kubeadm.go:322] [mark-control-plane] Marking the node multinode-541903 as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]
I0223 04:37:31.908937 154064 command_runner.go:130] > [bootstrap-token] Using token: chp8yf.ln64q86yz46rx07p
I0223 04:37:31.908943 154064 kubeadm.go:322] [bootstrap-token] Using token: chp8yf.ln64q86yz46rx07p
I0223 04:37:31.910415 154064 out.go:204] - Configuring RBAC rules ...
I0223 04:37:31.910532 154064 command_runner.go:130] > [bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
I0223 04:37:31.910541 154064 kubeadm.go:322] [bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
I0223 04:37:31.910656 154064 command_runner.go:130] > [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes
I0223 04:37:31.910668 154064 kubeadm.go:322] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes
I0223 04:37:31.910816 154064 command_runner.go:130] > [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
I0223 04:37:31.910826 154064 kubeadm.go:322] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
I0223 04:37:31.910950 154064 command_runner.go:130] > [bootstrap-token] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
I0223 04:37:31.910958 154064 kubeadm.go:322] [bootstrap-token] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
I0223 04:37:31.911073 154064 command_runner.go:130] > [bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
I0223 04:37:31.911080 154064 kubeadm.go:322] [bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
I0223 04:37:31.911202 154064 command_runner.go:130] > [bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
I0223 04:37:31.911217 154064 kubeadm.go:322] [bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
I0223 04:37:31.911366 154064 command_runner.go:130] > [kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
I0223 04:37:31.911375 154064 kubeadm.go:322] [kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
I0223 04:37:31.911426 154064 command_runner.go:130] > [addons] Applied essential addon: CoreDNS
I0223 04:37:31.911434 154064 kubeadm.go:322] [addons] Applied essential addon: CoreDNS
I0223 04:37:31.911515 154064 command_runner.go:130] > [addons] Applied essential addon: kube-proxy
I0223 04:37:31.911532 154064 kubeadm.go:322] [addons] Applied essential addon: kube-proxy
I0223 04:37:31.911539 154064 kubeadm.go:322]
I0223 04:37:31.911628 154064 command_runner.go:130] > Your Kubernetes control-plane has initialized successfully!
I0223 04:37:31.911639 154064 kubeadm.go:322] Your Kubernetes control-plane has initialized successfully!
I0223 04:37:31.911645 154064 kubeadm.go:322]
I0223 04:37:31.911748 154064 command_runner.go:130] > To start using your cluster, you need to run the following as a regular user:
I0223 04:37:31.911757 154064 kubeadm.go:322] To start using your cluster, you need to run the following as a regular user:
I0223 04:37:31.911760 154064 kubeadm.go:322]
I0223 04:37:31.911788 154064 command_runner.go:130] > mkdir -p $HOME/.kube
I0223 04:37:31.911797 154064 kubeadm.go:322] mkdir -p $HOME/.kube
I0223 04:37:31.911869 154064 command_runner.go:130] > sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
I0223 04:37:31.911877 154064 kubeadm.go:322] sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
I0223 04:37:31.911959 154064 command_runner.go:130] > sudo chown $(id -u):$(id -g) $HOME/.kube/config
I0223 04:37:31.911974 154064 kubeadm.go:322] sudo chown $(id -u):$(id -g) $HOME/.kube/config
I0223 04:37:31.911982 154064 kubeadm.go:322]
I0223 04:37:31.912074 154064 command_runner.go:130] > Alternatively, if you are the root user, you can run:
I0223 04:37:31.912083 154064 kubeadm.go:322] Alternatively, if you are the root user, you can run:
I0223 04:37:31.912088 154064 kubeadm.go:322]
I0223 04:37:31.912150 154064 command_runner.go:130] > export KUBECONFIG=/etc/kubernetes/admin.conf
I0223 04:37:31.912158 154064 kubeadm.go:322] export KUBECONFIG=/etc/kubernetes/admin.conf
I0223 04:37:31.912163 154064 kubeadm.go:322]
I0223 04:37:31.912244 154064 command_runner.go:130] > You should now deploy a pod network to the cluster.
I0223 04:37:31.912256 154064 kubeadm.go:322] You should now deploy a pod network to the cluster.
I0223 04:37:31.912350 154064 command_runner.go:130] > Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
I0223 04:37:31.912360 154064 kubeadm.go:322] Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
I0223 04:37:31.912453 154064 command_runner.go:130] > https://kubernetes.io/docs/concepts/cluster-administration/addons/
I0223 04:37:31.912462 154064 kubeadm.go:322] https://kubernetes.io/docs/concepts/cluster-administration/addons/
I0223 04:37:31.912468 154064 kubeadm.go:322]
I0223 04:37:31.912539 154064 command_runner.go:130] > You can now join any number of control-plane nodes by copying certificate authorities
I0223 04:37:31.912546 154064 kubeadm.go:322] You can now join any number of control-plane nodes by copying certificate authorities
I0223 04:37:31.912644 154064 command_runner.go:130] > and service account keys on each node and then running the following as root:
I0223 04:37:31.912654 154064 kubeadm.go:322] and service account keys on each node and then running the following as root:
I0223 04:37:31.912659 154064 kubeadm.go:322]
I0223 04:37:31.912773 154064 command_runner.go:130] > kubeadm join control-plane.minikube.internal:8443 --token chp8yf.ln64q86yz46rx07p \
I0223 04:37:31.912782 154064 kubeadm.go:322] kubeadm join control-plane.minikube.internal:8443 --token chp8yf.ln64q86yz46rx07p \
I0223 04:37:31.912922 154064 command_runner.go:130] > --discovery-token-ca-cert-hash sha256:7af6222537f3e512b57f8a6cd5cfb272dbfac3f8b48cc16883a30ea779c69b8f \
I0223 04:37:31.912935 154064 kubeadm.go:322] --discovery-token-ca-cert-hash sha256:7af6222537f3e512b57f8a6cd5cfb272dbfac3f8b48cc16883a30ea779c69b8f \
I0223 04:37:31.912961 154064 command_runner.go:130] > --control-plane
I0223 04:37:31.912968 154064 kubeadm.go:322] --control-plane
I0223 04:37:31.912971 154064 kubeadm.go:322]
I0223 04:37:31.913058 154064 command_runner.go:130] > Then you can join any number of worker nodes by running the following on each as root:
I0223 04:37:31.913066 154064 kubeadm.go:322] Then you can join any number of worker nodes by running the following on each as root:
I0223 04:37:31.913070 154064 kubeadm.go:322]
I0223 04:37:31.913169 154064 command_runner.go:130] > kubeadm join control-plane.minikube.internal:8443 --token chp8yf.ln64q86yz46rx07p \
I0223 04:37:31.913177 154064 kubeadm.go:322] kubeadm join control-plane.minikube.internal:8443 --token chp8yf.ln64q86yz46rx07p \
I0223 04:37:31.913254 154064 command_runner.go:130] > --discovery-token-ca-cert-hash sha256:7af6222537f3e512b57f8a6cd5cfb272dbfac3f8b48cc16883a30ea779c69b8f
I0223 04:37:31.913269 154064 kubeadm.go:322] --discovery-token-ca-cert-hash sha256:7af6222537f3e512b57f8a6cd5cfb272dbfac3f8b48cc16883a30ea779c69b8f
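Both join lines reuse the bootstrap token chp8yf.ln64q86yz46rx07p, which kubeadm issues with a 24-hour TTL by default. minikube drives node joins itself (via minikube node add and the multinode profile), so this is only relevant for manual joins after the token expires, in which case a fresh line can be printed on the control plane; <new-token> below stands for whatever kubeadm mints, while the CA cert hash stays the same since the CA is unchanged:

sudo kubeadm token create --print-join-command
# prints: kubeadm join control-plane.minikube.internal:8443 --token <new-token> --discovery-token-ca-cert-hash sha256:7af6222537f3e512b57f8a6cd5cfb272dbfac3f8b48cc16883a30ea779c69b8f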
I0223 04:37:31.913276 154064 cni.go:84] Creating CNI manager for ""
I0223 04:37:31.913288 154064 cni.go:136] 1 nodes found, recommending kindnet
I0223 04:37:31.914906 154064 out.go:177] * Configuring CNI (Container Networking Interface) ...
I0223 04:37:31.916236 154064 ssh_runner.go:195] Run: stat /opt/cni/bin/portmap
I0223 04:37:31.919473 154064 command_runner.go:130] > File: /opt/cni/bin/portmap
I0223 04:37:31.919494 154064 command_runner.go:130] > Size: 2828728 Blocks: 5528 IO Block: 4096 regular file
I0223 04:37:31.919504 154064 command_runner.go:130] > Device: 36h/54d Inode: 1317857 Links: 1
I0223 04:37:31.919512 154064 command_runner.go:130] > Access: (0755/-rwxr-xr-x) Uid: ( 0/ root) Gid: ( 0/ root)
I0223 04:37:31.919522 154064 command_runner.go:130] > Access: 2022-05-18 18:39:21.000000000 +0000
I0223 04:37:31.919533 154064 command_runner.go:130] > Modify: 2022-05-18 18:39:21.000000000 +0000
I0223 04:37:31.919541 154064 command_runner.go:130] > Change: 2023-02-23 04:22:36.812251096 +0000
I0223 04:37:31.919551 154064 command_runner.go:130] > Birth: -
I0223 04:37:31.919594 154064 cni.go:181] applying CNI manifest using /var/lib/minikube/binaries/v1.26.1/kubectl ...
I0223 04:37:31.919605 154064 ssh_runner.go:362] scp memory --> /var/tmp/minikube/cni.yaml (2428 bytes)
I0223 04:37:31.934348 154064 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.26.1/kubectl apply --kubeconfig=/var/lib/minikube/kubeconfig -f /var/tmp/minikube/cni.yaml
I0223 04:37:32.713207 154064 command_runner.go:130] > clusterrole.rbac.authorization.k8s.io/kindnet created
I0223 04:37:32.716806 154064 command_runner.go:130] > clusterrolebinding.rbac.authorization.k8s.io/kindnet created
I0223 04:37:32.723501 154064 command_runner.go:130] > serviceaccount/kindnet created
I0223 04:37:32.731686 154064 command_runner.go:130] > daemonset.apps/kindnet created
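With a single node found, cni.go picked kindnet and applied its manifest through the bundled kubectl; the four objects above (ClusterRole, ClusterRoleBinding, ServiceAccount, DaemonSet) are the whole install. A quick rollout check, assuming the DaemonSet lands in kube-system as in the kindnet manifest minikube ships:

sudo /var/lib/minikube/binaries/v1.26.1/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system rollout status daemonset/kindnet --timeout=60s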
I0223 04:37:32.735181 154064 ssh_runner.go:195] Run: /bin/bash -c "cat /proc/$(pgrep kube-apiserver)/oom_adj"
I0223 04:37:32.735275 154064 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.26.1/kubectl create clusterrolebinding minikube-rbac --clusterrole=cluster-admin --serviceaccount=kube-system:default --kubeconfig=/var/lib/minikube/kubeconfig
I0223 04:37:32.735289 154064 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.26.1/kubectl label nodes minikube.k8s.io/version=v1.29.0 minikube.k8s.io/commit=66d56dc3ac28a702789778ac47e90f12526a0321 minikube.k8s.io/name=multinode-541903 minikube.k8s.io/updated_at=2023_02_23T04_37_32_0700 minikube.k8s.io/primary=true --all --overwrite --kubeconfig=/var/lib/minikube/kubeconfig
I0223 04:37:32.811177 154064 command_runner.go:130] > clusterrolebinding.rbac.authorization.k8s.io/minikube-rbac created
I0223 04:37:32.814821 154064 command_runner.go:130] > -16
I0223 04:37:32.814849 154064 ops.go:34] apiserver oom_adj: -16
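The -16 read back from /proc/<pid>/oom_adj confirms the apiserver runs with a negative OOM-score adjustment, so the kernel will kill it only as a last resort under memory pressure. oom_adj is the legacy interface; the same state is visible through the current one, sketched here assuming a single kube-apiserver process on the node:

pid=$(pgrep -o kube-apiserver)   # oldest matching PID
cat /proc/$pid/oom_adj           # legacy scale, -17..15
cat /proc/$pid/oom_score_adj     # current scale, -1000..1000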
I0223 04:37:32.814880 154064 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.26.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0223 04:37:32.828665 154064 command_runner.go:130] > node/multinode-541903 labeled
I0223 04:37:32.898067 154064 command_runner.go:130] ! Error from server (NotFound): serviceaccounts "default" not found
I0223 04:37:33.398827 154064 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.26.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0223 04:37:33.456998 154064 command_runner.go:130] ! Error from server (NotFound): serviceaccounts "default" not found
I0223 04:37:33.898470 154064 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.26.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0223 04:37:33.955991 154064 command_runner.go:130] ! Error from server (NotFound): serviceaccounts "default" not found
I0223 04:37:34.399242 154064 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.26.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0223 04:37:34.460729 154064 command_runner.go:130] ! Error from server (NotFound): serviceaccounts "default" not found
I0223 04:37:34.898325 154064 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.26.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0223 04:37:34.958188 154064 command_runner.go:130] ! Error from server (NotFound): serviceaccounts "default" not found
I0223 04:37:35.398850 154064 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.26.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0223 04:37:35.457341 154064 command_runner.go:130] ! Error from server (NotFound): serviceaccounts "default" not found
I0223 04:37:35.898355 154064 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.26.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0223 04:37:35.958424 154064 command_runner.go:130] ! Error from server (NotFound): serviceaccounts "default" not found
I0223 04:37:36.399240 154064 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.26.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0223 04:37:36.457352 154064 command_runner.go:130] ! Error from server (NotFound): serviceaccounts "default" not found
I0223 04:37:36.899212 154064 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.26.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0223 04:37:36.959869 154064 command_runner.go:130] ! Error from server (NotFound): serviceaccounts "default" not found
I0223 04:37:37.398375 154064 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.26.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0223 04:37:37.458000 154064 command_runner.go:130] ! Error from server (NotFound): serviceaccounts "default" not found
I0223 04:37:37.898529 154064 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.26.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0223 04:37:37.955900 154064 command_runner.go:130] ! Error from server (NotFound): serviceaccounts "default" not found
I0223 04:37:38.399089 154064 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.26.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0223 04:37:38.457018 154064 command_runner.go:130] ! Error from server (NotFound): serviceaccounts "default" not found
I0223 04:37:38.899060 154064 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.26.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0223 04:37:38.959119 154064 command_runner.go:130] ! Error from server (NotFound): serviceaccounts "default" not found
I0223 04:37:39.398711 154064 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.26.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0223 04:37:39.456875 154064 command_runner.go:130] ! Error from server (NotFound): serviceaccounts "default" not found
I0223 04:37:39.898698 154064 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.26.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0223 04:37:39.957716 154064 command_runner.go:130] ! Error from server (NotFound): serviceaccounts "default" not found
I0223 04:37:40.398492 154064 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.26.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0223 04:37:40.455461 154064 command_runner.go:130] ! Error from server (NotFound): serviceaccounts "default" not found
I0223 04:37:40.898375 154064 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.26.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0223 04:37:40.956480 154064 command_runner.go:130] ! Error from server (NotFound): serviceaccounts "default" not found
I0223 04:37:41.399286 154064 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.26.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0223 04:37:41.460326 154064 command_runner.go:130] ! Error from server (NotFound): serviceaccounts "default" not found
I0223 04:37:41.898965 154064 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.26.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0223 04:37:41.958927 154064 command_runner.go:130] ! Error from server (NotFound): serviceaccounts "default" not found
I0223 04:37:42.398506 154064 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.26.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0223 04:37:42.457815 154064 command_runner.go:130] ! Error from server (NotFound): serviceaccounts "default" not found
I0223 04:37:42.898595 154064 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.26.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0223 04:37:42.958759 154064 command_runner.go:130] ! Error from server (NotFound): serviceaccounts "default" not found
I0223 04:37:43.398305 154064 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.26.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0223 04:37:43.455141 154064 command_runner.go:130] ! Error from server (NotFound): serviceaccounts "default" not found
I0223 04:37:43.898926 154064 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.26.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0223 04:37:43.960874 154064 command_runner.go:130] ! Error from server (NotFound): serviceaccounts "default" not found
I0223 04:37:44.398619 154064 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.26.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0223 04:37:44.463861 154064 command_runner.go:130] > NAME      SECRETS   AGE
I0223 04:37:44.463887 154064 command_runner.go:130] > default   0         0s
I0223 04:37:44.463932 154064 kubeadm.go:1073] duration metric: took 11.72871608s to wait for elevateKubeSystemPrivileges.
I0223 04:37:44.463948 154064 kubeadm.go:403] StartCluster complete in 26.008389921s
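The burst of identical get sa default calls above is minikube polling roughly twice a second until kube-controller-manager creates the default ServiceAccount; only once the row appears (age 0s) does elevateKubeSystemPrivileges finish, 11.7s in here. A standalone equivalent of that wait, reusing the exact command from the log:

until sudo /var/lib/minikube/binaries/v1.26.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig >/dev/null 2>&1; do
  sleep 0.5   # retry until the ServiceAccount controller has created it
done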
I0223 04:37:44.463970 154064 settings.go:142] acquiring lock: {Name:mk480b405fdac5d4c5ac021acbe299356ef5a234 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0223 04:37:44.464039 154064 settings.go:150] Updating kubeconfig: /home/jenkins/minikube-integration/15909-3701/kubeconfig
I0223 04:37:44.464844 154064 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/15909-3701/kubeconfig: {Name:mk3aca4f81f0aa8230f3523c2562c55c1ec285bc Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0223 04:37:44.465071 154064 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.26.1/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml"
I0223 04:37:44.465090 154064 addons.go:489] enable addons start: toEnable=map[ambassador:false auto-pause:false cloud-spanner:false csi-hostpath-driver:false dashboard:false default-storageclass:true efk:false freshpod:false gcp-auth:false gvisor:false headlamp:false helm-tiller:false inaccel:false ingress:false ingress-dns:false istio:false istio-provisioner:false kong:false kubevirt:false logviewer:false metallb:false metrics-server:false nvidia-driver-installer:false nvidia-gpu-device-plugin:false olm:false pod-security-policy:false portainer:false registry:false registry-aliases:false registry-creds:false storage-provisioner:true storage-provisioner-gluster:false volumesnapshots:false]
I0223 04:37:44.465185 154064 addons.go:65] Setting storage-provisioner=true in profile "multinode-541903"
I0223 04:37:44.465191 154064 addons.go:65] Setting default-storageclass=true in profile "multinode-541903"
I0223 04:37:44.465205 154064 addons.go:227] Setting addon storage-provisioner=true in "multinode-541903"
I0223 04:37:44.465212 154064 addons_storage_classes.go:33] enableOrDisableStorageClasses default-storageclass=true on "multinode-541903"
I0223 04:37:44.465257 154064 host.go:66] Checking if "multinode-541903" exists ...
I0223 04:37:44.465268 154064 config.go:182] Loaded profile config "multinode-541903": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.26.1
I0223 04:37:44.465407 154064 loader.go:373] Config loaded from file: /home/jenkins/minikube-integration/15909-3701/kubeconfig
I0223 04:37:44.465556 154064 cli_runner.go:164] Run: docker container inspect multinode-541903 --format={{.State.Status}}
I0223 04:37:44.465741 154064 cli_runner.go:164] Run: docker container inspect multinode-541903 --format={{.State.Status}}
I0223 04:37:44.465703 154064 kapi.go:59] client config for multinode-541903: &rest.Config{Host:"https://192.168.58.2:8443", APIPath:"", ContentConfig:rest.ContentConfig{AcceptContentTypes:"", ContentType:"", GroupVersion:(*schema.GroupVersion)(nil), NegotiatedSerializer:runtime.NegotiatedSerializer(nil)}, Username:"", Password:"", BearerToken:"", BearerTokenFile:"", Impersonate:rest.ImpersonationConfig{UserName:"", UID:"", Groups:[]string(nil), Extra:map[string][]string(nil)}, AuthProvider:<nil>, AuthConfigPersister:rest.AuthProviderConfigPersister(nil), ExecProvider:<nil>, TLSClientConfig:rest.sanitizedTLSClientConfig{Insecure:false, ServerName:"", CertFile:"/home/jenkins/minikube-integration/15909-3701/.minikube/profiles/multinode-541903/client.crt", KeyFile:"/home/jenkins/minikube-integration/15909-3701/.minikube/profiles/multinode-541903/client.key", CAFile:"/home/jenkins/minikube-integration/15909-3701/.minikube/ca.crt", CertData:[]uint8(nil), KeyData:[]uint8(nil), CAData:[]uint8(nil), NextProtos:[]string(nil)}, UserAgent:"", DisableCompression:false, Transport:http.RoundTripper(nil), WrapTransport:(transport.WrapperFunc)(0x299afe0), QPS:0, Burst:0, RateLimiter:flowcontrol.RateLimiter(nil), WarningHandler:rest.WarningHandler(nil), Timeout:0, Dial:(func(context.Context, string, string) (net.Conn, error))(nil), Proxy:(func(*http.Request) (*url.URL, error))(nil)}
I0223 04:37:44.466616 154064 cert_rotation.go:137] Starting client certificate rotation controller
I0223 04:37:44.466792 154064 round_trippers.go:463] GET https://192.168.58.2:8443/apis/apps/v1/namespaces/kube-system/deployments/coredns/scale
I0223 04:37:44.466809 154064 round_trippers.go:469] Request Headers:
I0223 04:37:44.466822 154064 round_trippers.go:473] Accept: application/json, */*
I0223 04:37:44.466832 154064 round_trippers.go:473] User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
I0223 04:37:44.477666 154064 round_trippers.go:574] Response Status: 200 OK in 10 milliseconds
I0223 04:37:44.477690 154064 round_trippers.go:577] Response Headers:
I0223 04:37:44.477702 154064 round_trippers.go:580] Content-Length: 291
I0223 04:37:44.477716 154064 round_trippers.go:580] Date: Thu, 23 Feb 2023 04:37:44 GMT
I0223 04:37:44.477726 154064 round_trippers.go:580] Audit-Id: 058c7d46-a03f-4f62-99ad-76b1a6bad99b
I0223 04:37:44.477738 154064 round_trippers.go:580] Cache-Control: no-cache, private
I0223 04:37:44.477747 154064 round_trippers.go:580] Content-Type: application/json
I0223 04:37:44.477759 154064 round_trippers.go:580] X-Kubernetes-Pf-Flowschema-Uid: 0e806fdb-28a0-48db-9063-d67fdd19ddd0
I0223 04:37:44.477770 154064 round_trippers.go:580] X-Kubernetes-Pf-Prioritylevel-Uid: d9dafd77-df4a-4035-9904-af3d6a0be465
I0223 04:37:44.477801 154064 request.go:1171] Response Body: {"kind":"Scale","apiVersion":"autoscaling/v1","metadata":{"name":"coredns","namespace":"kube-system","uid":"7dc6990a-65df-4e2e-8c11-e636c86cc878","resourceVersion":"331","creationTimestamp":"2023-02-23T04:37:31Z"},"spec":{"replicas":2},"status":{"replicas":0,"selector":"k8s-app=kube-dns"}}
I0223 04:37:44.478308 154064 request.go:1171] Request Body: {"kind":"Scale","apiVersion":"autoscaling/v1","metadata":{"name":"coredns","namespace":"kube-system","uid":"7dc6990a-65df-4e2e-8c11-e636c86cc878","resourceVersion":"331","creationTimestamp":"2023-02-23T04:37:31Z"},"spec":{"replicas":1},"status":{"replicas":0,"selector":"k8s-app=kube-dns"}}
I0223 04:37:44.478367 154064 round_trippers.go:463] PUT https://192.168.58.2:8443/apis/apps/v1/namespaces/kube-system/deployments/coredns/scale
I0223 04:37:44.478374 154064 round_trippers.go:469] Request Headers:
I0223 04:37:44.478384 154064 round_trippers.go:473] Content-Type: application/json
I0223 04:37:44.478399 154064 round_trippers.go:473] Accept: application/json, */*
I0223 04:37:44.478408 154064 round_trippers.go:473] User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
I0223 04:37:44.495070 154064 round_trippers.go:574] Response Status: 409 Conflict in 16 milliseconds
I0223 04:37:44.495098 154064 round_trippers.go:577] Response Headers:
I0223 04:37:44.495110 154064 round_trippers.go:580] X-Kubernetes-Pf-Prioritylevel-Uid: d9dafd77-df4a-4035-9904-af3d6a0be465
I0223 04:37:44.495120 154064 round_trippers.go:580] Content-Length: 332
I0223 04:37:44.495129 154064 round_trippers.go:580] Date: Thu, 23 Feb 2023 04:37:44 GMT
I0223 04:37:44.495140 154064 round_trippers.go:580] Audit-Id: 2dea5be8-012a-485d-aca0-613c03e46d0b
I0223 04:37:44.495152 154064 round_trippers.go:580] Cache-Control: no-cache, private
I0223 04:37:44.495162 154064 round_trippers.go:580] Content-Type: application/json
I0223 04:37:44.495178 154064 round_trippers.go:580] X-Kubernetes-Pf-Flowschema-Uid: 0e806fdb-28a0-48db-9063-d67fdd19ddd0
I0223 04:37:44.495213 154064 request.go:1171] Response Body: {"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"Operation cannot be fulfilled on deployments.apps \"coredns\": the object has been modified; please apply your changes to the latest version and try again","reason":"Conflict","details":{"name":"coredns","group":"apps","kind":"deployments"},"code":409}
W0223 04:37:44.495432 154064 kapi.go:245] failed rescaling "coredns" deployment in "kube-system" namespace and "multinode-541903" context to 1 replicas: non-retryable failure while rescaling coredns deployment: Operation cannot be fulfilled on deployments.apps "coredns": the object has been modified; please apply your changes to the latest version and try again
E0223 04:37:44.495451 154064 start.go:219] Unable to scale down deployment "coredns" in namespace "kube-system" to 1 replica: non-retryable failure while rescaling coredns deployment: Operation cannot be fulfilled on deployments.apps "coredns": the object has been modified; please apply your changes to the latest version and try again
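The 409 is ordinary optimistic concurrency: the PUT carried resourceVersion "331" captured by the earlier GET, but the Deployment was modified in between, so the apiserver refused the stale write; minikube deliberately treats this as non-retryable and leaves CoreDNS at 2 replicas. Done by hand, the race is avoided because kubectl re-reads the object at the moment of writing; a sketch against the same cluster and kubeconfig:

kubectl --kubeconfig=/home/jenkins/minikube-integration/15909-3701/kubeconfig -n kube-system scale deployment coredns --replicas=1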
I0223 04:37:44.495472 154064 start.go:223] Will wait 6m0s for node &{Name: IP:192.168.58.2 Port:8443 KubernetesVersion:v1.26.1 ContainerRuntime:docker ControlPlane:true Worker:true}
I0223 04:37:44.497093 154064 out.go:177] * Verifying Kubernetes components...
I0223 04:37:44.498608 154064 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet
I0223 04:37:44.549971 154064 command_runner.go:130] > apiVersion: v1
I0223 04:37:44.549994 154064 command_runner.go:130] > data:
I0223 04:37:44.550001 154064 command_runner.go:130] > Corefile: |
I0223 04:37:44.550007 154064 command_runner.go:130] > .:53 {
I0223 04:37:44.550013 154064 command_runner.go:130] > errors
I0223 04:37:44.550020 154064 command_runner.go:130] > health {
I0223 04:37:44.550028 154064 command_runner.go:130] > lameduck 5s
I0223 04:37:44.550034 154064 command_runner.go:130] > }
I0223 04:37:44.550041 154064 command_runner.go:130] > ready
I0223 04:37:44.550054 154064 command_runner.go:130] > kubernetes cluster.local in-addr.arpa ip6.arpa {
I0223 04:37:44.550064 154064 command_runner.go:130] > pods insecure
I0223 04:37:44.550077 154064 command_runner.go:130] > fallthrough in-addr.arpa ip6.arpa
I0223 04:37:44.550090 154064 command_runner.go:130] > ttl 30
I0223 04:37:44.550096 154064 command_runner.go:130] > }
I0223 04:37:44.550104 154064 command_runner.go:130] > prometheus :9153
I0223 04:37:44.550112 154064 command_runner.go:130] > forward . /etc/resolv.conf {
I0223 04:37:44.550119 154064 command_runner.go:130] > max_concurrent 1000
I0223 04:37:44.550125 154064 command_runner.go:130] > }
I0223 04:37:44.550130 154064 command_runner.go:130] > cache 30
I0223 04:37:44.550134 154064 command_runner.go:130] > loop
I0223 04:37:44.550137 154064 command_runner.go:130] > reload
I0223 04:37:44.550143 154064 command_runner.go:130] > loadbalance
I0223 04:37:44.550149 154064 command_runner.go:130] > }
I0223 04:37:44.550161 154064 command_runner.go:130] > kind: ConfigMap
I0223 04:37:44.550167 154064 command_runner.go:130] > metadata:
I0223 04:37:44.550176 154064 command_runner.go:130] > creationTimestamp: "2023-02-23T04:37:31Z"
I0223 04:37:44.550182 154064 command_runner.go:130] > name: coredns
I0223 04:37:44.550188 154064 command_runner.go:130] > namespace: kube-system
I0223 04:37:44.550195 154064 command_runner.go:130] > resourceVersion: "231"
I0223 04:37:44.550203 154064 command_runner.go:130] > uid: 43e6c7f3-47ed-4c34-ba60-0785778c51fb
I0223 04:37:44.553358 154064 out.go:177] - Using image gcr.io/k8s-minikube/storage-provisioner:v5
I0223 04:37:44.554868 154064 addons.go:419] installing /etc/kubernetes/addons/storage-provisioner.yaml
I0223 04:37:44.554889 154064 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/storage-provisioner.yaml (2676 bytes)
I0223 04:37:44.554941 154064 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" multinode-541903
I0223 04:37:44.555046 154064 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.26.1/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml | sed -e '/^ forward . \/etc\/resolv.conf.*/i \ hosts {\n 192.168.58.1 host.minikube.internal\n fallthrough\n }' -e '/^ errors *$/i \ log' | sudo /var/lib/minikube/binaries/v1.26.1/kubectl --kubeconfig=/var/lib/minikube/kubeconfig replace -f -"
I0223 04:37:44.555338 154064 loader.go:373] Config loaded from file: /home/jenkins/minikube-integration/15909-3701/kubeconfig
I0223 04:37:44.555655 154064 kapi.go:59] client config for multinode-541903: &rest.Config{Host:"https://192.168.58.2:8443", APIPath:"", ContentConfig:rest.ContentConfig{AcceptContentTypes:"", ContentType:"", GroupVersion:(*schema.GroupVersion)(nil), NegotiatedSerializer:runtime.NegotiatedSerializer(nil)}, Username:"", Password:"", BearerToken:"", BearerTokenFile:"", Impersonate:rest.ImpersonationConfig{UserName:"", UID:"", Groups:[]string(nil), Extra:map[string][]string(nil)}, AuthProvider:<nil>, AuthConfigPersister:rest.AuthProviderConfigPersister(nil), ExecProvider:<nil>, TLSClientConfig:rest.sanitizedTLSClientConfig{Insecure:false, ServerName:"", CertFile:"/home/jenkins/minikube-integration/15909-3701/.minikube/profiles/multinode-541903/client.crt", KeyFile:"/home/jenkins/minikube-integration/15909-3701/.minikube/profiles/multinode-541903/client.key", CAFile:"/home/jenkins/minikube-integration/15909-3701/.minikube/ca.crt", CertData:[]uint8(nil), KeyData:[]uint8(nil), CAData:[]uint8(nil), NextProtos:[]string(nil)}, UserAgent:"", DisableCompression:false, Transport:http.RoundTripper(nil), WrapTransport:(transport.WrapperFunc)(0x299afe0), QPS:0, Burst:0, RateLimiter:flowcontrol.RateLimiter(nil), WarningHandler:rest.WarningHandler(nil), Timeout:0, Dial:(func(context.Context, string, string) (net.Conn, error))(nil), Proxy:(func(*http.Request) (*url.URL, error))(nil)}
I0223 04:37:44.556046 154064 node_ready.go:35] waiting up to 6m0s for node "multinode-541903" to be "Ready" ...
I0223 04:37:44.556123 154064 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-541903
I0223 04:37:44.556131 154064 round_trippers.go:469] Request Headers:
I0223 04:37:44.556142 154064 round_trippers.go:473] Accept: application/json, */*
I0223 04:37:44.556153 154064 round_trippers.go:473] User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
I0223 04:37:44.557872 154064 round_trippers.go:574] Response Status: 200 OK in 1 milliseconds
I0223 04:37:44.557891 154064 round_trippers.go:577] Response Headers:
I0223 04:37:44.557903 154064 round_trippers.go:580] Cache-Control: no-cache, private
I0223 04:37:44.557912 154064 round_trippers.go:580] Content-Type: application/json
I0223 04:37:44.557920 154064 round_trippers.go:580] X-Kubernetes-Pf-Flowschema-Uid: 0e806fdb-28a0-48db-9063-d67fdd19ddd0
I0223 04:37:44.557931 154064 round_trippers.go:580] X-Kubernetes-Pf-Prioritylevel-Uid: d9dafd77-df4a-4035-9904-af3d6a0be465
I0223 04:37:44.557943 154064 round_trippers.go:580] Date: Thu, 23 Feb 2023 04:37:44 GMT
I0223 04:37:44.557955 154064 round_trippers.go:580] Audit-Id: 0436a187-a18b-4ce3-95f3-47b15a94e191
I0223 04:37:44.558054 154064 request.go:1171] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-541903","uid":"d8ecdc0c-ca08-4762-8f06-41e51e5fca21","resourceVersion":"309","creationTimestamp":"2023-02-23T04:37:28Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-541903","kubernetes.io/os":"linux","minikube.k8s.io/commit":"66d56dc3ac28a702789778ac47e90f12526a0321","minikube.k8s.io/name":"multinode-541903","minikube.k8s.io/primary":"true","minikube.k8s.io/updated_at":"2023_02_23T04_37_32_0700","minikube.k8s.io/version":"v1.29.0","node-role.kubernetes.io/control-plane":"","node.kubernetes.io/exclude-from-external-load-balancers":""},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"unix:///var/run/cri-dockerd.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kubelet","operation":"Update","apiVersion":"v1","time":"2023-02-23T04:37:28Z","fieldsType":"FieldsV1","fi [truncated 4999 chars]
I0223 04:37:44.558476 154064 loader.go:373] Config loaded from file: /home/jenkins/minikube-integration/15909-3701/kubeconfig
I0223 04:37:44.558586 154064 node_ready.go:49] node "multinode-541903" has status "Ready":"True"
I0223 04:37:44.558602 154064 node_ready.go:38] duration metric: took 2.532082ms waiting for node "multinode-541903" to be "Ready" ...
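node_ready.go's probe is just a GET on the Node object followed by a check of its Ready condition, which here was already True. The kubectl equivalent of that check:

kubectl get node multinode-541903 -o jsonpath='{.status.conditions[?(@.type=="Ready")].status}'
# prints True once the kubelet reports Ready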
I0223 04:37:44.558612 154064 pod_ready.go:35] extra waiting up to 6m0s for all system-critical pods including labels [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] to be "Ready" ...
I0223 04:37:44.558670 154064 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/namespaces/kube-system/pods
I0223 04:37:44.558682 154064 round_trippers.go:469] Request Headers:
I0223 04:37:44.558692 154064 round_trippers.go:473] Accept: application/json, */*
I0223 04:37:44.558702 154064 round_trippers.go:473] User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
I0223 04:37:44.558724 154064 kapi.go:59] client config for multinode-541903: &rest.Config{Host:"https://192.168.58.2:8443", APIPath:"", ContentConfig:rest.ContentConfig{AcceptContentTypes:"", ContentType:"", GroupVersion:(*schema.GroupVersion)(nil), NegotiatedSerializer:runtime.NegotiatedSerializer(nil)}, Username:"", Password:"", BearerToken:"", BearerTokenFile:"", Impersonate:rest.ImpersonationConfig{UserName:"", UID:"", Groups:[]string(nil), Extra:map[string][]string(nil)}, AuthProvider:<nil>, AuthConfigPersister:rest.AuthProviderConfigPersister(nil), ExecProvider:<nil>, TLSClientConfig:rest.sanitizedTLSClientConfig{Insecure:false, ServerName:"", CertFile:"/home/jenkins/minikube-integration/15909-3701/.minikube/profiles/multinode-541903/client.crt", KeyFile:"/home/jenkins/minikube-integration/15909-3701/.minikube/profiles/multinode-541903/client.key", CAFile:"/home/jenkins/minikube-integration/15909-3701/.minikube/ca.crt", CertData:[]uint8(nil), KeyData:[]uint8(nil), CAData:[]uint8(nil), NextProtos:[]string(nil)}, UserAgent:"", DisableCompression:false, Transport:http.RoundTripper(nil), WrapTransport:(transport.WrapperFunc)(0x299afe0), QPS:0, Burst:0, RateLimiter:flowcontrol.RateLimiter(nil), WarningHandler:rest.WarningHandler(nil), Timeout:0, Dial:(func(context.Context, string, string) (net.Conn, error))(nil), Proxy:(func(*http.Request) (*url.URL, error))(nil)}
I0223 04:37:44.559067 154064 round_trippers.go:463] GET https://192.168.58.2:8443/apis/storage.k8s.io/v1/storageclasses
I0223 04:37:44.559077 154064 round_trippers.go:469] Request Headers:
I0223 04:37:44.559086 154064 round_trippers.go:473] Accept: application/json, */*
I0223 04:37:44.559101 154064 round_trippers.go:473] User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
I0223 04:37:44.560842 154064 round_trippers.go:574] Response Status: 200 OK in 1 milliseconds
I0223 04:37:44.560864 154064 round_trippers.go:577] Response Headers:
I0223 04:37:44.560874 154064 round_trippers.go:580] X-Kubernetes-Pf-Prioritylevel-Uid: d9dafd77-df4a-4035-9904-af3d6a0be465
I0223 04:37:44.560883 154064 round_trippers.go:580] Content-Length: 109
I0223 04:37:44.560892 154064 round_trippers.go:580] Date: Thu, 23 Feb 2023 04:37:44 GMT
I0223 04:37:44.560899 154064 round_trippers.go:580] Audit-Id: 6b6f4e84-68b0-4c86-a0e5-48da28342362
I0223 04:37:44.560908 154064 round_trippers.go:580] Cache-Control: no-cache, private
I0223 04:37:44.560921 154064 round_trippers.go:580] Content-Type: application/json
I0223 04:37:44.560930 154064 round_trippers.go:580] X-Kubernetes-Pf-Flowschema-Uid: 0e806fdb-28a0-48db-9063-d67fdd19ddd0
I0223 04:37:44.560949 154064 request.go:1171] Response Body: {"kind":"StorageClassList","apiVersion":"storage.k8s.io/v1","metadata":{"resourceVersion":"333"},"items":[]}
I0223 04:37:44.561171 154064 addons.go:227] Setting addon default-storageclass=true in "multinode-541903"
I0223 04:37:44.561205 154064 host.go:66] Checking if "multinode-541903" exists ...
I0223 04:37:44.561608 154064 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
I0223 04:37:44.561638 154064 round_trippers.go:577] Response Headers:
I0223 04:37:44.561649 154064 round_trippers.go:580] Audit-Id: 3b221917-a83e-47e2-964a-e83949f6b28d
I0223 04:37:44.561658 154064 round_trippers.go:580] Cache-Control: no-cache, private
I0223 04:37:44.561659 154064 cli_runner.go:164] Run: docker container inspect multinode-541903 --format={{.State.Status}}
I0223 04:37:44.561666 154064 round_trippers.go:580] Content-Type: application/json
I0223 04:37:44.561675 154064 round_trippers.go:580] X-Kubernetes-Pf-Flowschema-Uid: 0e806fdb-28a0-48db-9063-d67fdd19ddd0
I0223 04:37:44.561684 154064 round_trippers.go:580] X-Kubernetes-Pf-Prioritylevel-Uid: d9dafd77-df4a-4035-9904-af3d6a0be465
I0223 04:37:44.561692 154064 round_trippers.go:580] Date: Thu, 23 Feb 2023 04:37:44 GMT
I0223 04:37:44.562126 154064 request.go:1171] Response Body: {"kind":"PodList","apiVersion":"v1","metadata":{"resourceVersion":"333"},"items":[{"metadata":{"name":"etcd-multinode-541903","namespace":"kube-system","uid":"d4f461e5-807f-4ab5-9619-f9678e053114","resourceVersion":"274","creationTimestamp":"2023-02-23T04:37:32Z","labels":{"component":"etcd","tier":"control-plane"},"annotations":{"kubeadm.kubernetes.io/etcd.advertise-client-urls":"https://192.168.58.2:2379","kubernetes.io/config.hash":"6d6c313fb5d82868974d1b5864c9ad55","kubernetes.io/config.mirror":"6d6c313fb5d82868974d1b5864c9ad55","kubernetes.io/config.seen":"2023-02-23T04:37:31.811093863Z","kubernetes.io/config.source":"file"},"ownerReferences":[{"apiVersion":"v1","kind":"Node","name":"multinode-541903","uid":"d8ecdc0c-ca08-4762-8f06-41e51e5fca21","controller":true}],"managedFields":[{"manager":"kubelet","operation":"Update","apiVersion":"v1","time":"2023-02-23T04:37:32Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{".":{},"f:kubeadm.kubernetes.io/etcd.advertise-client-urls":{},"f:kub [truncated 43441 chars]
I0223 04:37:44.566131 154064 pod_ready.go:78] waiting up to 6m0s for pod "etcd-multinode-541903" in "kube-system" namespace to be "Ready" ...
I0223 04:37:44.566201 154064 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/namespaces/kube-system/pods/etcd-multinode-541903
I0223 04:37:44.566208 154064 round_trippers.go:469] Request Headers:
I0223 04:37:44.566219 154064 round_trippers.go:473] User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
I0223 04:37:44.566228 154064 round_trippers.go:473] Accept: application/json, */*
I0223 04:37:44.568290 154064 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
I0223 04:37:44.568309 154064 round_trippers.go:577] Response Headers:
I0223 04:37:44.568320 154064 round_trippers.go:580] X-Kubernetes-Pf-Prioritylevel-Uid: d9dafd77-df4a-4035-9904-af3d6a0be465
I0223 04:37:44.568329 154064 round_trippers.go:580] Date: Thu, 23 Feb 2023 04:37:44 GMT
I0223 04:37:44.568337 154064 round_trippers.go:580] Audit-Id: 4ac340b3-8230-4628-a0a3-d3ee0dcc2def
I0223 04:37:44.568346 154064 round_trippers.go:580] Cache-Control: no-cache, private
I0223 04:37:44.568355 154064 round_trippers.go:580] Content-Type: application/json
I0223 04:37:44.568363 154064 round_trippers.go:580] X-Kubernetes-Pf-Flowschema-Uid: 0e806fdb-28a0-48db-9063-d67fdd19ddd0
I0223 04:37:44.568482 154064 request.go:1171] Response Body: {"kind":"Pod","apiVersion":"v1","metadata":{"name":"etcd-multinode-541903","namespace":"kube-system","uid":"d4f461e5-807f-4ab5-9619-f9678e053114","resourceVersion":"274","creationTimestamp":"2023-02-23T04:37:32Z","labels":{"component":"etcd","tier":"control-plane"},"annotations":{"kubeadm.kubernetes.io/etcd.advertise-client-urls":"https://192.168.58.2:2379","kubernetes.io/config.hash":"6d6c313fb5d82868974d1b5864c9ad55","kubernetes.io/config.mirror":"6d6c313fb5d82868974d1b5864c9ad55","kubernetes.io/config.seen":"2023-02-23T04:37:31.811093863Z","kubernetes.io/config.source":"file"},"ownerReferences":[{"apiVersion":"v1","kind":"Node","name":"multinode-541903","uid":"d8ecdc0c-ca08-4762-8f06-41e51e5fca21","controller":true}],"managedFields":[{"manager":"kubelet","operation":"Update","apiVersion":"v1","time":"2023-02-23T04:37:32Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{".":{},"f:kubeadm.kubernetes.io/etcd.advertise-client-urls":{},"f:kubernetes.io/config.hash":{},"f:kubernetes.io/config. [truncated 5836 chars]
I0223 04:37:44.568911 154064 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-541903
I0223 04:37:44.568922 154064 round_trippers.go:469] Request Headers:
I0223 04:37:44.568932 154064 round_trippers.go:473] Accept: application/json, */*
I0223 04:37:44.568943 154064 round_trippers.go:473] User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
I0223 04:37:44.570795 154064 round_trippers.go:574] Response Status: 200 OK in 1 milliseconds
I0223 04:37:44.570809 154064 round_trippers.go:577] Response Headers:
I0223 04:37:44.570816 154064 round_trippers.go:580] X-Kubernetes-Pf-Prioritylevel-Uid: d9dafd77-df4a-4035-9904-af3d6a0be465
I0223 04:37:44.570824 154064 round_trippers.go:580] Date: Thu, 23 Feb 2023 04:37:44 GMT
I0223 04:37:44.570832 154064 round_trippers.go:580] Audit-Id: b19eaa4a-3656-4831-9c5f-7ea424c8294b
I0223 04:37:44.570840 154064 round_trippers.go:580] Cache-Control: no-cache, private
I0223 04:37:44.570849 154064 round_trippers.go:580] Content-Type: application/json
I0223 04:37:44.570857 154064 round_trippers.go:580] X-Kubernetes-Pf-Flowschema-Uid: 0e806fdb-28a0-48db-9063-d67fdd19ddd0
I0223 04:37:44.570948 154064 request.go:1171] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-541903","uid":"d8ecdc0c-ca08-4762-8f06-41e51e5fca21","resourceVersion":"309","creationTimestamp":"2023-02-23T04:37:28Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-541903","kubernetes.io/os":"linux","minikube.k8s.io/commit":"66d56dc3ac28a702789778ac47e90f12526a0321","minikube.k8s.io/name":"multinode-541903","minikube.k8s.io/primary":"true","minikube.k8s.io/updated_at":"2023_02_23T04_37_32_0700","minikube.k8s.io/version":"v1.29.0","node-role.kubernetes.io/control-plane":"","node.kubernetes.io/exclude-from-external-load-balancers":""},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"unix:///var/run/cri-dockerd.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kubelet","operation":"Update","apiVersion":"v1","time":"2023-02-23T04:37:28Z","fieldsType":"FieldsV1","fi [truncated 4999 chars]
I0223 04:37:44.571223 154064 pod_ready.go:92] pod "etcd-multinode-541903" in "kube-system" namespace has status "Ready":"True"
I0223 04:37:44.571230 154064 pod_ready.go:81] duration metric: took 5.078474ms waiting for pod "etcd-multinode-541903" in "kube-system" namespace to be "Ready" ...
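pod_ready.go repeats the same pattern for each system-critical pod: fetch the Pod, check its Ready condition, then re-fetch the Node. The same check for the etcd pod, expressed with kubectl's built-in waiter and the labels visible in the pod metadata above:

kubectl -n kube-system wait pod -l component=etcd,tier=control-plane --for=condition=Ready --timeout=6m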
I0223 04:37:44.571239 154064 pod_ready.go:78] waiting up to 6m0s for pod "kube-apiserver-multinode-541903" in "kube-system" namespace to be "Ready" ...
I0223 04:37:44.571277 154064 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/namespaces/kube-system/pods/kube-apiserver-multinode-541903
I0223 04:37:44.571281 154064 round_trippers.go:469] Request Headers:
I0223 04:37:44.571287 154064 round_trippers.go:473] Accept: application/json, */*
I0223 04:37:44.571293 154064 round_trippers.go:473] User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
I0223 04:37:44.588855 154064 round_trippers.go:574] Response Status: 200 OK in 17 milliseconds
I0223 04:37:44.588876 154064 round_trippers.go:577] Response Headers:
I0223 04:37:44.588886 154064 round_trippers.go:580] Content-Type: application/json
I0223 04:37:44.588895 154064 round_trippers.go:580] X-Kubernetes-Pf-Flowschema-Uid: 0e806fdb-28a0-48db-9063-d67fdd19ddd0
I0223 04:37:44.588927 154064 round_trippers.go:580] X-Kubernetes-Pf-Prioritylevel-Uid: d9dafd77-df4a-4035-9904-af3d6a0be465
I0223 04:37:44.588940 154064 round_trippers.go:580] Date: Thu, 23 Feb 2023 04:37:44 GMT
I0223 04:37:44.588949 154064 round_trippers.go:580] Audit-Id: b3f4b2d6-9345-4288-89be-f05787508ee0
I0223 04:37:44.588959 154064 round_trippers.go:580] Cache-Control: no-cache, private
I0223 04:37:44.589191 154064 request.go:1171] Response Body: {"kind":"Pod","apiVersion":"v1","metadata":{"name":"kube-apiserver-multinode-541903","namespace":"kube-system","uid":"4dac4fc4-a548-48fb-a9d3-2caab1386063","resourceVersion":"271","creationTimestamp":"2023-02-23T04:37:32Z","labels":{"component":"kube-apiserver","tier":"control-plane"},"annotations":{"kubeadm.kubernetes.io/kube-apiserver.advertise-address.endpoint":"192.168.58.2:8443","kubernetes.io/config.hash":"2445aa6163a4ba977c3760378d3466e2","kubernetes.io/config.mirror":"2445aa6163a4ba977c3760378d3466e2","kubernetes.io/config.seen":"2023-02-23T04:37:31.811114466Z","kubernetes.io/config.source":"file"},"ownerReferences":[{"apiVersion":"v1","kind":"Node","name":"multinode-541903","uid":"d8ecdc0c-ca08-4762-8f06-41e51e5fca21","controller":true}],"managedFields":[{"manager":"kubelet","operation":"Update","apiVersion":"v1","time":"2023-02-23T04:37:32Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{".":{},"f:kubeadm.kubernetes.io/kube-apiserver.advertise-address.endpoint":{},"f:kubernetes.i [truncated 8466 chars]
I0223 04:37:44.589745 154064 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-541903
I0223 04:37:44.589756 154064 round_trippers.go:469] Request Headers:
I0223 04:37:44.589764 154064 round_trippers.go:473] Accept: application/json, */*
I0223 04:37:44.589772 154064 round_trippers.go:473] User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
I0223 04:37:44.591474 154064 round_trippers.go:574] Response Status: 200 OK in 1 milliseconds
I0223 04:37:44.591489 154064 round_trippers.go:577] Response Headers:
I0223 04:37:44.591496 154064 round_trippers.go:580] Cache-Control: no-cache, private
I0223 04:37:44.591502 154064 round_trippers.go:580] Content-Type: application/json
I0223 04:37:44.591507 154064 round_trippers.go:580] X-Kubernetes-Pf-Flowschema-Uid: 0e806fdb-28a0-48db-9063-d67fdd19ddd0
I0223 04:37:44.591517 154064 round_trippers.go:580] X-Kubernetes-Pf-Prioritylevel-Uid: d9dafd77-df4a-4035-9904-af3d6a0be465
I0223 04:37:44.591522 154064 round_trippers.go:580] Date: Thu, 23 Feb 2023 04:37:44 GMT
I0223 04:37:44.591532 154064 round_trippers.go:580] Audit-Id: 3363499d-da4f-45e7-b340-e8758374d8d9
I0223 04:37:44.591621 154064 request.go:1171] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-541903","uid":"d8ecdc0c-ca08-4762-8f06-41e51e5fca21","resourceVersion":"309","creationTimestamp":"2023-02-23T04:37:28Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-541903","kubernetes.io/os":"linux","minikube.k8s.io/commit":"66d56dc3ac28a702789778ac47e90f12526a0321","minikube.k8s.io/name":"multinode-541903","minikube.k8s.io/primary":"true","minikube.k8s.io/updated_at":"2023_02_23T04_37_32_0700","minikube.k8s.io/version":"v1.29.0","node-role.kubernetes.io/control-plane":"","node.kubernetes.io/exclude-from-external-load-balancers":""},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"unix:///var/run/cri-dockerd.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kubelet","operation":"Update","apiVersion":"v1","time":"2023-02-23T04:37:28Z","fieldsType":"FieldsV1","fi [truncated 4999 chars]
I0223 04:37:44.639652 154064 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32855 SSHKeyPath:/home/jenkins/minikube-integration/15909-3701/.minikube/machines/multinode-541903/id_rsa Username:docker}
I0223 04:37:44.659993 154064 addons.go:419] installing /etc/kubernetes/addons/storageclass.yaml
I0223 04:37:44.660017 154064 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/storageclass.yaml (271 bytes)
I0223 04:37:44.660099 154064 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" multinode-541903
I0223 04:37:44.744124 154064 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32855 SSHKeyPath:/home/jenkins/minikube-integration/15909-3701/.minikube/machines/multinode-541903/id_rsa Username:docker}
I0223 04:37:44.804913 154064 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.26.1/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml
I0223 04:37:44.993431 154064 command_runner.go:130] > configmap/coredns replaced
I0223 04:37:44.997960 154064 start.go:921] {"host.minikube.internal": 192.168.58.1} host record injected into CoreDNS's ConfigMap
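The "host record injected" step above is minikube rewriting CoreDNS's ConfigMap so that host.minikube.internal resolves to the host-side gateway address (192.168.58.1 here). A minimal sketch of how the result could be inspected with client-go; the kubeconfig path and the assumption that the entry lands in the ConfigMap's "Corefile" key are illustrative, not taken from this log:

package main

import (
	"context"
	"fmt"
	"log"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Illustrative: load whatever kubeconfig the current shell points at.
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		log.Fatal(err)
	}
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		log.Fatal(err)
	}
	// CoreDNS keeps its configuration in the "coredns" ConfigMap in kube-system;
	// after the injection above, a host.minikube.internal entry should show up here.
	cm, err := cs.CoreV1().ConfigMaps("kube-system").Get(context.Background(), "coredns", metav1.GetOptions{})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(cm.Data["Corefile"])
}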
I0223 04:37:45.001727 154064 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.26.1/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml
I0223 04:37:45.092845 154064 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/namespaces/kube-system/pods/kube-apiserver-multinode-541903
I0223 04:37:45.092867 154064 round_trippers.go:469] Request Headers:
I0223 04:37:45.092880 154064 round_trippers.go:473] Accept: application/json, */*
I0223 04:37:45.092890 154064 round_trippers.go:473] User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
I0223 04:37:45.095346 154064 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
I0223 04:37:45.095371 154064 round_trippers.go:577] Response Headers:
I0223 04:37:45.095383 154064 round_trippers.go:580] X-Kubernetes-Pf-Flowschema-Uid: 0e806fdb-28a0-48db-9063-d67fdd19ddd0
I0223 04:37:45.095392 154064 round_trippers.go:580] X-Kubernetes-Pf-Prioritylevel-Uid: d9dafd77-df4a-4035-9904-af3d6a0be465
I0223 04:37:45.095402 154064 round_trippers.go:580] Date: Thu, 23 Feb 2023 04:37:45 GMT
I0223 04:37:45.095414 154064 round_trippers.go:580] Audit-Id: c789dd33-3725-4a7c-a64d-a1a554a63b00
I0223 04:37:45.095423 154064 round_trippers.go:580] Cache-Control: no-cache, private
I0223 04:37:45.095438 154064 round_trippers.go:580] Content-Type: application/json
I0223 04:37:45.095653 154064 request.go:1171] Response Body: {"kind":"Pod","apiVersion":"v1","metadata":{"name":"kube-apiserver-multinode-541903","namespace":"kube-system","uid":"4dac4fc4-a548-48fb-a9d3-2caab1386063","resourceVersion":"271","creationTimestamp":"2023-02-23T04:37:32Z","labels":{"component":"kube-apiserver","tier":"control-plane"},"annotations":{"kubeadm.kubernetes.io/kube-apiserver.advertise-address.endpoint":"192.168.58.2:8443","kubernetes.io/config.hash":"2445aa6163a4ba977c3760378d3466e2","kubernetes.io/config.mirror":"2445aa6163a4ba977c3760378d3466e2","kubernetes.io/config.seen":"2023-02-23T04:37:31.811114466Z","kubernetes.io/config.source":"file"},"ownerReferences":[{"apiVersion":"v1","kind":"Node","name":"multinode-541903","uid":"d8ecdc0c-ca08-4762-8f06-41e51e5fca21","controller":true}],"managedFields":[{"manager":"kubelet","operation":"Update","apiVersion":"v1","time":"2023-02-23T04:37:32Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{".":{},"f:kubeadm.kubernetes.io/kube-apiserver.advertise-address.endpoint":{},"f:kubernetes.i [truncated 8466 chars]
I0223 04:37:45.096272 154064 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-541903
I0223 04:37:45.096290 154064 round_trippers.go:469] Request Headers:
I0223 04:37:45.096301 154064 round_trippers.go:473] Accept: application/json, */*
I0223 04:37:45.096312 154064 round_trippers.go:473] User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
I0223 04:37:45.098398 154064 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
I0223 04:37:45.098422 154064 round_trippers.go:577] Response Headers:
I0223 04:37:45.098433 154064 round_trippers.go:580] Audit-Id: cd2d9fad-94df-47b9-9db2-fadb7b11e19d
I0223 04:37:45.098449 154064 round_trippers.go:580] Cache-Control: no-cache, private
I0223 04:37:45.098462 154064 round_trippers.go:580] Content-Type: application/json
I0223 04:37:45.098471 154064 round_trippers.go:580] X-Kubernetes-Pf-Flowschema-Uid: 0e806fdb-28a0-48db-9063-d67fdd19ddd0
I0223 04:37:45.098480 154064 round_trippers.go:580] X-Kubernetes-Pf-Prioritylevel-Uid: d9dafd77-df4a-4035-9904-af3d6a0be465
I0223 04:37:45.098493 154064 round_trippers.go:580] Date: Thu, 23 Feb 2023 04:37:45 GMT
I0223 04:37:45.098584 154064 request.go:1171] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-541903","uid":"d8ecdc0c-ca08-4762-8f06-41e51e5fca21","resourceVersion":"309","creationTimestamp":"2023-02-23T04:37:28Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-541903","kubernetes.io/os":"linux","minikube.k8s.io/commit":"66d56dc3ac28a702789778ac47e90f12526a0321","minikube.k8s.io/name":"multinode-541903","minikube.k8s.io/primary":"true","minikube.k8s.io/updated_at":"2023_02_23T04_37_32_0700","minikube.k8s.io/version":"v1.29.0","node-role.kubernetes.io/control-plane":"","node.kubernetes.io/exclude-from-external-load-balancers":""},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"unix:///var/run/cri-dockerd.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kubelet","operation":"Update","apiVersion":"v1","time":"2023-02-23T04:37:28Z","fieldsType":"FieldsV1","fi [truncated 4999 chars]
I0223 04:37:45.218585 154064 command_runner.go:130] > serviceaccount/storage-provisioner created
I0223 04:37:45.289997 154064 command_runner.go:130] > clusterrolebinding.rbac.authorization.k8s.io/storage-provisioner created
I0223 04:37:45.300187 154064 command_runner.go:130] > role.rbac.authorization.k8s.io/system:persistent-volume-provisioner created
I0223 04:37:45.306957 154064 command_runner.go:130] > rolebinding.rbac.authorization.k8s.io/system:persistent-volume-provisioner created
I0223 04:37:45.314774 154064 command_runner.go:130] > endpoints/k8s.io-minikube-hostpath created
I0223 04:37:45.324067 154064 command_runner.go:130] > pod/storage-provisioner created
I0223 04:37:45.496147 154064 command_runner.go:130] > storageclass.storage.k8s.io/standard created
I0223 04:37:45.502159 154064 out.go:177] * Enabled addons: storage-provisioner, default-storageclass
I0223 04:37:45.503557 154064 addons.go:492] enable addons completed in 1.038466011s: enabled=[storage-provisioner default-storageclass]
I0223 04:37:45.593261 154064 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/namespaces/kube-system/pods/kube-apiserver-multinode-541903
I0223 04:37:45.593289 154064 round_trippers.go:469] Request Headers:
I0223 04:37:45.593301 154064 round_trippers.go:473] Accept: application/json, */*
I0223 04:37:45.593312 154064 round_trippers.go:473] User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
I0223 04:37:45.595901 154064 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
I0223 04:37:45.595934 154064 round_trippers.go:577] Response Headers:
I0223 04:37:45.595944 154064 round_trippers.go:580] Date: Thu, 23 Feb 2023 04:37:45 GMT
I0223 04:37:45.595952 154064 round_trippers.go:580] Audit-Id: 02f2eb0e-72ee-42b5-a943-50dbc2eded44
I0223 04:37:45.595960 154064 round_trippers.go:580] Cache-Control: no-cache, private
I0223 04:37:45.595968 154064 round_trippers.go:580] Content-Type: application/json
I0223 04:37:45.595975 154064 round_trippers.go:580] X-Kubernetes-Pf-Flowschema-Uid: 0e806fdb-28a0-48db-9063-d67fdd19ddd0
I0223 04:37:45.595983 154064 round_trippers.go:580] X-Kubernetes-Pf-Prioritylevel-Uid: d9dafd77-df4a-4035-9904-af3d6a0be465
I0223 04:37:45.596187 154064 request.go:1171] Response Body: {"kind":"Pod","apiVersion":"v1","metadata":{"name":"kube-apiserver-multinode-541903","namespace":"kube-system","uid":"4dac4fc4-a548-48fb-a9d3-2caab1386063","resourceVersion":"271","creationTimestamp":"2023-02-23T04:37:32Z","labels":{"component":"kube-apiserver","tier":"control-plane"},"annotations":{"kubeadm.kubernetes.io/kube-apiserver.advertise-address.endpoint":"192.168.58.2:8443","kubernetes.io/config.hash":"2445aa6163a4ba977c3760378d3466e2","kubernetes.io/config.mirror":"2445aa6163a4ba977c3760378d3466e2","kubernetes.io/config.seen":"2023-02-23T04:37:31.811114466Z","kubernetes.io/config.source":"file"},"ownerReferences":[{"apiVersion":"v1","kind":"Node","name":"multinode-541903","uid":"d8ecdc0c-ca08-4762-8f06-41e51e5fca21","controller":true}],"managedFields":[{"manager":"kubelet","operation":"Update","apiVersion":"v1","time":"2023-02-23T04:37:32Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{".":{},"f:kubeadm.kubernetes.io/kube-apiserver.advertise-address.endpoint":{},"f:kubernetes.i [truncated 8466 chars]
I0223 04:37:45.596848 154064 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-541903
I0223 04:37:45.596863 154064 round_trippers.go:469] Request Headers:
I0223 04:37:45.596873 154064 round_trippers.go:473] Accept: application/json, */*
I0223 04:37:45.596881 154064 round_trippers.go:473] User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
I0223 04:37:45.598791 154064 round_trippers.go:574] Response Status: 200 OK in 1 milliseconds
I0223 04:37:45.598813 154064 round_trippers.go:577] Response Headers:
I0223 04:37:45.598824 154064 round_trippers.go:580] Audit-Id: 021de902-0753-4005-b456-8d0f62975485
I0223 04:37:45.598833 154064 round_trippers.go:580] Cache-Control: no-cache, private
I0223 04:37:45.598854 154064 round_trippers.go:580] Content-Type: application/json
I0223 04:37:45.598867 154064 round_trippers.go:580] X-Kubernetes-Pf-Flowschema-Uid: 0e806fdb-28a0-48db-9063-d67fdd19ddd0
I0223 04:37:45.598876 154064 round_trippers.go:580] X-Kubernetes-Pf-Prioritylevel-Uid: d9dafd77-df4a-4035-9904-af3d6a0be465
I0223 04:37:45.598886 154064 round_trippers.go:580] Date: Thu, 23 Feb 2023 04:37:45 GMT
I0223 04:37:45.598985 154064 request.go:1171] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-541903","uid":"d8ecdc0c-ca08-4762-8f06-41e51e5fca21","resourceVersion":"309","creationTimestamp":"2023-02-23T04:37:28Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-541903","kubernetes.io/os":"linux","minikube.k8s.io/commit":"66d56dc3ac28a702789778ac47e90f12526a0321","minikube.k8s.io/name":"multinode-541903","minikube.k8s.io/primary":"true","minikube.k8s.io/updated_at":"2023_02_23T04_37_32_0700","minikube.k8s.io/version":"v1.29.0","node-role.kubernetes.io/control-plane":"","node.kubernetes.io/exclude-from-external-load-balancers":""},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"unix:///var/run/cri-dockerd.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kubelet","operation":"Update","apiVersion":"v1","time":"2023-02-23T04:37:28Z","fieldsType":"FieldsV1","fi [truncated 4999 chars]
I0223 04:37:46.093136 154064 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/namespaces/kube-system/pods/kube-apiserver-multinode-541903
I0223 04:37:46.093164 154064 round_trippers.go:469] Request Headers:
I0223 04:37:46.093177 154064 round_trippers.go:473] Accept: application/json, */*
I0223 04:37:46.093187 154064 round_trippers.go:473] User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
I0223 04:37:46.095740 154064 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
I0223 04:37:46.095794 154064 round_trippers.go:577] Response Headers:
I0223 04:37:46.095807 154064 round_trippers.go:580] Audit-Id: f154b82a-c8b8-49ca-b9d5-14fb80743927
I0223 04:37:46.095818 154064 round_trippers.go:580] Cache-Control: no-cache, private
I0223 04:37:46.095832 154064 round_trippers.go:580] Content-Type: application/json
I0223 04:37:46.095843 154064 round_trippers.go:580] X-Kubernetes-Pf-Flowschema-Uid: 0e806fdb-28a0-48db-9063-d67fdd19ddd0
I0223 04:37:46.095853 154064 round_trippers.go:580] X-Kubernetes-Pf-Prioritylevel-Uid: d9dafd77-df4a-4035-9904-af3d6a0be465
I0223 04:37:46.095874 154064 round_trippers.go:580] Date: Thu, 23 Feb 2023 04:37:46 GMT
I0223 04:37:46.096083 154064 request.go:1171] Response Body: {"kind":"Pod","apiVersion":"v1","metadata":{"name":"kube-apiserver-multinode-541903","namespace":"kube-system","uid":"4dac4fc4-a548-48fb-a9d3-2caab1386063","resourceVersion":"271","creationTimestamp":"2023-02-23T04:37:32Z","labels":{"component":"kube-apiserver","tier":"control-plane"},"annotations":{"kubeadm.kubernetes.io/kube-apiserver.advertise-address.endpoint":"192.168.58.2:8443","kubernetes.io/config.hash":"2445aa6163a4ba977c3760378d3466e2","kubernetes.io/config.mirror":"2445aa6163a4ba977c3760378d3466e2","kubernetes.io/config.seen":"2023-02-23T04:37:31.811114466Z","kubernetes.io/config.source":"file"},"ownerReferences":[{"apiVersion":"v1","kind":"Node","name":"multinode-541903","uid":"d8ecdc0c-ca08-4762-8f06-41e51e5fca21","controller":true}],"managedFields":[{"manager":"kubelet","operation":"Update","apiVersion":"v1","time":"2023-02-23T04:37:32Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{".":{},"f:kubeadm.kubernetes.io/kube-apiserver.advertise-address.endpoint":{},"f:kubernetes.i [truncated 8466 chars]
I0223 04:37:46.096667 154064 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-541903
I0223 04:37:46.096683 154064 round_trippers.go:469] Request Headers:
I0223 04:37:46.096693 154064 round_trippers.go:473] Accept: application/json, */*
I0223 04:37:46.096702 154064 round_trippers.go:473] User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
I0223 04:37:46.098777 154064 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
I0223 04:37:46.098821 154064 round_trippers.go:577] Response Headers:
I0223 04:37:46.098842 154064 round_trippers.go:580] X-Kubernetes-Pf-Flowschema-Uid: 0e806fdb-28a0-48db-9063-d67fdd19ddd0
I0223 04:37:46.098852 154064 round_trippers.go:580] X-Kubernetes-Pf-Prioritylevel-Uid: d9dafd77-df4a-4035-9904-af3d6a0be465
I0223 04:37:46.098865 154064 round_trippers.go:580] Date: Thu, 23 Feb 2023 04:37:46 GMT
I0223 04:37:46.098874 154064 round_trippers.go:580] Audit-Id: a87e80e6-0f82-4d59-9847-6abe5a3f7039
I0223 04:37:46.098884 154064 round_trippers.go:580] Cache-Control: no-cache, private
I0223 04:37:46.098893 154064 round_trippers.go:580] Content-Type: application/json
I0223 04:37:46.099012 154064 request.go:1171] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-541903","uid":"d8ecdc0c-ca08-4762-8f06-41e51e5fca21","resourceVersion":"309","creationTimestamp":"2023-02-23T04:37:28Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-541903","kubernetes.io/os":"linux","minikube.k8s.io/commit":"66d56dc3ac28a702789778ac47e90f12526a0321","minikube.k8s.io/name":"multinode-541903","minikube.k8s.io/primary":"true","minikube.k8s.io/updated_at":"2023_02_23T04_37_32_0700","minikube.k8s.io/version":"v1.29.0","node-role.kubernetes.io/control-plane":"","node.kubernetes.io/exclude-from-external-load-balancers":""},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"unix:///var/run/cri-dockerd.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kubelet","operation":"Update","apiVersion":"v1","time":"2023-02-23T04:37:28Z","fieldsType":"FieldsV1","fi [truncated 4999 chars]
I0223 04:37:46.592997 154064 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/namespaces/kube-system/pods/kube-apiserver-multinode-541903
I0223 04:37:46.593060 154064 round_trippers.go:469] Request Headers:
I0223 04:37:46.593081 154064 round_trippers.go:473] Accept: application/json, */*
I0223 04:37:46.593098 154064 round_trippers.go:473] User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
I0223 04:37:46.595440 154064 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
I0223 04:37:46.595469 154064 round_trippers.go:577] Response Headers:
I0223 04:37:46.595481 154064 round_trippers.go:580] Audit-Id: 4fd7f97b-93c4-4066-83e3-303b0a495f31
I0223 04:37:46.595502 154064 round_trippers.go:580] Cache-Control: no-cache, private
I0223 04:37:46.595515 154064 round_trippers.go:580] Content-Type: application/json
I0223 04:37:46.595527 154064 round_trippers.go:580] X-Kubernetes-Pf-Flowschema-Uid: 0e806fdb-28a0-48db-9063-d67fdd19ddd0
I0223 04:37:46.595540 154064 round_trippers.go:580] X-Kubernetes-Pf-Prioritylevel-Uid: d9dafd77-df4a-4035-9904-af3d6a0be465
I0223 04:37:46.595562 154064 round_trippers.go:580] Date: Thu, 23 Feb 2023 04:37:46 GMT
I0223 04:37:46.595750 154064 request.go:1171] Response Body: {"kind":"Pod","apiVersion":"v1","metadata":{"name":"kube-apiserver-multinode-541903","namespace":"kube-system","uid":"4dac4fc4-a548-48fb-a9d3-2caab1386063","resourceVersion":"367","creationTimestamp":"2023-02-23T04:37:32Z","labels":{"component":"kube-apiserver","tier":"control-plane"},"annotations":{"kubeadm.kubernetes.io/kube-apiserver.advertise-address.endpoint":"192.168.58.2:8443","kubernetes.io/config.hash":"2445aa6163a4ba977c3760378d3466e2","kubernetes.io/config.mirror":"2445aa6163a4ba977c3760378d3466e2","kubernetes.io/config.seen":"2023-02-23T04:37:31.811114466Z","kubernetes.io/config.source":"file"},"ownerReferences":[{"apiVersion":"v1","kind":"Node","name":"multinode-541903","uid":"d8ecdc0c-ca08-4762-8f06-41e51e5fca21","controller":true}],"managedFields":[{"manager":"kubelet","operation":"Update","apiVersion":"v1","time":"2023-02-23T04:37:32Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{".":{},"f:kubeadm.kubernetes.io/kube-apiserver.advertise-address.endpoint":{},"f:kubernetes.i [truncated 8465 chars]
I0223 04:37:46.596367 154064 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-541903
I0223 04:37:46.596388 154064 round_trippers.go:469] Request Headers:
I0223 04:37:46.596399 154064 round_trippers.go:473] Accept: application/json, */*
I0223 04:37:46.596409 154064 round_trippers.go:473] User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
I0223 04:37:46.598292 154064 round_trippers.go:574] Response Status: 200 OK in 1 milliseconds
I0223 04:37:46.598356 154064 round_trippers.go:577] Response Headers:
I0223 04:37:46.598383 154064 round_trippers.go:580] X-Kubernetes-Pf-Prioritylevel-Uid: d9dafd77-df4a-4035-9904-af3d6a0be465
I0223 04:37:46.598403 154064 round_trippers.go:580] Date: Thu, 23 Feb 2023 04:37:46 GMT
I0223 04:37:46.598426 154064 round_trippers.go:580] Audit-Id: 122c00ed-556d-4f8d-9df0-eb7ec2844fe5
I0223 04:37:46.598460 154064 round_trippers.go:580] Cache-Control: no-cache, private
I0223 04:37:46.598478 154064 round_trippers.go:580] Content-Type: application/json
I0223 04:37:46.598508 154064 round_trippers.go:580] X-Kubernetes-Pf-Flowschema-Uid: 0e806fdb-28a0-48db-9063-d67fdd19ddd0
I0223 04:37:46.598628 154064 request.go:1171] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-541903","uid":"d8ecdc0c-ca08-4762-8f06-41e51e5fca21","resourceVersion":"309","creationTimestamp":"2023-02-23T04:37:28Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-541903","kubernetes.io/os":"linux","minikube.k8s.io/commit":"66d56dc3ac28a702789778ac47e90f12526a0321","minikube.k8s.io/name":"multinode-541903","minikube.k8s.io/primary":"true","minikube.k8s.io/updated_at":"2023_02_23T04_37_32_0700","minikube.k8s.io/version":"v1.29.0","node-role.kubernetes.io/control-plane":"","node.kubernetes.io/exclude-from-external-load-balancers":""},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"unix:///var/run/cri-dockerd.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kubelet","operation":"Update","apiVersion":"v1","time":"2023-02-23T04:37:28Z","fieldsType":"FieldsV1","fi [truncated 4999 chars]
I0223 04:37:46.599024 154064 pod_ready.go:102] pod "kube-apiserver-multinode-541903" in "kube-system" namespace has status "Ready":"False"
I0223 04:37:47.092695 154064 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/namespaces/kube-system/pods/kube-apiserver-multinode-541903
I0223 04:37:47.092778 154064 round_trippers.go:469] Request Headers:
I0223 04:37:47.092796 154064 round_trippers.go:473] Accept: application/json, */*
I0223 04:37:47.092806 154064 round_trippers.go:473] User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
I0223 04:37:47.095172 154064 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
I0223 04:37:47.095196 154064 round_trippers.go:577] Response Headers:
I0223 04:37:47.095208 154064 round_trippers.go:580] Cache-Control: no-cache, private
I0223 04:37:47.095267 154064 round_trippers.go:580] Content-Type: application/json
I0223 04:37:47.095280 154064 round_trippers.go:580] X-Kubernetes-Pf-Flowschema-Uid: 0e806fdb-28a0-48db-9063-d67fdd19ddd0
I0223 04:37:47.095290 154064 round_trippers.go:580] X-Kubernetes-Pf-Prioritylevel-Uid: d9dafd77-df4a-4035-9904-af3d6a0be465
I0223 04:37:47.095302 154064 round_trippers.go:580] Date: Thu, 23 Feb 2023 04:37:47 GMT
I0223 04:37:47.095313 154064 round_trippers.go:580] Audit-Id: 3afe9c16-3547-432d-ad77-cbdd8d5a8b88
I0223 04:37:47.095458 154064 request.go:1171] Response Body: {"kind":"Pod","apiVersion":"v1","metadata":{"name":"kube-apiserver-multinode-541903","namespace":"kube-system","uid":"4dac4fc4-a548-48fb-a9d3-2caab1386063","resourceVersion":"377","creationTimestamp":"2023-02-23T04:37:32Z","labels":{"component":"kube-apiserver","tier":"control-plane"},"annotations":{"kubeadm.kubernetes.io/kube-apiserver.advertise-address.endpoint":"192.168.58.2:8443","kubernetes.io/config.hash":"2445aa6163a4ba977c3760378d3466e2","kubernetes.io/config.mirror":"2445aa6163a4ba977c3760378d3466e2","kubernetes.io/config.seen":"2023-02-23T04:37:31.811114466Z","kubernetes.io/config.source":"file"},"ownerReferences":[{"apiVersion":"v1","kind":"Node","name":"multinode-541903","uid":"d8ecdc0c-ca08-4762-8f06-41e51e5fca21","controller":true}],"managedFields":[{"manager":"kubelet","operation":"Update","apiVersion":"v1","time":"2023-02-23T04:37:32Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{".":{},"f:kubeadm.kubernetes.io/kube-apiserver.advertise-address.endpoint":{},"f:kubernetes.i [truncated 8222 chars]
I0223 04:37:47.095947 154064 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-541903
I0223 04:37:47.095963 154064 round_trippers.go:469] Request Headers:
I0223 04:37:47.095973 154064 round_trippers.go:473] Accept: application/json, */*
I0223 04:37:47.095983 154064 round_trippers.go:473] User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
I0223 04:37:47.097933 154064 round_trippers.go:574] Response Status: 200 OK in 1 milliseconds
I0223 04:37:47.097953 154064 round_trippers.go:577] Response Headers:
I0223 04:37:47.097964 154064 round_trippers.go:580] Audit-Id: d7603004-1858-4eb3-bb3e-8fb8a5de237d
I0223 04:37:47.097974 154064 round_trippers.go:580] Cache-Control: no-cache, private
I0223 04:37:47.097987 154064 round_trippers.go:580] Content-Type: application/json
I0223 04:37:47.097997 154064 round_trippers.go:580] X-Kubernetes-Pf-Flowschema-Uid: 0e806fdb-28a0-48db-9063-d67fdd19ddd0
I0223 04:37:47.098009 154064 round_trippers.go:580] X-Kubernetes-Pf-Prioritylevel-Uid: d9dafd77-df4a-4035-9904-af3d6a0be465
I0223 04:37:47.098022 154064 round_trippers.go:580] Date: Thu, 23 Feb 2023 04:37:47 GMT
I0223 04:37:47.098110 154064 request.go:1171] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-541903","uid":"d8ecdc0c-ca08-4762-8f06-41e51e5fca21","resourceVersion":"309","creationTimestamp":"2023-02-23T04:37:28Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-541903","kubernetes.io/os":"linux","minikube.k8s.io/commit":"66d56dc3ac28a702789778ac47e90f12526a0321","minikube.k8s.io/name":"multinode-541903","minikube.k8s.io/primary":"true","minikube.k8s.io/updated_at":"2023_02_23T04_37_32_0700","minikube.k8s.io/version":"v1.29.0","node-role.kubernetes.io/control-plane":"","node.kubernetes.io/exclude-from-external-load-balancers":""},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"unix:///var/run/cri-dockerd.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kubelet","operation":"Update","apiVersion":"v1","time":"2023-02-23T04:37:28Z","fieldsType":"FieldsV1","fi [truncated 4999 chars]
I0223 04:37:47.098472 154064 pod_ready.go:92] pod "kube-apiserver-multinode-541903" in "kube-system" namespace has status "Ready":"True"
I0223 04:37:47.098497 154064 pod_ready.go:81] duration metric: took 2.527251157s waiting for pod "kube-apiserver-multinode-541903" in "kube-system" namespace to be "Ready" ...
I0223 04:37:47.098511 154064 pod_ready.go:78] waiting up to 6m0s for pod "kube-controller-manager-multinode-541903" in "kube-system" namespace to be "Ready" ...
I0223 04:37:47.098567 154064 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/namespaces/kube-system/pods/kube-controller-manager-multinode-541903
I0223 04:37:47.098576 154064 round_trippers.go:469] Request Headers:
I0223 04:37:47.098587 154064 round_trippers.go:473] Accept: application/json, */*
I0223 04:37:47.098600 154064 round_trippers.go:473] User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
I0223 04:37:47.100501 154064 round_trippers.go:574] Response Status: 200 OK in 1 milliseconds
I0223 04:37:47.100524 154064 round_trippers.go:577] Response Headers:
I0223 04:37:47.100535 154064 round_trippers.go:580] Audit-Id: f2c93ba8-729c-415e-b8f6-2f27922f4cb2
I0223 04:37:47.100551 154064 round_trippers.go:580] Cache-Control: no-cache, private
I0223 04:37:47.100561 154064 round_trippers.go:580] Content-Type: application/json
I0223 04:37:47.100576 154064 round_trippers.go:580] X-Kubernetes-Pf-Flowschema-Uid: 0e806fdb-28a0-48db-9063-d67fdd19ddd0
I0223 04:37:47.100586 154064 round_trippers.go:580] X-Kubernetes-Pf-Prioritylevel-Uid: d9dafd77-df4a-4035-9904-af3d6a0be465
I0223 04:37:47.100594 154064 round_trippers.go:580] Date: Thu, 23 Feb 2023 04:37:47 GMT
I0223 04:37:47.100764 154064 request.go:1171] Response Body: {"kind":"Pod","apiVersion":"v1","metadata":{"name":"kube-controller-manager-multinode-541903","namespace":"kube-system","uid":"e94928dd-b122-471e-b5df-f9df230f4d39","resourceVersion":"301","creationTimestamp":"2023-02-23T04:37:31Z","labels":{"component":"kube-controller-manager","tier":"control-plane"},"annotations":{"kubernetes.io/config.hash":"ab29de2731c8470962019cdd999d6ffc","kubernetes.io/config.mirror":"ab29de2731c8470962019cdd999d6ffc","kubernetes.io/config.seen":"2023-02-23T04:37:21.312034503Z","kubernetes.io/config.source":"file"},"ownerReferences":[{"apiVersion":"v1","kind":"Node","name":"multinode-541903","uid":"d8ecdc0c-ca08-4762-8f06-41e51e5fca21","controller":true}],"managedFields":[{"manager":"kubelet","operation":"Update","apiVersion":"v1","time":"2023-02-23T04:37:31Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{".":{},"f:kubernetes.io/config.hash":{},"f:kubernetes.io/config.mirror":{},"f:kubernetes.io/config.seen":{},"f:kubernetes.io/config.source":{}},"f:labels":{".":{ [truncated 7797 chars]
I0223 04:37:47.101292 154064 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-541903
I0223 04:37:47.101309 154064 round_trippers.go:469] Request Headers:
I0223 04:37:47.101319 154064 round_trippers.go:473] User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
I0223 04:37:47.101332 154064 round_trippers.go:473] Accept: application/json, */*
I0223 04:37:47.103038 154064 round_trippers.go:574] Response Status: 200 OK in 1 milliseconds
I0223 04:37:47.103064 154064 round_trippers.go:577] Response Headers:
I0223 04:37:47.103074 154064 round_trippers.go:580] Audit-Id: 44a0970b-6d6b-47a2-9a34-aa9d8c433c1c
I0223 04:37:47.103084 154064 round_trippers.go:580] Cache-Control: no-cache, private
I0223 04:37:47.103091 154064 round_trippers.go:580] Content-Type: application/json
I0223 04:37:47.103100 154064 round_trippers.go:580] X-Kubernetes-Pf-Flowschema-Uid: 0e806fdb-28a0-48db-9063-d67fdd19ddd0
I0223 04:37:47.103114 154064 round_trippers.go:580] X-Kubernetes-Pf-Prioritylevel-Uid: d9dafd77-df4a-4035-9904-af3d6a0be465
I0223 04:37:47.103123 154064 round_trippers.go:580] Date: Thu, 23 Feb 2023 04:37:47 GMT
I0223 04:37:47.103223 154064 request.go:1171] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-541903","uid":"d8ecdc0c-ca08-4762-8f06-41e51e5fca21","resourceVersion":"309","creationTimestamp":"2023-02-23T04:37:28Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-541903","kubernetes.io/os":"linux","minikube.k8s.io/commit":"66d56dc3ac28a702789778ac47e90f12526a0321","minikube.k8s.io/name":"multinode-541903","minikube.k8s.io/primary":"true","minikube.k8s.io/updated_at":"2023_02_23T04_37_32_0700","minikube.k8s.io/version":"v1.29.0","node-role.kubernetes.io/control-plane":"","node.kubernetes.io/exclude-from-external-load-balancers":""},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"unix:///var/run/cri-dockerd.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kubelet","operation":"Update","apiVersion":"v1","time":"2023-02-23T04:37:28Z","fieldsType":"FieldsV1","fi [truncated 4999 chars]
I0223 04:37:47.103530 154064 pod_ready.go:92] pod "kube-controller-manager-multinode-541903" in "kube-system" namespace has status "Ready":"True"
I0223 04:37:47.103539 154064 pod_ready.go:81] duration metric: took 5.012149ms waiting for pod "kube-controller-manager-multinode-541903" in "kube-system" namespace to be "Ready" ...
I0223 04:37:47.103550 154064 pod_ready.go:78] waiting up to 6m0s for pod "kube-proxy-b9nwm" in "kube-system" namespace to be "Ready" ...
I0223 04:37:47.103595 154064 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/namespaces/kube-system/pods/kube-proxy-b9nwm
I0223 04:37:47.103601 154064 round_trippers.go:469] Request Headers:
I0223 04:37:47.103611 154064 round_trippers.go:473] Accept: application/json, */*
I0223 04:37:47.103621 154064 round_trippers.go:473] User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
I0223 04:37:47.105331 154064 round_trippers.go:574] Response Status: 200 OK in 1 milliseconds
I0223 04:37:47.105353 154064 round_trippers.go:577] Response Headers:
I0223 04:37:47.105363 154064 round_trippers.go:580] Date: Thu, 23 Feb 2023 04:37:47 GMT
I0223 04:37:47.105374 154064 round_trippers.go:580] Audit-Id: e70a1dba-5c77-4881-87b9-f112e6c6e5b7
I0223 04:37:47.105386 154064 round_trippers.go:580] Cache-Control: no-cache, private
I0223 04:37:47.105398 154064 round_trippers.go:580] Content-Type: application/json
I0223 04:37:47.105408 154064 round_trippers.go:580] X-Kubernetes-Pf-Flowschema-Uid: 0e806fdb-28a0-48db-9063-d67fdd19ddd0
I0223 04:37:47.105420 154064 round_trippers.go:580] X-Kubernetes-Pf-Prioritylevel-Uid: d9dafd77-df4a-4035-9904-af3d6a0be465
I0223 04:37:47.105576 154064 request.go:1171] Response Body: {"kind":"Pod","apiVersion":"v1","metadata":{"name":"kube-proxy-b9nwm","generateName":"kube-proxy-","namespace":"kube-system","uid":"aa99444f-90a3-4ea0-98ff-ccd3241d4a2c","resourceVersion":"326","creationTimestamp":"2023-02-23T04:37:44Z","labels":{"controller-revision-hash":"6bc4695d8c","k8s-app":"kube-proxy","pod-template-generation":"1"},"ownerReferences":[{"apiVersion":"apps/v1","kind":"DaemonSet","name":"kube-proxy","uid":"20ec477f-9776-4ad2-ac05-cf498eae4952","controller":true,"blockOwnerDeletion":true}],"managedFields":[{"manager":"kube-controller-manager","operation":"Update","apiVersion":"v1","time":"2023-02-23T04:37:44Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:controller-revision-hash":{},"f:k8s-app":{},"f:pod-template-generation":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"20ec477f-9776-4ad2-ac05-cf498eae4952\"}":{}}},"f:spec":{"f:affinity":{".":{},"f:nodeAffinity":{".":{},"f:requiredDuringSchedulingIgnoredDuringExecution":{}}},"f:containers":{"k: [truncated 5540 chars]
I0223 04:37:47.105994 154064 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-541903
I0223 04:37:47.106008 154064 round_trippers.go:469] Request Headers:
I0223 04:37:47.106015 154064 round_trippers.go:473] Accept: application/json, */*
I0223 04:37:47.106022 154064 round_trippers.go:473] User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
I0223 04:37:47.107493 154064 round_trippers.go:574] Response Status: 200 OK in 1 milliseconds
I0223 04:37:47.107515 154064 round_trippers.go:577] Response Headers:
I0223 04:37:47.107524 154064 round_trippers.go:580] Audit-Id: 054aaeb0-d7d9-4d66-b32d-6211ecadb623
I0223 04:37:47.107534 154064 round_trippers.go:580] Cache-Control: no-cache, private
I0223 04:37:47.107547 154064 round_trippers.go:580] Content-Type: application/json
I0223 04:37:47.107559 154064 round_trippers.go:580] X-Kubernetes-Pf-Flowschema-Uid: 0e806fdb-28a0-48db-9063-d67fdd19ddd0
I0223 04:37:47.107572 154064 round_trippers.go:580] X-Kubernetes-Pf-Prioritylevel-Uid: d9dafd77-df4a-4035-9904-af3d6a0be465
I0223 04:37:47.107584 154064 round_trippers.go:580] Date: Thu, 23 Feb 2023 04:37:47 GMT
I0223 04:37:47.107701 154064 request.go:1171] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-541903","uid":"d8ecdc0c-ca08-4762-8f06-41e51e5fca21","resourceVersion":"309","creationTimestamp":"2023-02-23T04:37:28Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-541903","kubernetes.io/os":"linux","minikube.k8s.io/commit":"66d56dc3ac28a702789778ac47e90f12526a0321","minikube.k8s.io/name":"multinode-541903","minikube.k8s.io/primary":"true","minikube.k8s.io/updated_at":"2023_02_23T04_37_32_0700","minikube.k8s.io/version":"v1.29.0","node-role.kubernetes.io/control-plane":"","node.kubernetes.io/exclude-from-external-load-balancers":""},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"unix:///var/run/cri-dockerd.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kubelet","operation":"Update","apiVersion":"v1","time":"2023-02-23T04:37:28Z","fieldsType":"FieldsV1","fi [truncated 4999 chars]
I0223 04:37:47.608197 154064 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/namespaces/kube-system/pods/kube-proxy-b9nwm
I0223 04:37:47.608216 154064 round_trippers.go:469] Request Headers:
I0223 04:37:47.608225 154064 round_trippers.go:473] Accept: application/json, */*
I0223 04:37:47.608231 154064 round_trippers.go:473] User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
I0223 04:37:47.609916 154064 round_trippers.go:574] Response Status: 200 OK in 1 milliseconds
I0223 04:37:47.609937 154064 round_trippers.go:577] Response Headers:
I0223 04:37:47.609947 154064 round_trippers.go:580] Content-Type: application/json
I0223 04:37:47.609953 154064 round_trippers.go:580] X-Kubernetes-Pf-Flowschema-Uid: 0e806fdb-28a0-48db-9063-d67fdd19ddd0
I0223 04:37:47.609962 154064 round_trippers.go:580] X-Kubernetes-Pf-Prioritylevel-Uid: d9dafd77-df4a-4035-9904-af3d6a0be465
I0223 04:37:47.609971 154064 round_trippers.go:580] Date: Thu, 23 Feb 2023 04:37:47 GMT
I0223 04:37:47.609984 154064 round_trippers.go:580] Audit-Id: cba934a8-21b0-4cf0-a5e6-520fd7551e06
I0223 04:37:47.609996 154064 round_trippers.go:580] Cache-Control: no-cache, private
I0223 04:37:47.610081 154064 request.go:1171] Response Body: {"kind":"Pod","apiVersion":"v1","metadata":{"name":"kube-proxy-b9nwm","generateName":"kube-proxy-","namespace":"kube-system","uid":"aa99444f-90a3-4ea0-98ff-ccd3241d4a2c","resourceVersion":"326","creationTimestamp":"2023-02-23T04:37:44Z","labels":{"controller-revision-hash":"6bc4695d8c","k8s-app":"kube-proxy","pod-template-generation":"1"},"ownerReferences":[{"apiVersion":"apps/v1","kind":"DaemonSet","name":"kube-proxy","uid":"20ec477f-9776-4ad2-ac05-cf498eae4952","controller":true,"blockOwnerDeletion":true}],"managedFields":[{"manager":"kube-controller-manager","operation":"Update","apiVersion":"v1","time":"2023-02-23T04:37:44Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:controller-revision-hash":{},"f:k8s-app":{},"f:pod-template-generation":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"20ec477f-9776-4ad2-ac05-cf498eae4952\"}":{}}},"f:spec":{"f:affinity":{".":{},"f:nodeAffinity":{".":{},"f:requiredDuringSchedulingIgnoredDuringExecution":{}}},"f:containers":{"k: [truncated 5540 chars]
I0223 04:37:47.610583 154064 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-541903
I0223 04:37:47.610601 154064 round_trippers.go:469] Request Headers:
I0223 04:37:47.610612 154064 round_trippers.go:473] Accept: application/json, */*
I0223 04:37:47.610624 154064 round_trippers.go:473] User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
I0223 04:37:47.612261 154064 round_trippers.go:574] Response Status: 200 OK in 1 milliseconds
I0223 04:37:47.612282 154064 round_trippers.go:577] Response Headers:
I0223 04:37:47.612293 154064 round_trippers.go:580] Cache-Control: no-cache, private
I0223 04:37:47.612302 154064 round_trippers.go:580] Content-Type: application/json
I0223 04:37:47.612311 154064 round_trippers.go:580] X-Kubernetes-Pf-Flowschema-Uid: 0e806fdb-28a0-48db-9063-d67fdd19ddd0
I0223 04:37:47.612323 154064 round_trippers.go:580] X-Kubernetes-Pf-Prioritylevel-Uid: d9dafd77-df4a-4035-9904-af3d6a0be465
I0223 04:37:47.612338 154064 round_trippers.go:580] Date: Thu, 23 Feb 2023 04:37:47 GMT
I0223 04:37:47.612343 154064 round_trippers.go:580] Audit-Id: 5fa3f05d-5200-46af-baf8-b8b3d656e0f0
I0223 04:37:47.612416 154064 request.go:1171] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-541903","uid":"d8ecdc0c-ca08-4762-8f06-41e51e5fca21","resourceVersion":"309","creationTimestamp":"2023-02-23T04:37:28Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-541903","kubernetes.io/os":"linux","minikube.k8s.io/commit":"66d56dc3ac28a702789778ac47e90f12526a0321","minikube.k8s.io/name":"multinode-541903","minikube.k8s.io/primary":"true","minikube.k8s.io/updated_at":"2023_02_23T04_37_32_0700","minikube.k8s.io/version":"v1.29.0","node-role.kubernetes.io/control-plane":"","node.kubernetes.io/exclude-from-external-load-balancers":""},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"unix:///var/run/cri-dockerd.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kubelet","operation":"Update","apiVersion":"v1","time":"2023-02-23T04:37:28Z","fieldsType":"FieldsV1","fi [truncated 4999 chars]
I0223 04:37:48.109017 154064 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/namespaces/kube-system/pods/kube-proxy-b9nwm
I0223 04:37:48.109039 154064 round_trippers.go:469] Request Headers:
I0223 04:37:48.109047 154064 round_trippers.go:473] User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
I0223 04:37:48.109053 154064 round_trippers.go:473] Accept: application/json, */*
I0223 04:37:48.111103 154064 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
I0223 04:37:48.111119 154064 round_trippers.go:577] Response Headers:
I0223 04:37:48.111126 154064 round_trippers.go:580] X-Kubernetes-Pf-Flowschema-Uid: 0e806fdb-28a0-48db-9063-d67fdd19ddd0
I0223 04:37:48.111132 154064 round_trippers.go:580] X-Kubernetes-Pf-Prioritylevel-Uid: d9dafd77-df4a-4035-9904-af3d6a0be465
I0223 04:37:48.111137 154064 round_trippers.go:580] Date: Thu, 23 Feb 2023 04:37:48 GMT
I0223 04:37:48.111142 154064 round_trippers.go:580] Audit-Id: a1416656-0086-4488-928e-51417be0968f
I0223 04:37:48.111147 154064 round_trippers.go:580] Cache-Control: no-cache, private
I0223 04:37:48.111153 154064 round_trippers.go:580] Content-Type: application/json
I0223 04:37:48.111231 154064 request.go:1171] Response Body: {"kind":"Pod","apiVersion":"v1","metadata":{"name":"kube-proxy-b9nwm","generateName":"kube-proxy-","namespace":"kube-system","uid":"aa99444f-90a3-4ea0-98ff-ccd3241d4a2c","resourceVersion":"326","creationTimestamp":"2023-02-23T04:37:44Z","labels":{"controller-revision-hash":"6bc4695d8c","k8s-app":"kube-proxy","pod-template-generation":"1"},"ownerReferences":[{"apiVersion":"apps/v1","kind":"DaemonSet","name":"kube-proxy","uid":"20ec477f-9776-4ad2-ac05-cf498eae4952","controller":true,"blockOwnerDeletion":true}],"managedFields":[{"manager":"kube-controller-manager","operation":"Update","apiVersion":"v1","time":"2023-02-23T04:37:44Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:controller-revision-hash":{},"f:k8s-app":{},"f:pod-template-generation":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"20ec477f-9776-4ad2-ac05-cf498eae4952\"}":{}}},"f:spec":{"f:affinity":{".":{},"f:nodeAffinity":{".":{},"f:requiredDuringSchedulingIgnoredDuringExecution":{}}},"f:containers":{"k: [truncated 5540 chars]
I0223 04:37:48.111597 154064 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-541903
I0223 04:37:48.111606 154064 round_trippers.go:469] Request Headers:
I0223 04:37:48.111613 154064 round_trippers.go:473] Accept: application/json, */*
I0223 04:37:48.111619 154064 round_trippers.go:473] User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
I0223 04:37:48.113349 154064 round_trippers.go:574] Response Status: 200 OK in 1 milliseconds
I0223 04:37:48.113380 154064 round_trippers.go:577] Response Headers:
I0223 04:37:48.113390 154064 round_trippers.go:580] Cache-Control: no-cache, private
I0223 04:37:48.113404 154064 round_trippers.go:580] Content-Type: application/json
I0223 04:37:48.113416 154064 round_trippers.go:580] X-Kubernetes-Pf-Flowschema-Uid: 0e806fdb-28a0-48db-9063-d67fdd19ddd0
I0223 04:37:48.113427 154064 round_trippers.go:580] X-Kubernetes-Pf-Prioritylevel-Uid: d9dafd77-df4a-4035-9904-af3d6a0be465
I0223 04:37:48.113436 154064 round_trippers.go:580] Date: Thu, 23 Feb 2023 04:37:48 GMT
I0223 04:37:48.113445 154064 round_trippers.go:580] Audit-Id: 48fbcc04-55f0-4be1-9320-d41f371835f2
I0223 04:37:48.113525 154064 request.go:1171] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-541903","uid":"d8ecdc0c-ca08-4762-8f06-41e51e5fca21","resourceVersion":"309","creationTimestamp":"2023-02-23T04:37:28Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-541903","kubernetes.io/os":"linux","minikube.k8s.io/commit":"66d56dc3ac28a702789778ac47e90f12526a0321","minikube.k8s.io/name":"multinode-541903","minikube.k8s.io/primary":"true","minikube.k8s.io/updated_at":"2023_02_23T04_37_32_0700","minikube.k8s.io/version":"v1.29.0","node-role.kubernetes.io/control-plane":"","node.kubernetes.io/exclude-from-external-load-balancers":""},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"unix:///var/run/cri-dockerd.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kubelet","operation":"Update","apiVersion":"v1","time":"2023-02-23T04:37:28Z","fieldsType":"FieldsV1","fi [truncated 4999 chars]
I0223 04:37:48.608302 154064 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/namespaces/kube-system/pods/kube-proxy-b9nwm
I0223 04:37:48.608322 154064 round_trippers.go:469] Request Headers:
I0223 04:37:48.608329 154064 round_trippers.go:473] Accept: application/json, */*
I0223 04:37:48.608336 154064 round_trippers.go:473] User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
I0223 04:37:48.610020 154064 round_trippers.go:574] Response Status: 200 OK in 1 milliseconds
I0223 04:37:48.610039 154064 round_trippers.go:577] Response Headers:
I0223 04:37:48.610046 154064 round_trippers.go:580] Audit-Id: f325821f-9ea2-4ac4-ae38-ec6f2067dff4
I0223 04:37:48.610052 154064 round_trippers.go:580] Cache-Control: no-cache, private
I0223 04:37:48.610058 154064 round_trippers.go:580] Content-Type: application/json
I0223 04:37:48.610063 154064 round_trippers.go:580] X-Kubernetes-Pf-Flowschema-Uid: 0e806fdb-28a0-48db-9063-d67fdd19ddd0
I0223 04:37:48.610069 154064 round_trippers.go:580] X-Kubernetes-Pf-Prioritylevel-Uid: d9dafd77-df4a-4035-9904-af3d6a0be465
I0223 04:37:48.610082 154064 round_trippers.go:580] Date: Thu, 23 Feb 2023 04:37:48 GMT
I0223 04:37:48.610198 154064 request.go:1171] Response Body: {"kind":"Pod","apiVersion":"v1","metadata":{"name":"kube-proxy-b9nwm","generateName":"kube-proxy-","namespace":"kube-system","uid":"aa99444f-90a3-4ea0-98ff-ccd3241d4a2c","resourceVersion":"387","creationTimestamp":"2023-02-23T04:37:44Z","labels":{"controller-revision-hash":"6bc4695d8c","k8s-app":"kube-proxy","pod-template-generation":"1"},"ownerReferences":[{"apiVersion":"apps/v1","kind":"DaemonSet","name":"kube-proxy","uid":"20ec477f-9776-4ad2-ac05-cf498eae4952","controller":true,"blockOwnerDeletion":true}],"managedFields":[{"manager":"kube-controller-manager","operation":"Update","apiVersion":"v1","time":"2023-02-23T04:37:44Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:controller-revision-hash":{},"f:k8s-app":{},"f:pod-template-generation":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"20ec477f-9776-4ad2-ac05-cf498eae4952\"}":{}}},"f:spec":{"f:affinity":{".":{},"f:nodeAffinity":{".":{},"f:requiredDuringSchedulingIgnoredDuringExecution":{}}},"f:containers":{"k: [truncated 5529 chars]
I0223 04:37:48.610690 154064 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-541903
I0223 04:37:48.610704 154064 round_trippers.go:469] Request Headers:
I0223 04:37:48.610715 154064 round_trippers.go:473] Accept: application/json, */*
I0223 04:37:48.610725 154064 round_trippers.go:473] User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
I0223 04:37:48.612289 154064 round_trippers.go:574] Response Status: 200 OK in 1 milliseconds
I0223 04:37:48.612305 154064 round_trippers.go:577] Response Headers:
I0223 04:37:48.612312 154064 round_trippers.go:580] Audit-Id: 75d356aa-c7bc-43b7-99bf-e12cb14a6f75
I0223 04:37:48.612320 154064 round_trippers.go:580] Cache-Control: no-cache, private
I0223 04:37:48.612325 154064 round_trippers.go:580] Content-Type: application/json
I0223 04:37:48.612333 154064 round_trippers.go:580] X-Kubernetes-Pf-Flowschema-Uid: 0e806fdb-28a0-48db-9063-d67fdd19ddd0
I0223 04:37:48.612341 154064 round_trippers.go:580] X-Kubernetes-Pf-Prioritylevel-Uid: d9dafd77-df4a-4035-9904-af3d6a0be465
I0223 04:37:48.612353 154064 round_trippers.go:580] Date: Thu, 23 Feb 2023 04:37:48 GMT
I0223 04:37:48.612480 154064 request.go:1171] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-541903","uid":"d8ecdc0c-ca08-4762-8f06-41e51e5fca21","resourceVersion":"309","creationTimestamp":"2023-02-23T04:37:28Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-541903","kubernetes.io/os":"linux","minikube.k8s.io/commit":"66d56dc3ac28a702789778ac47e90f12526a0321","minikube.k8s.io/name":"multinode-541903","minikube.k8s.io/primary":"true","minikube.k8s.io/updated_at":"2023_02_23T04_37_32_0700","minikube.k8s.io/version":"v1.29.0","node-role.kubernetes.io/control-plane":"","node.kubernetes.io/exclude-from-external-load-balancers":""},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"unix:///var/run/cri-dockerd.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kubelet","operation":"Update","apiVersion":"v1","time":"2023-02-23T04:37:28Z","fieldsType":"FieldsV1","fi [truncated 4999 chars]
I0223 04:37:48.612782 154064 pod_ready.go:92] pod "kube-proxy-b9nwm" in "kube-system" namespace has status "Ready":"True"
I0223 04:37:48.612804 154064 pod_ready.go:81] duration metric: took 1.509246914s waiting for pod "kube-proxy-b9nwm" in "kube-system" namespace to be "Ready" ...
I0223 04:37:48.612819 154064 pod_ready.go:78] waiting up to 6m0s for pod "kube-scheduler-multinode-541903" in "kube-system" namespace to be "Ready" ...
I0223 04:37:48.612871 154064 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/namespaces/kube-system/pods/kube-scheduler-multinode-541903
I0223 04:37:48.612882 154064 round_trippers.go:469] Request Headers:
I0223 04:37:48.612894 154064 round_trippers.go:473] Accept: application/json, */*
I0223 04:37:48.612908 154064 round_trippers.go:473] User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
I0223 04:37:48.614390 154064 round_trippers.go:574] Response Status: 200 OK in 1 milliseconds
I0223 04:37:48.614404 154064 round_trippers.go:577] Response Headers:
I0223 04:37:48.614411 154064 round_trippers.go:580] Date: Thu, 23 Feb 2023 04:37:48 GMT
I0223 04:37:48.614421 154064 round_trippers.go:580] Audit-Id: 96b74762-2c9d-4083-a736-916117e738ab
I0223 04:37:48.614432 154064 round_trippers.go:580] Cache-Control: no-cache, private
I0223 04:37:48.614440 154064 round_trippers.go:580] Content-Type: application/json
I0223 04:37:48.614451 154064 round_trippers.go:580] X-Kubernetes-Pf-Flowschema-Uid: 0e806fdb-28a0-48db-9063-d67fdd19ddd0
I0223 04:37:48.614458 154064 round_trippers.go:580] X-Kubernetes-Pf-Prioritylevel-Uid: d9dafd77-df4a-4035-9904-af3d6a0be465
I0223 04:37:48.614544 154064 request.go:1171] Response Body: {"kind":"Pod","apiVersion":"v1","metadata":{"name":"kube-scheduler-multinode-541903","namespace":"kube-system","uid":"a9afc7a6-806a-415e-b3b0-747daf5498e8","resourceVersion":"382","creationTimestamp":"2023-02-23T04:37:32Z","labels":{"component":"kube-scheduler","tier":"control-plane"},"annotations":{"kubernetes.io/config.hash":"1101f4c1eb93d10ee8052cff434fb92a","kubernetes.io/config.mirror":"1101f4c1eb93d10ee8052cff434fb92a","kubernetes.io/config.seen":"2023-02-23T04:37:31.811116783Z","kubernetes.io/config.source":"file"},"ownerReferences":[{"apiVersion":"v1","kind":"Node","name":"multinode-541903","uid":"d8ecdc0c-ca08-4762-8f06-41e51e5fca21","controller":true}],"managedFields":[{"manager":"kubelet","operation":"Update","apiVersion":"v1","time":"2023-02-23T04:37:32Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{".":{},"f:kubernetes.io/config.hash":{},"f:kubernetes.io/config.mirror":{},"f:kubernetes.io/config.seen":{},"f:kubernetes.io/config.source":{}},"f:labels":{".":{},"f:component":{} [truncated 4679 chars]
I0223 04:37:48.614878 154064 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-541903
I0223 04:37:48.614889 154064 round_trippers.go:469] Request Headers:
I0223 04:37:48.614896 154064 round_trippers.go:473] Accept: application/json, */*
I0223 04:37:48.614903 154064 round_trippers.go:473] User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
I0223 04:37:48.616311 154064 round_trippers.go:574] Response Status: 200 OK in 1 milliseconds
I0223 04:37:48.616331 154064 round_trippers.go:577] Response Headers:
I0223 04:37:48.616338 154064 round_trippers.go:580] Date: Thu, 23 Feb 2023 04:37:48 GMT
I0223 04:37:48.616344 154064 round_trippers.go:580] Audit-Id: b4a37b7a-f30d-45af-bb36-ae0c5dada766
I0223 04:37:48.616354 154064 round_trippers.go:580] Cache-Control: no-cache, private
I0223 04:37:48.616363 154064 round_trippers.go:580] Content-Type: application/json
I0223 04:37:48.616376 154064 round_trippers.go:580] X-Kubernetes-Pf-Flowschema-Uid: 0e806fdb-28a0-48db-9063-d67fdd19ddd0
I0223 04:37:48.616383 154064 round_trippers.go:580] X-Kubernetes-Pf-Prioritylevel-Uid: d9dafd77-df4a-4035-9904-af3d6a0be465
I0223 04:37:48.616488 154064 request.go:1171] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-541903","uid":"d8ecdc0c-ca08-4762-8f06-41e51e5fca21","resourceVersion":"309","creationTimestamp":"2023-02-23T04:37:28Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-541903","kubernetes.io/os":"linux","minikube.k8s.io/commit":"66d56dc3ac28a702789778ac47e90f12526a0321","minikube.k8s.io/name":"multinode-541903","minikube.k8s.io/primary":"true","minikube.k8s.io/updated_at":"2023_02_23T04_37_32_0700","minikube.k8s.io/version":"v1.29.0","node-role.kubernetes.io/control-plane":"","node.kubernetes.io/exclude-from-external-load-balancers":""},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"unix:///var/run/cri-dockerd.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kubelet","operation":"Update","apiVersion":"v1","time":"2023-02-23T04:37:28Z","fieldsType":"FieldsV1","fi [truncated 4999 chars]
I0223 04:37:48.616774 154064 pod_ready.go:92] pod "kube-scheduler-multinode-541903" in "kube-system" namespace has status "Ready":"True"
I0223 04:37:48.616784 154064 pod_ready.go:81] duration metric: took 3.957157ms waiting for pod "kube-scheduler-multinode-541903" in "kube-system" namespace to be "Ready" ...
I0223 04:37:48.616790 154064 pod_ready.go:38] duration metric: took 4.058169416s for extra waiting for all system-critical and pods with labels [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] to be "Ready" ...
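The pod_ready.go entries above are a poll loop: GET the pod, inspect its Ready condition, and retry roughly every 500ms until the condition is True or the 6m0s budget runs out. A condensed sketch of that pattern under stated assumptions (client-go, a kubeconfig pointing at this cluster); it is not minikube's actual helper:

package main

import (
	"context"
	"fmt"
	"log"
	"time"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

// isReady reports whether the pod's PodReady condition is True.
func isReady(pod *corev1.Pod) bool {
	for _, c := range pod.Status.Conditions {
		if c.Type == corev1.PodReady {
			return c.Status == corev1.ConditionTrue
		}
	}
	return false
}

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		log.Fatal(err)
	}
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		log.Fatal(err)
	}
	ctx, cancel := context.WithTimeout(context.Background(), 6*time.Minute)
	defer cancel()
	// Poll roughly every 500ms, matching the cadence visible in the log above.
	for {
		pod, err := cs.CoreV1().Pods("kube-system").Get(ctx, "kube-apiserver-multinode-541903", metav1.GetOptions{})
		if err == nil && isReady(pod) {
			fmt.Println("pod is Ready")
			return
		}
		select {
		case <-ctx.Done():
			log.Fatal("timed out waiting for pod to be Ready")
		case <-time.After(500 * time.Millisecond):
		}
	}
}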
I0223 04:37:48.616809 154064 api_server.go:51] waiting for apiserver process to appear ...
I0223 04:37:48.616854 154064 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I0223 04:37:48.626542 154064 command_runner.go:130] > 2075
I0223 04:37:48.626559 154064 api_server.go:71] duration metric: took 4.131061694s to wait for apiserver process to appear ...
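The process check above shells out to pgrep: -f matches against the full command line, -x requires the pattern to match that line exactly, and -n picks the newest matching process, so the single line of output ("2075") is the apiserver's PID. A hypothetical equivalent from Go:

package main

import (
	"fmt"
	"log"
	"os/exec"
	"strings"
)

func main() {
	// Mirrors the log's check: pgrep prints the PID of the newest process
	// whose full command line matches the pattern.
	out, err := exec.Command("sudo", "pgrep", "-xnf", "kube-apiserver.*minikube.*").Output()
	if err != nil {
		log.Fatalf("apiserver process not found: %v", err)
	}
	fmt.Println("apiserver PID:", strings.TrimSpace(string(out)))
}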
I0223 04:37:48.626566 154064 api_server.go:87] waiting for apiserver healthz status ...
I0223 04:37:48.626574 154064 api_server.go:252] Checking apiserver healthz at https://192.168.58.2:8443/healthz ...
I0223 04:37:48.630874 154064 api_server.go:278] https://192.168.58.2:8443/healthz returned 200:
ok
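The healthz probe is an HTTPS GET that counts as success once the endpoint answers 200 with the body "ok". A sketch of such a probe; the InsecureSkipVerify shortcut is for illustration only, since a real client should trust the cluster's CA certificate instead:

package main

import (
	"crypto/tls"
	"fmt"
	"io"
	"log"
	"net/http"
	"time"
)

func main() {
	// Illustration only: skip TLS verification rather than loading the cluster CA.
	client := &http.Client{
		Timeout: 2 * time.Second,
		Transport: &http.Transport{
			TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
		},
	}
	deadline := time.Now().Add(1 * time.Minute)
	for time.Now().Before(deadline) {
		resp, err := client.Get("https://192.168.58.2:8443/healthz")
		if err == nil {
			body, _ := io.ReadAll(resp.Body)
			resp.Body.Close()
			// Healthy apiservers answer 200 with the literal body "ok".
			if resp.StatusCode == http.StatusOK && string(body) == "ok" {
				fmt.Println("apiserver is healthy")
				return
			}
		}
		time.Sleep(500 * time.Millisecond)
	}
	log.Fatal("apiserver never became healthy")
}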
I0223 04:37:48.630915 154064 round_trippers.go:463] GET https://192.168.58.2:8443/version
I0223 04:37:48.630919 154064 round_trippers.go:469] Request Headers:
I0223 04:37:48.630926 154064 round_trippers.go:473] Accept: application/json, */*
I0223 04:37:48.630933 154064 round_trippers.go:473] User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
I0223 04:37:48.631469 154064 round_trippers.go:574] Response Status: 200 OK in 0 milliseconds
I0223 04:37:48.631484 154064 round_trippers.go:577] Response Headers:
I0223 04:37:48.631491 154064 round_trippers.go:580] Audit-Id: 7c8b8cfe-bf79-49bb-a590-e5fbcbf2adbb
I0223 04:37:48.631496 154064 round_trippers.go:580] Cache-Control: no-cache, private
I0223 04:37:48.631502 154064 round_trippers.go:580] Content-Type: application/json
I0223 04:37:48.631510 154064 round_trippers.go:580] X-Kubernetes-Pf-Flowschema-Uid: 0e806fdb-28a0-48db-9063-d67fdd19ddd0
I0223 04:37:48.631516 154064 round_trippers.go:580] X-Kubernetes-Pf-Prioritylevel-Uid: d9dafd77-df4a-4035-9904-af3d6a0be465
I0223 04:37:48.631527 154064 round_trippers.go:580] Content-Length: 263
I0223 04:37:48.631532 154064 round_trippers.go:580] Date: Thu, 23 Feb 2023 04:37:48 GMT
I0223 04:37:48.631566 154064 request.go:1171] Response Body: {
"major": "1",
"minor": "26",
"gitVersion": "v1.26.1",
"gitCommit": "8f94681cd294aa8cfd3407b8191f6c70214973a4",
"gitTreeState": "clean",
"buildDate": "2023-01-18T15:51:25Z",
"goVersion": "go1.19.5",
"compiler": "gc",
"platform": "linux/amd64"
}
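That /version payload maps onto a small struct, which is where the "control plane version" figure on the next line comes from. A sketch using only the fields visible above (the struct is illustrative, not minikube's internal type):

package main

import (
	"encoding/json"
	"fmt"
	"log"
)

// versionInfo mirrors the fields visible in the /version response above.
type versionInfo struct {
	Major      string `json:"major"`
	Minor      string `json:"minor"`
	GitVersion string `json:"gitVersion"`
	GitCommit  string `json:"gitCommit"`
	BuildDate  string `json:"buildDate"`
	GoVersion  string `json:"goVersion"`
	Platform   string `json:"platform"`
}

func main() {
	raw := []byte(`{"major":"1","minor":"26","gitVersion":"v1.26.1"}`)
	var v versionInfo
	if err := json.Unmarshal(raw, &v); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("control plane version: %s\n", v.GitVersion) // -> v1.26.1
}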
I0223 04:37:48.631636 154064 api_server.go:140] control plane version: v1.26.1
I0223 04:37:48.631652 154064 api_server.go:130] duration metric: took 5.080233ms to wait for apiserver health ...
I0223 04:37:48.631658 154064 system_pods.go:43] waiting for kube-system pods to appear ...
I0223 04:37:48.631697 154064 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/namespaces/kube-system/pods
I0223 04:37:48.631704 154064 round_trippers.go:469] Request Headers:
I0223 04:37:48.631710 154064 round_trippers.go:473] Accept: application/json, */*
I0223 04:37:48.631719 154064 round_trippers.go:473] User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
I0223 04:37:48.634123 154064 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
I0223 04:37:48.634142 154064 round_trippers.go:577] Response Headers:
I0223 04:37:48.634151 154064 round_trippers.go:580] Audit-Id: de73cccb-0d5f-4a3c-b53b-1bbd38721470
I0223 04:37:48.634160 154064 round_trippers.go:580] Cache-Control: no-cache, private
I0223 04:37:48.634170 154064 round_trippers.go:580] Content-Type: application/json
I0223 04:37:48.634183 154064 round_trippers.go:580] X-Kubernetes-Pf-Flowschema-Uid: 0e806fdb-28a0-48db-9063-d67fdd19ddd0
I0223 04:37:48.634194 154064 round_trippers.go:580] X-Kubernetes-Pf-Prioritylevel-Uid: d9dafd77-df4a-4035-9904-af3d6a0be465
I0223 04:37:48.634208 154064 round_trippers.go:580] Date: Thu, 23 Feb 2023 04:37:48 GMT
I0223 04:37:48.634676 154064 request.go:1171] Response Body: {"kind":"PodList","apiVersion":"v1","metadata":{"resourceVersion":"388"},"items":[{"metadata":{"name":"coredns-787d4945fb-m9kd9","generateName":"coredns-787d4945fb-","namespace":"kube-system","uid":"f98b834d-59f4-4ad0-8fc5-942542e67c77","resourceVersion":"363","creationTimestamp":"2023-02-23T04:37:44Z","labels":{"k8s-app":"kube-dns","pod-template-hash":"787d4945fb"},"ownerReferences":[{"apiVersion":"apps/v1","kind":"ReplicaSet","name":"coredns-787d4945fb","uid":"172875b3-c124-4a60-93b2-e8672eacefc6","controller":true,"blockOwnerDeletion":true}],"managedFields":[{"manager":"kube-controller-manager","operation":"Update","apiVersion":"v1","time":"2023-02-23T04:37:44Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:k8s-app":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"172875b3-c124-4a60-93b2-e8672eacefc6\"}":{}}},"f:spec":{"f:affinity":{".":{},"f:podAntiAffinity":{".":{},"f
:preferredDuringSchedulingIgnoredDuringExecution":{}}},"f:containers":{ [truncated 61703 chars]
I0223 04:37:48.636548 154064 system_pods.go:59] 9 kube-system pods found
I0223 04:37:48.636572 154064 system_pods.go:61] "coredns-787d4945fb-m9kd9" [f98b834d-59f4-4ad0-8fc5-942542e67c77] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I0223 04:37:48.636579 154064 system_pods.go:61] "coredns-787d4945fb-qcmgw" [326c565c-307c-4df8-9dc7-e3937a6ed2f6] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I0223 04:37:48.636587 154064 system_pods.go:61] "etcd-multinode-541903" [d4f461e5-807f-4ab5-9619-f9678e053114] Running
I0223 04:37:48.636593 154064 system_pods.go:61] "kindnet-gnlxp" [01074f06-74cd-4299-8f85-89f3a5f58573] Pending / Ready:ContainersNotReady (containers with unready status: [kindnet-cni]) / ContainersReady:ContainersNotReady (containers with unready status: [kindnet-cni])
I0223 04:37:48.636601 154064 system_pods.go:61] "kube-apiserver-multinode-541903" [4dac4fc4-a548-48fb-a9d3-2caab1386063] Running
I0223 04:37:48.636606 154064 system_pods.go:61] "kube-controller-manager-multinode-541903" [e94928dd-b122-471e-b5df-f9df230f4d39] Running
I0223 04:37:48.636609 154064 system_pods.go:61] "kube-proxy-b9nwm" [aa99444f-90a3-4ea0-98ff-ccd3241d4a2c] Running
I0223 04:37:48.636616 154064 system_pods.go:61] "kube-scheduler-multinode-541903" [a9afc7a6-806a-415e-b3b0-747daf5498e8] Running
I0223 04:37:48.636622 154064 system_pods.go:61] "storage-provisioner" [2b794578-dda7-4230-9792-79a6a174f39d] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I0223 04:37:48.636629 154064 system_pods.go:74] duration metric: took 4.967359ms to wait for pod list to return data ...
I0223 04:37:48.636637 154064 default_sa.go:34] waiting for default service account to be created ...
I0223 04:37:48.636675 154064 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/namespaces/default/serviceaccounts
I0223 04:37:48.636684 154064 round_trippers.go:469] Request Headers:
I0223 04:37:48.636694 154064 round_trippers.go:473] Accept: application/json, */*
I0223 04:37:48.636705 154064 round_trippers.go:473] User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
I0223 04:37:48.638049 154064 round_trippers.go:574] Response Status: 200 OK in 1 milliseconds
I0223 04:37:48.638064 154064 round_trippers.go:577] Response Headers:
I0223 04:37:48.638070 154064 round_trippers.go:580] X-Kubernetes-Pf-Prioritylevel-Uid: d9dafd77-df4a-4035-9904-af3d6a0be465
I0223 04:37:48.638076 154064 round_trippers.go:580] Content-Length: 261
I0223 04:37:48.638082 154064 round_trippers.go:580] Date: Thu, 23 Feb 2023 04:37:48 GMT
I0223 04:37:48.638087 154064 round_trippers.go:580] Audit-Id: 9509c774-c9a9-47e5-a969-7aaa2aa8a548
I0223 04:37:48.638093 154064 round_trippers.go:580] Cache-Control: no-cache, private
I0223 04:37:48.638099 154064 round_trippers.go:580] Content-Type: application/json
I0223 04:37:48.638105 154064 round_trippers.go:580] X-Kubernetes-Pf-Flowschema-Uid: 0e806fdb-28a0-48db-9063-d67fdd19ddd0
I0223 04:37:48.638122 154064 request.go:1171] Response Body: {"kind":"ServiceAccountList","apiVersion":"v1","metadata":{"resourceVersion":"388"},"items":[{"metadata":{"name":"default","namespace":"default","uid":"54b8610d-dd77-444a-9e83-615ee5dab34e","resourceVersion":"310","creationTimestamp":"2023-02-23T04:37:44Z"}}]}
I0223 04:37:48.638255 154064 default_sa.go:45] found service account: "default"
I0223 04:37:48.638264 154064 default_sa.go:55] duration metric: took 1.623197ms for default service account to be created ...
I0223 04:37:48.638269 154064 system_pods.go:116] waiting for k8s-apps to be running ...
I0223 04:37:48.756585 154064 request.go:622] Waited for 118.2716ms due to client-side throttling, not priority and fairness, request: GET:https://192.168.58.2:8443/api/v1/namespaces/kube-system/pods
I0223 04:37:48.756628 154064 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/namespaces/kube-system/pods
I0223 04:37:48.756649 154064 round_trippers.go:469] Request Headers:
I0223 04:37:48.756657 154064 round_trippers.go:473] Accept: application/json, */*
I0223 04:37:48.756663 154064 round_trippers.go:473] User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
I0223 04:37:48.759146 154064 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
I0223 04:37:48.759164 154064 round_trippers.go:577] Response Headers:
I0223 04:37:48.759171 154064 round_trippers.go:580] Date: Thu, 23 Feb 2023 04:37:48 GMT
I0223 04:37:48.759177 154064 round_trippers.go:580] Audit-Id: ce7361c0-3264-484d-a762-07c67252053d
I0223 04:37:48.759184 154064 round_trippers.go:580] Cache-Control: no-cache, private
I0223 04:37:48.759190 154064 round_trippers.go:580] Content-Type: application/json
I0223 04:37:48.759198 154064 round_trippers.go:580] X-Kubernetes-Pf-Flowschema-Uid: 0e806fdb-28a0-48db-9063-d67fdd19ddd0
I0223 04:37:48.759204 154064 round_trippers.go:580] X-Kubernetes-Pf-Prioritylevel-Uid: d9dafd77-df4a-4035-9904-af3d6a0be465
I0223 04:37:48.759565 154064 request.go:1171] Response Body: {"kind":"PodList","apiVersion":"v1","metadata":{"resourceVersion":"390"},"items":[{"metadata":{"name":"coredns-787d4945fb-m9kd9","generateName":"coredns-787d4945fb-","namespace":"kube-system","uid":"f98b834d-59f4-4ad0-8fc5-942542e67c77","resourceVersion":"389","creationTimestamp":"2023-02-23T04:37:44Z","labels":{"k8s-app":"kube-dns","pod-template-hash":"787d4945fb"},"ownerReferences":[{"apiVersion":"apps/v1","kind":"ReplicaSet","name":"coredns-787d4945fb","uid":"172875b3-c124-4a60-93b2-e8672eacefc6","controller":true,"blockOwnerDeletion":true}],"managedFields":[{"manager":"kube-controller-manager","operation":"Update","apiVersion":"v1","time":"2023-02-23T04:37:44Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:k8s-app":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"172875b3-c124-4a60-93b2-e8672eacefc6\"}":{}}},"f:spec":{"f:affinity":{".":{},"f:podAntiAffinity":{".":{},"f
:preferredDuringSchedulingIgnoredDuringExecution":{}}},"f:containers":{ [truncated 61932 chars]
I0223 04:37:48.761402 154064 system_pods.go:86] 9 kube-system pods found
I0223 04:37:48.761423 154064 system_pods.go:89] "coredns-787d4945fb-m9kd9" [f98b834d-59f4-4ad0-8fc5-942542e67c77] Running / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I0223 04:37:48.761430 154064 system_pods.go:89] "coredns-787d4945fb-qcmgw" [326c565c-307c-4df8-9dc7-e3937a6ed2f6] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I0223 04:37:48.761434 154064 system_pods.go:89] "etcd-multinode-541903" [d4f461e5-807f-4ab5-9619-f9678e053114] Running
I0223 04:37:48.761440 154064 system_pods.go:89] "kindnet-gnlxp" [01074f06-74cd-4299-8f85-89f3a5f58573] Pending / Ready:ContainersNotReady (containers with unready status: [kindnet-cni]) / ContainersReady:ContainersNotReady (containers with unready status: [kindnet-cni])
I0223 04:37:48.761445 154064 system_pods.go:89] "kube-apiserver-multinode-541903" [4dac4fc4-a548-48fb-a9d3-2caab1386063] Running
I0223 04:37:48.761450 154064 system_pods.go:89] "kube-controller-manager-multinode-541903" [e94928dd-b122-471e-b5df-f9df230f4d39] Running
I0223 04:37:48.761460 154064 system_pods.go:89] "kube-proxy-b9nwm" [aa99444f-90a3-4ea0-98ff-ccd3241d4a2c] Running
I0223 04:37:48.761464 154064 system_pods.go:89] "kube-scheduler-multinode-541903" [a9afc7a6-806a-415e-b3b0-747daf5498e8] Running
I0223 04:37:48.761469 154064 system_pods.go:89] "storage-provisioner" [2b794578-dda7-4230-9792-79a6a174f39d] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I0223 04:37:48.761478 154064 system_pods.go:126] duration metric: took 123.204594ms to wait for k8s-apps to be running ...
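
The "Waited ... due to client-side throttling, not priority and fairness" messages above come from client-go's token-bucket rate limiter on the client, not from server-side flow control. A stdlib-only sketch of that behavior follows; the 5 QPS / burst 10 figures are client-go's defaults and are assumptions here, and the implementation is illustrative rather than client-go's own.

// Hedged sketch of client-side request throttling: a token bucket with a
// burst capacity, refilled at a fixed QPS. Once the initial burst is spent,
// each request waits roughly 1/QPS — matching the ~100-200ms waits logged above.
package main

import (
	"fmt"
	"time"
)

type tokenBucket struct {
	tokens   chan struct{}
	interval time.Duration // one token refilled per interval (1/QPS)
}

func newTokenBucket(qps, burst int) *tokenBucket {
	tb := &tokenBucket{
		tokens:   make(chan struct{}, burst),
		interval: time.Second / time.Duration(qps),
	}
	for i := 0; i < burst; i++ {
		tb.tokens <- struct{}{} // start with a full burst
	}
	go func() {
		for range time.Tick(tb.interval) {
			select {
			case tb.tokens <- struct{}{}:
			default: // bucket already full
			}
		}
	}()
	return tb
}

// wait blocks until a token is available and reports how long it waited.
func (tb *tokenBucket) wait() time.Duration {
	start := time.Now()
	<-tb.tokens
	return time.Since(start)
}

func main() {
	tb := newTokenBucket(5, 10) // client-go defaults (assumption)
	for i := 0; i < 12; i++ {
		fmt.Printf("request %d waited %v\n", i, tb.wait())
	}
}
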
I0223 04:37:48.761486 154064 system_svc.go:44] waiting for kubelet service to be running ....
I0223 04:37:48.761521 154064 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet
I0223 04:37:48.770394 154064 system_svc.go:56] duration metric: took 8.900672ms WaitForService to wait for kubelet.
I0223 04:37:48.770423 154064 kubeadm.go:578] duration metric: took 4.274924711s to wait for : map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] ...
I0223 04:37:48.770440 154064 node_conditions.go:102] verifying NodePressure condition ...
I0223 04:37:48.956859 154064 request.go:622] Waited for 186.344723ms due to client-side throttling, not priority and fairness, request: GET:https://192.168.58.2:8443/api/v1/nodes
I0223 04:37:48.956907 154064 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes
I0223 04:37:48.956921 154064 round_trippers.go:469] Request Headers:
I0223 04:37:48.956929 154064 round_trippers.go:473] Accept: application/json, */*
I0223 04:37:48.956936 154064 round_trippers.go:473] User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
I0223 04:37:48.958678 154064 round_trippers.go:574] Response Status: 200 OK in 1 milliseconds
I0223 04:37:48.958697 154064 round_trippers.go:577] Response Headers:
I0223 04:37:48.958707 154064 round_trippers.go:580] X-Kubernetes-Pf-Prioritylevel-Uid: d9dafd77-df4a-4035-9904-af3d6a0be465
I0223 04:37:48.958717 154064 round_trippers.go:580] Date: Thu, 23 Feb 2023 04:37:48 GMT
I0223 04:37:48.958729 154064 round_trippers.go:580] Audit-Id: 9068183e-2463-4a9f-8cdc-ee0c48dca4a0
I0223 04:37:48.958740 154064 round_trippers.go:580] Cache-Control: no-cache, private
I0223 04:37:48.958746 154064 round_trippers.go:580] Content-Type: application/json
I0223 04:37:48.958754 154064 round_trippers.go:580] X-Kubernetes-Pf-Flowschema-Uid: 0e806fdb-28a0-48db-9063-d67fdd19ddd0
I0223 04:37:48.958874 154064 request.go:1171] Response Body: {"kind":"NodeList","apiVersion":"v1","metadata":{"resourceVersion":"390"},"items":[{"metadata":{"name":"multinode-541903","uid":"d8ecdc0c-ca08-4762-8f06-41e51e5fca21","resourceVersion":"309","creationTimestamp":"2023-02-23T04:37:28Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-541903","kubernetes.io/os":"linux","minikube.k8s.io/commit":"66d56dc3ac28a702789778ac47e90f12526a0321","minikube.k8s.io/name":"multinode-541903","minikube.k8s.io/primary":"true","minikube.k8s.io/updated_at":"2023_02_23T04_37_32_0700","minikube.k8s.io/version":"v1.29.0","node-role.kubernetes.io/control-plane":"","node.kubernetes.io/exclude-from-external-load-balancers":""},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"unix:///var/run/cri-dockerd.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFiel
ds":[{"manager":"kubelet","operation":"Update","apiVersion":"v1","time" [truncated 5052 chars]
I0223 04:37:48.959231 154064 node_conditions.go:122] node storage ephemeral capacity is 304681132Ki
I0223 04:37:48.959247 154064 node_conditions.go:123] node cpu capacity is 8
I0223 04:37:48.959259 154064 node_conditions.go:105] duration metric: took 188.815169ms to run NodePressure ...
I0223 04:37:48.959275 154064 start.go:228] waiting for startup goroutines ...
I0223 04:37:48.959286 154064 start.go:233] waiting for cluster config update ...
I0223 04:37:48.959298 154064 start.go:242] writing updated cluster config ...
I0223 04:37:48.961659 154064 out.go:177]
I0223 04:37:48.963086 154064 config.go:182] Loaded profile config "multinode-541903": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.26.1
I0223 04:37:48.963168 154064 profile.go:148] Saving config to /home/jenkins/minikube-integration/15909-3701/.minikube/profiles/multinode-541903/config.json ...
I0223 04:37:48.964921 154064 out.go:177] * Starting worker node multinode-541903-m02 in cluster multinode-541903
I0223 04:37:48.966332 154064 cache.go:120] Beginning downloading kic base image for docker with docker
I0223 04:37:48.967831 154064 out.go:177] * Pulling base image ...
I0223 04:37:48.969549 154064 preload.go:132] Checking if preload exists for k8s version v1.26.1 and runtime docker
I0223 04:37:48.969567 154064 cache.go:57] Caching tarball of preloaded images
I0223 04:37:48.969628 154064 image.go:77] Checking for gcr.io/k8s-minikube/kicbase-builds:v0.0.37-1676506612-15768@sha256:cc1cb283879fedae93096946a6953a50075ed680d467a47cbf669e0ed7d3aebc in local docker daemon
I0223 04:37:48.969664 154064 preload.go:174] Found /home/jenkins/minikube-integration/15909-3701/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.26.1-docker-overlay2-amd64.tar.lz4 in cache, skipping download
I0223 04:37:48.969680 154064 cache.go:60] Finished verifying existence of preloaded tar for v1.26.1 on docker
I0223 04:37:48.969773 154064 profile.go:148] Saving config to /home/jenkins/minikube-integration/15909-3701/.minikube/profiles/multinode-541903/config.json ...
I0223 04:37:49.032515 154064 image.go:81] Found gcr.io/k8s-minikube/kicbase-builds:v0.0.37-1676506612-15768@sha256:cc1cb283879fedae93096946a6953a50075ed680d467a47cbf669e0ed7d3aebc in local docker daemon, skipping pull
I0223 04:37:49.032539 154064 cache.go:143] gcr.io/k8s-minikube/kicbase-builds:v0.0.37-1676506612-15768@sha256:cc1cb283879fedae93096946a6953a50075ed680d467a47cbf669e0ed7d3aebc exists in daemon, skipping load
I0223 04:37:49.032552 154064 cache.go:193] Successfully downloaded all kic artifacts
I0223 04:37:49.032587 154064 start.go:364] acquiring machines lock for multinode-541903-m02: {Name:mk0e6f7f2485b200fe4c13cd9d7e9e506bead925 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I0223 04:37:49.032693 154064 start.go:368] acquired machines lock for "multinode-541903-m02" in 84.244µs
I0223 04:37:49.032719 154064 start.go:93] Provisioning new machine with config: &{Name:multinode-541903 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.37-1676506612-15768@sha256:cc1cb283879fedae93096946a6953a50075ed680d467a47cbf669e0ed7d3aebc Memory:2200 CPUs:2 DiskSize:20000 VMDriver: Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:0 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.26.1 ClusterName:multinode-541903 Namespace:default APIServerName:miniku
beCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI: NodeIP: NodePort:8443 NodeName:} Nodes:[{Name: IP:192.168.58.2 Port:8443 KubernetesVersion:v1.26.1 ContainerRuntime:docker ControlPlane:true Worker:true} {Name:m02 IP: Port:0 KubernetesVersion:v1.26.1 ContainerRuntime:docker ControlPlane:false Worker:true}] Addons:map[default-storageclass:true storage-provisioner:true] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:true ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-ho
st Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP:} &{Name:m02 IP: Port:0 KubernetesVersion:v1.26.1 ContainerRuntime:docker ControlPlane:false Worker:true}
I0223 04:37:49.032812 154064 start.go:125] createHost starting for "m02" (driver="docker")
I0223 04:37:49.035027 154064 out.go:204] * Creating docker container (CPUs=2, Memory=2200MB) ...
I0223 04:37:49.035124 154064 start.go:159] libmachine.API.Create for "multinode-541903" (driver="docker")
I0223 04:37:49.035147 154064 client.go:168] LocalClient.Create starting
I0223 04:37:49.035226 154064 main.go:141] libmachine: Reading certificate data from /home/jenkins/minikube-integration/15909-3701/.minikube/certs/ca.pem
I0223 04:37:49.035263 154064 main.go:141] libmachine: Decoding PEM data...
I0223 04:37:49.035288 154064 main.go:141] libmachine: Parsing certificate...
I0223 04:37:49.035355 154064 main.go:141] libmachine: Reading certificate data from /home/jenkins/minikube-integration/15909-3701/.minikube/certs/cert.pem
I0223 04:37:49.035381 154064 main.go:141] libmachine: Decoding PEM data...
I0223 04:37:49.035399 154064 main.go:141] libmachine: Parsing certificate...
I0223 04:37:49.035615 154064 cli_runner.go:164] Run: docker network inspect multinode-541903 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I0223 04:37:49.097395 154064 network_create.go:76] Found existing network {name:multinode-541903 subnet:0xc00162aba0 gateway:[0 0 0 0 0 0 0 0 0 0 255 255 192 168 58 1] mtu:1500}
I0223 04:37:49.097442 154064 kic.go:117] calculated static IP "192.168.58.3" for the "multinode-541903-m02" container
I0223 04:37:49.097505 154064 cli_runner.go:164] Run: docker ps -a --format {{.Names}}
I0223 04:37:49.158874 154064 cli_runner.go:164] Run: docker volume create multinode-541903-m02 --label name.minikube.sigs.k8s.io=multinode-541903-m02 --label created_by.minikube.sigs.k8s.io=true
I0223 04:37:49.219181 154064 oci.go:103] Successfully created a docker volume multinode-541903-m02
I0223 04:37:49.219243 154064 cli_runner.go:164] Run: docker run --rm --name multinode-541903-m02-preload-sidecar --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=multinode-541903-m02 --entrypoint /usr/bin/test -v multinode-541903-m02:/var gcr.io/k8s-minikube/kicbase-builds:v0.0.37-1676506612-15768@sha256:cc1cb283879fedae93096946a6953a50075ed680d467a47cbf669e0ed7d3aebc -d /var/lib
I0223 04:37:49.805888 154064 oci.go:107] Successfully prepared a docker volume multinode-541903-m02
I0223 04:37:49.805929 154064 preload.go:132] Checking if preload exists for k8s version v1.26.1 and runtime docker
I0223 04:37:49.805952 154064 kic.go:190] Starting extracting preloaded images to volume ...
I0223 04:37:49.806008 154064 cli_runner.go:164] Run: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/15909-3701/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.26.1-docker-overlay2-amd64.tar.lz4:/preloaded.tar:ro -v multinode-541903-m02:/extractDir gcr.io/k8s-minikube/kicbase-builds:v0.0.37-1676506612-15768@sha256:cc1cb283879fedae93096946a6953a50075ed680d467a47cbf669e0ed7d3aebc -I lz4 -xf /preloaded.tar -C /extractDir
I0223 04:37:54.753133 154064 cli_runner.go:217] Completed: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/15909-3701/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.26.1-docker-overlay2-amd64.tar.lz4:/preloaded.tar:ro -v multinode-541903-m02:/extractDir gcr.io/k8s-minikube/kicbase-builds:v0.0.37-1676506612-15768@sha256:cc1cb283879fedae93096946a6953a50075ed680d467a47cbf669e0ed7d3aebc -I lz4 -xf /preloaded.tar -C /extractDir: (4.947078735s)
I0223 04:37:54.753165 154064 kic.go:199] duration metric: took 4.947212 seconds to extract preloaded images to volume
W0223 04:37:54.753298 154064 cgroups_linux.go:77] Your kernel does not support swap limit capabilities or the cgroup is not mounted.
I0223 04:37:54.753394 154064 cli_runner.go:164] Run: docker info --format "'{{json .SecurityOptions}}'"
I0223 04:37:54.882603 154064 cli_runner.go:164] Run: docker run -d -t --privileged --security-opt seccomp=unconfined --tmpfs /tmp --tmpfs /run -v /lib/modules:/lib/modules:ro --hostname multinode-541903-m02 --name multinode-541903-m02 --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=multinode-541903-m02 --label role.minikube.sigs.k8s.io= --label mode.minikube.sigs.k8s.io=multinode-541903-m02 --network multinode-541903 --ip 192.168.58.3 --volume multinode-541903-m02:/var --security-opt apparmor=unconfined --memory=2200mb --cpus=2 -e container=docker --expose 8443 --publish=127.0.0.1::8443 --publish=127.0.0.1::22 --publish=127.0.0.1::2376 --publish=127.0.0.1::5000 --publish=127.0.0.1::32443 gcr.io/k8s-minikube/kicbase-builds:v0.0.37-1676506612-15768@sha256:cc1cb283879fedae93096946a6953a50075ed680d467a47cbf669e0ed7d3aebc
I0223 04:37:55.375896 154064 cli_runner.go:164] Run: docker container inspect multinode-541903-m02 --format={{.State.Running}}
I0223 04:37:55.461784 154064 cli_runner.go:164] Run: docker container inspect multinode-541903-m02 --format={{.State.Status}}
I0223 04:37:55.542604 154064 cli_runner.go:164] Run: docker exec multinode-541903-m02 stat /var/lib/dpkg/alternatives/iptables
I0223 04:37:55.673626 154064 oci.go:144] the created container "multinode-541903-m02" has a running status.
I0223 04:37:55.673652 154064 kic.go:221] Creating ssh key for kic: /home/jenkins/minikube-integration/15909-3701/.minikube/machines/multinode-541903-m02/id_rsa...
I0223 04:37:55.770627 154064 vm_assets.go:163] NewFileAsset: /home/jenkins/minikube-integration/15909-3701/.minikube/machines/multinode-541903-m02/id_rsa.pub -> /home/docker/.ssh/authorized_keys
I0223 04:37:55.770667 154064 kic_runner.go:191] docker (temp): /home/jenkins/minikube-integration/15909-3701/.minikube/machines/multinode-541903-m02/id_rsa.pub --> /home/docker/.ssh/authorized_keys (381 bytes)
I0223 04:37:55.930419 154064 cli_runner.go:164] Run: docker container inspect multinode-541903-m02 --format={{.State.Status}}
I0223 04:37:56.021133 154064 kic_runner.go:93] Run: chown docker:docker /home/docker/.ssh/authorized_keys
I0223 04:37:56.021159 154064 kic_runner.go:114] Args: [docker exec --privileged multinode-541903-m02 chown docker:docker /home/docker/.ssh/authorized_keys]
I0223 04:37:56.152111 154064 cli_runner.go:164] Run: docker container inspect multinode-541903-m02 --format={{.State.Status}}
I0223 04:37:56.230519 154064 machine.go:88] provisioning docker machine ...
I0223 04:37:56.230559 154064 ubuntu.go:169] provisioning hostname "multinode-541903-m02"
I0223 04:37:56.230618 154064 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" multinode-541903-m02
I0223 04:37:56.300579 154064 main.go:141] libmachine: Using SSH client type: native
I0223 04:37:56.301243 154064 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x17560a0] 0x1759120 <nil> [] 0s} 127.0.0.1 32860 <nil> <nil>}
I0223 04:37:56.301267 154064 main.go:141] libmachine: About to run SSH command:
sudo hostname multinode-541903-m02 && echo "multinode-541903-m02" | sudo tee /etc/hostname
I0223 04:37:56.450959 154064 main.go:141] libmachine: SSH cmd err, output: <nil>: multinode-541903-m02
I0223 04:37:56.451042 154064 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" multinode-541903-m02
I0223 04:37:56.526924 154064 main.go:141] libmachine: Using SSH client type: native
I0223 04:37:56.527560 154064 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x17560a0] 0x1759120 <nil> [] 0s} 127.0.0.1 32860 <nil> <nil>}
I0223 04:37:56.527589 154064 main.go:141] libmachine: About to run SSH command:
if ! grep -xq '.*\smultinode-541903-m02' /etc/hosts; then
if grep -xq '127.0.1.1\s.*' /etc/hosts; then
sudo sed -i 's/^127.0.1.1\s.*/127.0.1.1 multinode-541903-m02/g' /etc/hosts;
else
echo '127.0.1.1 multinode-541903-m02' | sudo tee -a /etc/hosts;
fi
fi
I0223 04:37:56.663115 154064 main.go:141] libmachine: SSH cmd err, output: <nil>:
I0223 04:37:56.663145 154064 ubuntu.go:175] set auth options {CertDir:/home/jenkins/minikube-integration/15909-3701/.minikube CaCertPath:/home/jenkins/minikube-integration/15909-3701/.minikube/certs/ca.pem CaPrivateKeyPath:/home/jenkins/minikube-integration/15909-3701/.minikube/certs/ca-key.pem CaCertRemotePath:/etc/docker/ca.pem ServerCertPath:/home/jenkins/minikube-integration/15909-3701/.minikube/machines/server.pem ServerKeyPath:/home/jenkins/minikube-integration/15909-3701/.minikube/machines/server-key.pem ClientKeyPath:/home/jenkins/minikube-integration/15909-3701/.minikube/certs/key.pem ServerCertRemotePath:/etc/docker/server.pem ServerKeyRemotePath:/etc/docker/server-key.pem ClientCertPath:/home/jenkins/minikube-integration/15909-3701/.minikube/certs/cert.pem ServerCertSANs:[] StorePath:/home/jenkins/minikube-integration/15909-3701/.minikube}
I0223 04:37:56.663165 154064 ubuntu.go:177] setting up certificates
I0223 04:37:56.663177 154064 provision.go:83] configureAuth start
I0223 04:37:56.663240 154064 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" multinode-541903-m02
I0223 04:37:56.739869 154064 provision.go:138] copyHostCerts
I0223 04:37:56.739937 154064 vm_assets.go:163] NewFileAsset: /home/jenkins/minikube-integration/15909-3701/.minikube/certs/ca.pem -> /home/jenkins/minikube-integration/15909-3701/.minikube/ca.pem
I0223 04:37:56.739969 154064 exec_runner.go:144] found /home/jenkins/minikube-integration/15909-3701/.minikube/ca.pem, removing ...
I0223 04:37:56.739976 154064 exec_runner.go:207] rm: /home/jenkins/minikube-integration/15909-3701/.minikube/ca.pem
I0223 04:37:56.740048 154064 exec_runner.go:151] cp: /home/jenkins/minikube-integration/15909-3701/.minikube/certs/ca.pem --> /home/jenkins/minikube-integration/15909-3701/.minikube/ca.pem (1082 bytes)
I0223 04:37:56.740137 154064 vm_assets.go:163] NewFileAsset: /home/jenkins/minikube-integration/15909-3701/.minikube/certs/cert.pem -> /home/jenkins/minikube-integration/15909-3701/.minikube/cert.pem
I0223 04:37:56.740156 154064 exec_runner.go:144] found /home/jenkins/minikube-integration/15909-3701/.minikube/cert.pem, removing ...
I0223 04:37:56.740161 154064 exec_runner.go:207] rm: /home/jenkins/minikube-integration/15909-3701/.minikube/cert.pem
I0223 04:37:56.740193 154064 exec_runner.go:151] cp: /home/jenkins/minikube-integration/15909-3701/.minikube/certs/cert.pem --> /home/jenkins/minikube-integration/15909-3701/.minikube/cert.pem (1123 bytes)
I0223 04:37:56.740241 154064 vm_assets.go:163] NewFileAsset: /home/jenkins/minikube-integration/15909-3701/.minikube/certs/key.pem -> /home/jenkins/minikube-integration/15909-3701/.minikube/key.pem
I0223 04:37:56.740257 154064 exec_runner.go:144] found /home/jenkins/minikube-integration/15909-3701/.minikube/key.pem, removing ...
I0223 04:37:56.740261 154064 exec_runner.go:207] rm: /home/jenkins/minikube-integration/15909-3701/.minikube/key.pem
I0223 04:37:56.740286 154064 exec_runner.go:151] cp: /home/jenkins/minikube-integration/15909-3701/.minikube/certs/key.pem --> /home/jenkins/minikube-integration/15909-3701/.minikube/key.pem (1675 bytes)
I0223 04:37:56.740345 154064 provision.go:112] generating server cert: /home/jenkins/minikube-integration/15909-3701/.minikube/machines/server.pem ca-key=/home/jenkins/minikube-integration/15909-3701/.minikube/certs/ca.pem private-key=/home/jenkins/minikube-integration/15909-3701/.minikube/certs/ca-key.pem org=jenkins.multinode-541903-m02 san=[192.168.58.3 127.0.0.1 localhost 127.0.0.1 minikube multinode-541903-m02]
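
The step above signs a server certificate with the local minikube CA, embedding the SANs listed in the log (192.168.58.3, 127.0.0.1, localhost, minikube, multinode-541903-m02). The stdlib-only Go sketch below reproduces the shape of that step; the fresh self-signed CA, key size, and elided error handling are assumptions for illustration, not minikube's provision code.

// Hedged sketch: sign a server cert with a CA, using the SANs from the log.
// Errors are elided for brevity; a real implementation must check them.
package main

import (
	"crypto/rand"
	"crypto/rsa"
	"crypto/x509"
	"crypto/x509/pkix"
	"encoding/pem"
	"math/big"
	"net"
	"os"
	"time"
)

func main() {
	// Assumption: a fresh self-signed CA stands in for .minikube/certs/ca.pem.
	caKey, _ := rsa.GenerateKey(rand.Reader, 2048)
	caTmpl := &x509.Certificate{
		SerialNumber:          big.NewInt(1),
		Subject:               pkix.Name{Organization: []string{"minikubeCA"}},
		NotBefore:             time.Now(),
		NotAfter:              time.Now().Add(26280 * time.Hour), // CertExpiration from the config above
		IsCA:                  true,
		KeyUsage:              x509.KeyUsageCertSign,
		BasicConstraintsValid: true,
	}
	caDER, _ := x509.CreateCertificate(rand.Reader, caTmpl, caTmpl, &caKey.PublicKey, caKey)
	caCert, _ := x509.ParseCertificate(caDER)

	// Server certificate with the SANs logged for multinode-541903-m02.
	srvKey, _ := rsa.GenerateKey(rand.Reader, 2048)
	srvTmpl := &x509.Certificate{
		SerialNumber: big.NewInt(2),
		Subject:      pkix.Name{Organization: []string{"jenkins.multinode-541903-m02"}},
		NotBefore:    time.Now(),
		NotAfter:     time.Now().Add(26280 * time.Hour),
		KeyUsage:     x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment,
		ExtKeyUsage:  []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
		DNSNames:     []string{"localhost", "minikube", "multinode-541903-m02"},
		IPAddresses:  []net.IP{net.ParseIP("192.168.58.3"), net.ParseIP("127.0.0.1")},
	}
	srvDER, _ := x509.CreateCertificate(rand.Reader, srvTmpl, caCert, &srvKey.PublicKey, caKey)
	pem.Encode(os.Stdout, &pem.Block{Type: "CERTIFICATE", Bytes: srvDER})
}
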
I0223 04:37:56.834683 154064 provision.go:172] copyRemoteCerts
I0223 04:37:56.834746 154064 ssh_runner.go:195] Run: sudo mkdir -p /etc/docker /etc/docker /etc/docker
I0223 04:37:56.834791 154064 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" multinode-541903-m02
I0223 04:37:56.911101 154064 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32860 SSHKeyPath:/home/jenkins/minikube-integration/15909-3701/.minikube/machines/multinode-541903-m02/id_rsa Username:docker}
I0223 04:37:57.007257 154064 vm_assets.go:163] NewFileAsset: /home/jenkins/minikube-integration/15909-3701/.minikube/certs/ca.pem -> /etc/docker/ca.pem
I0223 04:37:57.007326 154064 ssh_runner.go:362] scp /home/jenkins/minikube-integration/15909-3701/.minikube/certs/ca.pem --> /etc/docker/ca.pem (1082 bytes)
I0223 04:37:57.025167 154064 vm_assets.go:163] NewFileAsset: /home/jenkins/minikube-integration/15909-3701/.minikube/machines/server.pem -> /etc/docker/server.pem
I0223 04:37:57.025222 154064 ssh_runner.go:362] scp /home/jenkins/minikube-integration/15909-3701/.minikube/machines/server.pem --> /etc/docker/server.pem (1237 bytes)
I0223 04:37:57.042899 154064 vm_assets.go:163] NewFileAsset: /home/jenkins/minikube-integration/15909-3701/.minikube/machines/server-key.pem -> /etc/docker/server-key.pem
I0223 04:37:57.042951 154064 ssh_runner.go:362] scp /home/jenkins/minikube-integration/15909-3701/.minikube/machines/server-key.pem --> /etc/docker/server-key.pem (1675 bytes)
I0223 04:37:57.060889 154064 provision.go:86] duration metric: configureAuth took 397.697635ms
I0223 04:37:57.060917 154064 ubuntu.go:193] setting minikube options for container-runtime
I0223 04:37:57.061062 154064 config.go:182] Loaded profile config "multinode-541903": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.26.1
I0223 04:37:57.061102 154064 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" multinode-541903-m02
I0223 04:37:57.143384 154064 main.go:141] libmachine: Using SSH client type: native
I0223 04:37:57.144054 154064 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x17560a0] 0x1759120 <nil> [] 0s} 127.0.0.1 32860 <nil> <nil>}
I0223 04:37:57.144074 154064 main.go:141] libmachine: About to run SSH command:
df --output=fstype / | tail -n 1
I0223 04:37:57.279416 154064 main.go:141] libmachine: SSH cmd err, output: <nil>: overlay
I0223 04:37:57.279438 154064 ubuntu.go:71] root file system type: overlay
I0223 04:37:57.279533 154064 provision.go:309] Updating docker unit: /lib/systemd/system/docker.service ...
I0223 04:37:57.279586 154064 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" multinode-541903-m02
I0223 04:37:57.355657 154064 main.go:141] libmachine: Using SSH client type: native
I0223 04:37:57.356082 154064 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x17560a0] 0x1759120 <nil> [] 0s} 127.0.0.1 32860 <nil> <nil>}
I0223 04:37:57.356163 154064 main.go:141] libmachine: About to run SSH command:
sudo mkdir -p /lib/systemd/system && printf %s "[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
BindsTo=containerd.service
After=network-online.target firewalld.service containerd.service
Wants=network-online.target
Requires=docker.socket
StartLimitBurst=3
StartLimitIntervalSec=60
[Service]
Type=notify
Restart=on-failure
Environment="NO_PROXY=192.168.58.2"
# This file is a systemd drop-in unit that inherits from the base dockerd configuration.
# The base configuration already specifies an 'ExecStart=...' command. The first directive
# here is to clear out that command inherited from the base configuration. Without this,
# the command from the base configuration and the command specified here are treated as
# a sequence of commands, which is not the desired behavior, nor is it valid -- systemd
# will catch this invalid input and refuse to start the service with an error like:
# Service has more than one ExecStart= setting, which is only allowed for Type=oneshot services.
# NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other
# container runtimes. If left unlimited, it may result in OOM issues with MySQL.
ExecStart=
ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=docker --insecure-registry 10.96.0.0/12
ExecReload=/bin/kill -s HUP \$MAINPID
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
# Uncomment TasksMax if your systemd version supports it.
# Only systemd 226 and above support this version.
TasksMax=infinity
TimeoutStartSec=0
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process
[Install]
WantedBy=multi-user.target
" | sudo tee /lib/systemd/system/docker.service.new
I0223 04:37:57.501234 154064 main.go:141] libmachine: SSH cmd err, output: <nil>: [Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
BindsTo=containerd.service
After=network-online.target firewalld.service containerd.service
Wants=network-online.target
Requires=docker.socket
StartLimitBurst=3
StartLimitIntervalSec=60
[Service]
Type=notify
Restart=on-failure
Environment=NO_PROXY=192.168.58.2
# This file is a systemd drop-in unit that inherits from the base dockerd configuration.
# The base configuration already specifies an 'ExecStart=...' command. The first directive
# here is to clear out that command inherited from the base configuration. Without this,
# the command from the base configuration and the command specified here are treated as
# a sequence of commands, which is not the desired behavior, nor is it valid -- systemd
# will catch this invalid input and refuse to start the service with an error like:
# Service has more than one ExecStart= setting, which is only allowed for Type=oneshot services.
# NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other
# container runtimes. If left unlimited, it may result in OOM issues with MySQL.
ExecStart=
ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=docker --insecure-registry 10.96.0.0/12
ExecReload=/bin/kill -s HUP $MAINPID
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
# Uncomment TasksMax if your systemd version supports it.
# Only systemd 226 and above support this version.
TasksMax=infinity
TimeoutStartSec=0
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process
[Install]
WantedBy=multi-user.target
I0223 04:37:57.501322 154064 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" multinode-541903-m02
I0223 04:37:57.580470 154064 main.go:141] libmachine: Using SSH client type: native
I0223 04:37:57.580888 154064 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x17560a0] 0x1759120 <nil> [] 0s} 127.0.0.1 32860 <nil> <nil>}
I0223 04:37:57.580907 154064 main.go:141] libmachine: About to run SSH command:
sudo diff -u /lib/systemd/system/docker.service /lib/systemd/system/docker.service.new || { sudo mv /lib/systemd/system/docker.service.new /lib/systemd/system/docker.service; sudo systemctl -f daemon-reload && sudo systemctl -f enable docker && sudo systemctl -f restart docker; }
I0223 04:37:58.282424 154064 main.go:141] libmachine: SSH cmd err, output: <nil>: --- /lib/systemd/system/docker.service 2023-02-09 19:46:56.000000000 +0000
+++ /lib/systemd/system/docker.service.new 2023-02-23 04:37:57.496815118 +0000
@@ -1,30 +1,33 @@
[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
-After=network-online.target docker.socket firewalld.service containerd.service time-set.target
-Wants=network-online.target containerd.service
+BindsTo=containerd.service
+After=network-online.target firewalld.service containerd.service
+Wants=network-online.target
Requires=docker.socket
+StartLimitBurst=3
+StartLimitIntervalSec=60
[Service]
Type=notify
-# the default is not to use systemd for cgroups because the delegate issues still
-# exists and systemd currently does not support the cgroup feature set required
-# for containers run by docker
-ExecStart=/usr/bin/dockerd -H fd:// --containerd=/run/containerd/containerd.sock
-ExecReload=/bin/kill -s HUP $MAINPID
-TimeoutStartSec=0
-RestartSec=2
-Restart=always
+Restart=on-failure
-# Note that StartLimit* options were moved from "Service" to "Unit" in systemd 229.
-# Both the old, and new location are accepted by systemd 229 and up, so using the old location
-# to make them work for either version of systemd.
-StartLimitBurst=3
+Environment=NO_PROXY=192.168.58.2
-# Note that StartLimitInterval was renamed to StartLimitIntervalSec in systemd 230.
-# Both the old, and new name are accepted by systemd 230 and up, so using the old name to make
-# this option work for either version of systemd.
-StartLimitInterval=60s
+
+# This file is a systemd drop-in unit that inherits from the base dockerd configuration.
+# The base configuration already specifies an 'ExecStart=...' command. The first directive
+# here is to clear out that command inherited from the base configuration. Without this,
+# the command from the base configuration and the command specified here are treated as
+# a sequence of commands, which is not the desired behavior, nor is it valid -- systemd
+# will catch this invalid input and refuse to start the service with an error like:
+# Service has more than one ExecStart= setting, which is only allowed for Type=oneshot services.
+
+# NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other
+# container runtimes. If left unlimited, it may result in OOM issues with MySQL.
+ExecStart=
+ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=docker --insecure-registry 10.96.0.0/12
+ExecReload=/bin/kill -s HUP $MAINPID
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
@@ -32,16 +35,16 @@
LimitNPROC=infinity
LimitCORE=infinity
-# Comment TasksMax if your systemd version does not support it.
-# Only systemd 226 and above support this option.
+# Uncomment TasksMax if your systemd version supports it.
+# Only systemd 226 and above support this version.
TasksMax=infinity
+TimeoutStartSec=0
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process
-OOMScoreAdjust=-500
[Install]
WantedBy=multi-user.target
Synchronizing state of docker.service with SysV service script with /lib/systemd/systemd-sysv-install.
Executing: /lib/systemd/systemd-sysv-install enable docker
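
The one-liner above is an idempotent unit update: the freshly rendered docker.service.new is diffed against the installed unit, and only when they differ is it moved into place, followed by daemon-reload and a docker restart (the diff output shows this first boot did replace the unit). A hedged Go sketch of the same compare-then-swap pattern, run locally rather than over SSH, follows; the helper is illustrative, not minikube's code, and the demo path is hypothetical.

// Hedged sketch: write a rendered unit only when its content changed, so the
// disruptive daemon-reload/restart is skipped on no-op reprovisions.
package main

import (
	"bytes"
	"fmt"
	"os"
	"os/exec"
)

// updateUnit writes rendered to path only when the content differs and
// reports whether a daemon-reload/restart is needed.
func updateUnit(path string, rendered []byte) (bool, error) {
	current, err := os.ReadFile(path)
	if err == nil && bytes.Equal(current, rendered) {
		return false, nil // unchanged: nothing to reload or restart
	}
	if err := os.WriteFile(path, rendered, 0o644); err != nil {
		return false, err
	}
	return true, nil
}

func main() {
	unit := []byte("[Unit]\nDescription=Docker Application Container Engine\n")
	// Demo against a scratch path; on a real node this would be
	// /lib/systemd/system/docker.service.
	changed, err := updateUnit("/tmp/docker.service.demo", unit)
	if err != nil {
		fmt.Println(err)
		return
	}
	if changed {
		// Only a changed unit triggers the reload (needs root on a real host).
		out, err := exec.Command("systemctl", "daemon-reload").CombinedOutput()
		fmt.Printf("daemon-reload: %s%v\n", out, err)
	}
}
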
I0223 04:37:58.282455 154064 machine.go:91] provisioned docker machine in 2.051910736s
I0223 04:37:58.282466 154064 client.go:171] LocalClient.Create took 9.247308851s
I0223 04:37:58.282487 154064 start.go:167] duration metric: libmachine.API.Create for "multinode-541903" took 9.247361956s
I0223 04:37:58.282498 154064 start.go:300] post-start starting for "multinode-541903-m02" (driver="docker")
I0223 04:37:58.282510 154064 start.go:328] creating required directories: [/etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs]
I0223 04:37:58.282576 154064 ssh_runner.go:195] Run: sudo mkdir -p /etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs
I0223 04:37:58.282623 154064 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" multinode-541903-m02
I0223 04:37:58.361993 154064 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32860 SSHKeyPath:/home/jenkins/minikube-integration/15909-3701/.minikube/machines/multinode-541903-m02/id_rsa Username:docker}
I0223 04:37:58.455932 154064 ssh_runner.go:195] Run: cat /etc/os-release
I0223 04:37:58.458658 154064 command_runner.go:130] > NAME="Ubuntu"
I0223 04:37:58.458680 154064 command_runner.go:130] > VERSION="20.04.5 LTS (Focal Fossa)"
I0223 04:37:58.458688 154064 command_runner.go:130] > ID=ubuntu
I0223 04:37:58.458696 154064 command_runner.go:130] > ID_LIKE=debian
I0223 04:37:58.458704 154064 command_runner.go:130] > PRETTY_NAME="Ubuntu 20.04.5 LTS"
I0223 04:37:58.458711 154064 command_runner.go:130] > VERSION_ID="20.04"
I0223 04:37:58.458719 154064 command_runner.go:130] > HOME_URL="https://www.ubuntu.com/"
I0223 04:37:58.458729 154064 command_runner.go:130] > SUPPORT_URL="https://help.ubuntu.com/"
I0223 04:37:58.458738 154064 command_runner.go:130] > BUG_REPORT_URL="https://bugs.launchpad.net/ubuntu/"
I0223 04:37:58.458748 154064 command_runner.go:130] > PRIVACY_POLICY_URL="https://www.ubuntu.com/legal/terms-and-policies/privacy-policy"
I0223 04:37:58.458757 154064 command_runner.go:130] > VERSION_CODENAME=focal
I0223 04:37:58.458761 154064 command_runner.go:130] > UBUNTU_CODENAME=focal
I0223 04:37:58.458824 154064 main.go:141] libmachine: Couldn't set key PRIVACY_POLICY_URL, no corresponding struct field found
I0223 04:37:58.458837 154064 main.go:141] libmachine: Couldn't set key VERSION_CODENAME, no corresponding struct field found
I0223 04:37:58.458845 154064 main.go:141] libmachine: Couldn't set key UBUNTU_CODENAME, no corresponding struct field found
I0223 04:37:58.458851 154064 info.go:137] Remote host: Ubuntu 20.04.5 LTS
I0223 04:37:58.458858 154064 filesync.go:126] Scanning /home/jenkins/minikube-integration/15909-3701/.minikube/addons for local assets ...
I0223 04:37:58.458905 154064 filesync.go:126] Scanning /home/jenkins/minikube-integration/15909-3701/.minikube/files for local assets ...
I0223 04:37:58.458971 154064 filesync.go:149] local asset: /home/jenkins/minikube-integration/15909-3701/.minikube/files/etc/ssl/certs/103762.pem -> 103762.pem in /etc/ssl/certs
I0223 04:37:58.458980 154064 vm_assets.go:163] NewFileAsset: /home/jenkins/minikube-integration/15909-3701/.minikube/files/etc/ssl/certs/103762.pem -> /etc/ssl/certs/103762.pem
I0223 04:37:58.459054 154064 ssh_runner.go:195] Run: sudo mkdir -p /etc/ssl/certs
I0223 04:37:58.465267 154064 ssh_runner.go:362] scp /home/jenkins/minikube-integration/15909-3701/.minikube/files/etc/ssl/certs/103762.pem --> /etc/ssl/certs/103762.pem (1708 bytes)
I0223 04:37:58.482604 154064 start.go:303] post-start completed in 200.08935ms
I0223 04:37:58.482918 154064 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" multinode-541903-m02
I0223 04:37:58.563365 154064 profile.go:148] Saving config to /home/jenkins/minikube-integration/15909-3701/.minikube/profiles/multinode-541903/config.json ...
I0223 04:37:58.563616 154064 ssh_runner.go:195] Run: sh -c "df -h /var | awk 'NR==2{print $5}'"
I0223 04:37:58.563657 154064 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" multinode-541903-m02
I0223 04:37:58.641272 154064 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32860 SSHKeyPath:/home/jenkins/minikube-integration/15909-3701/.minikube/machines/multinode-541903-m02/id_rsa Username:docker}
I0223 04:37:58.736864 154064 command_runner.go:130] > 17%
I0223 04:37:58.737137 154064 ssh_runner.go:195] Run: sh -c "df -BG /var | awk 'NR==2{print $4}'"
I0223 04:37:58.741041 154064 command_runner.go:130] > 244G
I0223 04:37:58.741241 154064 start.go:128] duration metric: createHost completed in 9.708418096s
I0223 04:37:58.741262 154064 start.go:83] releasing machines lock for "multinode-541903-m02", held for 9.708554968s
I0223 04:37:58.741330 154064 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" multinode-541903-m02
I0223 04:37:58.820127 154064 out.go:177] * Found network options:
I0223 04:37:58.821922 154064 out.go:177] - NO_PROXY=192.168.58.2
W0223 04:37:58.823345 154064 proxy.go:119] fail to check proxy env: Error ip not in block
W0223 04:37:58.823400 154064 proxy.go:119] fail to check proxy env: Error ip not in block
I0223 04:37:58.823479 154064 ssh_runner.go:195] Run: sh -c "stat /etc/cni/net.d/*loopback.conf*"
I0223 04:37:58.823531 154064 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" multinode-541903-m02
I0223 04:37:58.823556 154064 ssh_runner.go:195] Run: curl -sS -m 2 https://registry.k8s.io/
I0223 04:37:58.823623 154064 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" multinode-541903-m02
I0223 04:37:58.908622 154064 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32860 SSHKeyPath:/home/jenkins/minikube-integration/15909-3701/.minikube/machines/multinode-541903-m02/id_rsa Username:docker}
I0223 04:37:58.917887 154064 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32860 SSHKeyPath:/home/jenkins/minikube-integration/15909-3701/.minikube/machines/multinode-541903-m02/id_rsa Username:docker}
I0223 04:37:59.007659 154064 command_runner.go:130] > File: /etc/cni/net.d/200-loopback.conf
I0223 04:37:59.007691 154064 command_runner.go:130] > Size: 54 Blocks: 8 IO Block: 4096 regular file
I0223 04:37:59.007702 154064 command_runner.go:130] > Device: f9h/249d Inode: 1319768 Links: 1
I0223 04:37:59.007713 154064 command_runner.go:130] > Access: (0644/-rw-r--r--) Uid: ( 0/ root) Gid: ( 0/ root)
I0223 04:37:59.007723 154064 command_runner.go:130] > Access: 2023-01-10 16:48:19.000000000 +0000
I0223 04:37:59.007732 154064 command_runner.go:130] > Modify: 2023-01-10 16:48:19.000000000 +0000
I0223 04:37:59.007747 154064 command_runner.go:130] > Change: 2023-02-23 04:22:37.544324683 +0000
I0223 04:37:59.007754 154064 command_runner.go:130] > Birth: -
I0223 04:37:59.007873 154064 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f -name *loopback.conf* -not -name *.mk_disabled -exec sh -c "grep -q loopback {} && ( grep -q name {} || sudo sed -i '/"type": "loopback"/i \ \ \ \ "name": "loopback",' {} ) && sudo sed -i 's|"cniVersion": ".*"|"cniVersion": "1.0.0"|g' {}" ;
I0223 04:37:59.042520 154064 command_runner.go:130] > <a href="https://github.com/kubernetes/registry.k8s.io">Temporary Redirect</a>.
I0223 04:37:59.044276 154064 cni.go:229] loopback cni configuration patched: "/etc/cni/net.d/*loopback.conf*" found
I0223 04:37:59.044341 154064 ssh_runner.go:195] Run: which cri-dockerd
I0223 04:37:59.047087 154064 command_runner.go:130] > /usr/bin/cri-dockerd
I0223 04:37:59.047196 154064 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/cri-docker.service.d
I0223 04:37:59.054210 154064 ssh_runner.go:362] scp memory --> /etc/systemd/system/cri-docker.service.d/10-cni.conf (135 bytes)
I0223 04:37:59.066577 154064 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f ( ( -name *bridge* -or -name *podman* ) -and -not -name *.mk_disabled ) -printf "%p, " -exec sh -c "sudo mv {} {}.mk_disabled" ;
I0223 04:37:59.081279 154064 command_runner.go:139] > /etc/cni/net.d/100-crio-bridge.conf,
I0223 04:37:59.081321 154064 cni.go:261] disabled [/etc/cni/net.d/100-crio-bridge.conf] bridge cni config(s)
I0223 04:37:59.081334 154064 start.go:485] detecting cgroup driver to use...
I0223 04:37:59.081366 154064 detect.go:196] detected "cgroupfs" cgroup driver on host os
I0223 04:37:59.081470 154064 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///run/containerd/containerd.sock
image-endpoint: unix:///run/containerd/containerd.sock
" | sudo tee /etc/crictl.yaml"
I0223 04:37:59.092299 154064 command_runner.go:130] > runtime-endpoint: unix:///run/containerd/containerd.sock
I0223 04:37:59.092320 154064 command_runner.go:130] > image-endpoint: unix:///run/containerd/containerd.sock
I0223 04:37:59.093019 154064 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)sandbox_image = .*$|\1sandbox_image = "registry.k8s.io/pause:3.9"|' /etc/containerd/config.toml"
I0223 04:37:59.100183 154064 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)restrict_oom_score_adj = .*$|\1restrict_oom_score_adj = false|' /etc/containerd/config.toml"
I0223 04:37:59.112425 154064 containerd.go:145] configuring containerd to use "cgroupfs" as cgroup driver...
I0223 04:37:59.112481 154064 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)SystemdCgroup = .*$|\1SystemdCgroup = false|g' /etc/containerd/config.toml"
I0223 04:37:59.122380 154064 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runtime.v1.linux"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I0223 04:37:59.131043 154064 ssh_runner.go:195] Run: sh -c "sudo sed -i '/systemd_cgroup/d' /etc/containerd/config.toml"
I0223 04:37:59.139839 154064 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runc.v1"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I0223 04:37:59.147468 154064 ssh_runner.go:195] Run: sh -c "sudo rm -rf /etc/cni/net.mk"
I0223 04:37:59.156552 154064 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)conf_dir = .*$|\1conf_dir = "/etc/cni/net.d"|g' /etc/containerd/config.toml"
I0223 04:37:59.164175 154064 ssh_runner.go:195] Run: sudo sysctl net.bridge.bridge-nf-call-iptables
I0223 04:37:59.169479 154064 command_runner.go:130] > net.bridge.bridge-nf-call-iptables = 1
I0223 04:37:59.169971 154064 ssh_runner.go:195] Run: sudo sh -c "echo 1 > /proc/sys/net/ipv4/ip_forward"
I0223 04:37:59.175671 154064 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0223 04:37:59.295462 154064 ssh_runner.go:195] Run: sudo systemctl restart containerd
I0223 04:37:59.395250 154064 start.go:485] detecting cgroup driver to use...
I0223 04:37:59.395302 154064 detect.go:196] detected "cgroupfs" cgroup driver on host os
I0223 04:37:59.395351 154064 ssh_runner.go:195] Run: sudo systemctl cat docker.service
I0223 04:37:59.409624 154064 command_runner.go:130] > # /lib/systemd/system/docker.service
I0223 04:37:59.409648 154064 command_runner.go:130] > [Unit]
I0223 04:37:59.409663 154064 command_runner.go:130] > Description=Docker Application Container Engine
I0223 04:37:59.409671 154064 command_runner.go:130] > Documentation=https://docs.docker.com
I0223 04:37:59.409679 154064 command_runner.go:130] > BindsTo=containerd.service
I0223 04:37:59.409688 154064 command_runner.go:130] > After=network-online.target firewalld.service containerd.service
I0223 04:37:59.409695 154064 command_runner.go:130] > Wants=network-online.target
I0223 04:37:59.409703 154064 command_runner.go:130] > Requires=docker.socket
I0223 04:37:59.409715 154064 command_runner.go:130] > StartLimitBurst=3
I0223 04:37:59.409721 154064 command_runner.go:130] > StartLimitIntervalSec=60
I0223 04:37:59.409732 154064 command_runner.go:130] > [Service]
I0223 04:37:59.409738 154064 command_runner.go:130] > Type=notify
I0223 04:37:59.409745 154064 command_runner.go:130] > Restart=on-failure
I0223 04:37:59.409752 154064 command_runner.go:130] > Environment=NO_PROXY=192.168.58.2
I0223 04:37:59.409770 154064 command_runner.go:130] > # This file is a systemd drop-in unit that inherits from the base dockerd configuration.
I0223 04:37:59.409787 154064 command_runner.go:130] > # The base configuration already specifies an 'ExecStart=...' command. The first directive
I0223 04:37:59.409800 154064 command_runner.go:130] > # here is to clear out that command inherited from the base configuration. Without this,
I0223 04:37:59.409814 154064 command_runner.go:130] > # the command from the base configuration and the command specified here are treated as
I0223 04:37:59.409827 154064 command_runner.go:130] > # a sequence of commands, which is not the desired behavior, nor is it valid -- systemd
I0223 04:37:59.409844 154064 command_runner.go:130] > # will catch this invalid input and refuse to start the service with an error like:
I0223 04:37:59.409861 154064 command_runner.go:130] > # Service has more than one ExecStart= setting, which is only allowed for Type=oneshot services.
I0223 04:37:59.409882 154064 command_runner.go:130] > # NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other
I0223 04:37:59.409898 154064 command_runner.go:130] > # container runtimes. If left unlimited, it may result in OOM issues with MySQL.
I0223 04:37:59.409905 154064 command_runner.go:130] > ExecStart=
I0223 04:37:59.409935 154064 command_runner.go:130] > ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=docker --insecure-registry 10.96.0.0/12
I0223 04:37:59.409948 154064 command_runner.go:130] > ExecReload=/bin/kill -s HUP $MAINPID
I0223 04:37:59.409962 154064 command_runner.go:130] > # Having non-zero Limit*s causes performance problems due to accounting overhead
I0223 04:37:59.409978 154064 command_runner.go:130] > # in the kernel. We recommend using cgroups to do container-local accounting.
I0223 04:37:59.409990 154064 command_runner.go:130] > LimitNOFILE=infinity
I0223 04:37:59.409996 154064 command_runner.go:130] > LimitNPROC=infinity
I0223 04:37:59.410003 154064 command_runner.go:130] > LimitCORE=infinity
I0223 04:37:59.410016 154064 command_runner.go:130] > # Uncomment TasksMax if your systemd version supports it.
I0223 04:37:59.410027 154064 command_runner.go:130] > # Only systemd 226 and above support this version.
I0223 04:37:59.410039 154064 command_runner.go:130] > TasksMax=infinity
I0223 04:37:59.410055 154064 command_runner.go:130] > TimeoutStartSec=0
I0223 04:37:59.410066 154064 command_runner.go:130] > # set delegate yes so that systemd does not reset the cgroups of docker containers
I0223 04:37:59.410077 154064 command_runner.go:130] > Delegate=yes
I0223 04:37:59.410097 154064 command_runner.go:130] > # kill only the docker process, not all processes in the cgroup
I0223 04:37:59.410111 154064 command_runner.go:130] > KillMode=process
I0223 04:37:59.410117 154064 command_runner.go:130] > [Install]
I0223 04:37:59.410127 154064 command_runner.go:130] > WantedBy=multi-user.target
I0223 04:37:59.410151 154064 cruntime.go:273] skipping containerd shutdown because we are bound to it
I0223 04:37:59.410199 154064 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service crio
I0223 04:37:59.421500  154064 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///var/run/cri-dockerd.sock
image-endpoint: unix:///var/run/cri-dockerd.sock
" | sudo tee /etc/crictl.yaml"
I0223 04:37:59.436365 154064 command_runner.go:130] > runtime-endpoint: unix:///var/run/cri-dockerd.sock
I0223 04:37:59.436396 154064 command_runner.go:130] > image-endpoint: unix:///var/run/cri-dockerd.sock
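The two-line /etc/crictl.yaml tee'd above points crictl at cri-dockerd instead of letting it probe the default runtime sockets. A sketch that writes the same file directly (assumes root; content is exactly what the log shows):

```go
package main

import "os"

// Point crictl at cri-dockerd: the same two endpoints the log shows
// being tee'd into /etc/crictl.yaml. Must run as root to write /etc.
const crictlConfig = `runtime-endpoint: unix:///var/run/cri-dockerd.sock
image-endpoint: unix:///var/run/cri-dockerd.sock
`

func main() {
	if err := os.WriteFile("/etc/crictl.yaml", []byte(crictlConfig), 0o644); err != nil {
		panic(err)
	}
}
```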
I0223 04:37:59.437679 154064 ssh_runner.go:195] Run: sudo systemctl unmask docker.service
I0223 04:37:59.576670 154064 ssh_runner.go:195] Run: sudo systemctl enable docker.socket
I0223 04:37:59.667595 154064 docker.go:529] configuring docker to use "cgroupfs" as cgroup driver...
I0223 04:37:59.667626 154064 ssh_runner.go:362] scp memory --> /etc/docker/daemon.json (144 bytes)
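The 144-byte daemon.json pushed here pins Docker's cgroup driver to the "cgroupfs" value detected earlier so it matches the kubelet. The log does not show the file's contents; "native.cgroupdriver" is Docker's documented exec-opt for this, so the following is a plausible reconstruction, not the exact payload:

```go
package main

import (
	"encoding/json"
	"os"
)

func main() {
	// Plausible daemon.json matching the detected "cgroupfs" driver.
	// "native.cgroupdriver" is a real Docker exec-opt; any other keys
	// minikube may set are not visible in this log.
	cfg := map[string]any{
		"exec-opts": []string{"native.cgroupdriver=cgroupfs"},
	}
	b, err := json.MarshalIndent(cfg, "", "  ")
	if err != nil {
		panic(err)
	}
	if err := os.WriteFile("/etc/docker/daemon.json", append(b, '\n'), 0o644); err != nil {
		panic(err)
	}
	// A `systemctl restart docker`, as the log does next, picks this up.
}
```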
I0223 04:37:59.681058 154064 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0223 04:37:59.767322 154064 ssh_runner.go:195] Run: sudo systemctl restart docker
I0223 04:37:59.993893 154064 ssh_runner.go:195] Run: sudo systemctl enable cri-docker.socket
I0223 04:38:00.069792 154064 command_runner.go:130] ! Created symlink /etc/systemd/system/sockets.target.wants/cri-docker.socket → /lib/systemd/system/cri-docker.socket.
I0223 04:38:00.069872 154064 ssh_runner.go:195] Run: sudo systemctl unmask cri-docker.socket
I0223 04:38:00.142507 154064 ssh_runner.go:195] Run: sudo systemctl enable cri-docker.socket
I0223 04:38:00.223113 154064 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0223 04:38:00.298200 154064 ssh_runner.go:195] Run: sudo systemctl restart cri-docker.socket
I0223 04:38:00.308794 154064 start.go:532] Will wait 60s for socket path /var/run/cri-dockerd.sock
I0223 04:38:00.308846 154064 ssh_runner.go:195] Run: stat /var/run/cri-dockerd.sock
I0223 04:38:00.311648 154064 command_runner.go:130] > File: /var/run/cri-dockerd.sock
I0223 04:38:00.311665 154064 command_runner.go:130] > Size: 0 Blocks: 0 IO Block: 4096 socket
I0223 04:38:00.311671 154064 command_runner.go:130] > Device: 100002h/1048578d Inode: 206 Links: 1
I0223 04:38:00.311678 154064 command_runner.go:130] > Access: (0660/srw-rw----) Uid: ( 0/ root) Gid: ( 999/ docker)
I0223 04:38:00.311683 154064 command_runner.go:130] > Access: 2023-02-23 04:38:00.301097061 +0000
I0223 04:38:00.311689 154064 command_runner.go:130] > Modify: 2023-02-23 04:38:00.301097061 +0000
I0223 04:38:00.311694 154064 command_runner.go:130] > Change: 2023-02-23 04:38:00.301097061 +0000
I0223 04:38:00.311698 154064 command_runner.go:130] > Birth: -
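"Will wait 60s for socket path" is a stat-poll with a deadline: succeed as soon as the path exists and is a unix socket. A minimal sketch of that loop (the helper name and poll interval are illustrative):

```go
package main

import (
	"fmt"
	"os"
	"time"
)

// waitForSocket polls until path exists and is a unix socket, or the
// deadline passes; the same shape as the 60s wait for
// /var/run/cri-dockerd.sock above.
func waitForSocket(path string, timeout time.Duration) error {
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		if fi, err := os.Stat(path); err == nil && fi.Mode()&os.ModeSocket != 0 {
			return nil
		}
		time.Sleep(500 * time.Millisecond)
	}
	return fmt.Errorf("timed out after %s waiting for %s", timeout, path)
}

func main() {
	if err := waitForSocket("/var/run/cri-dockerd.sock", 60*time.Second); err != nil {
		panic(err)
	}
}
```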
I0223 04:38:00.311720 154064 start.go:553] Will wait 60s for crictl version
I0223 04:38:00.311770 154064 ssh_runner.go:195] Run: which crictl
I0223 04:38:00.314097 154064 command_runner.go:130] > /usr/bin/crictl
I0223 04:38:00.314270 154064 ssh_runner.go:195] Run: sudo /usr/bin/crictl version
I0223 04:38:00.387743 154064 command_runner.go:130] > Version: 0.1.0
I0223 04:38:00.387764 154064 command_runner.go:130] > RuntimeName: docker
I0223 04:38:00.387768 154064 command_runner.go:130] > RuntimeVersion: 23.0.1
I0223 04:38:00.387774 154064 command_runner.go:130] > RuntimeApiVersion: v1alpha2
I0223 04:38:00.389329 154064 start.go:569] Version: 0.1.0
RuntimeName: docker
RuntimeVersion: 23.0.1
RuntimeApiVersion: v1alpha2
I0223 04:38:00.389392 154064 ssh_runner.go:195] Run: docker version --format {{.Server.Version}}
I0223 04:38:00.410683 154064 command_runner.go:130] > 23.0.1
I0223 04:38:00.410747 154064 ssh_runner.go:195] Run: docker version --format {{.Server.Version}}
I0223 04:38:00.432086 154064 command_runner.go:130] > 23.0.1
I0223 04:38:00.434554 154064 out.go:204] * Preparing Kubernetes v1.26.1 on Docker 23.0.1 ...
I0223 04:38:00.435979 154064 out.go:177] - env NO_PROXY=192.168.58.2
I0223 04:38:00.437240 154064 cli_runner.go:164] Run: docker network inspect multinode-541903 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I0223 04:38:00.500820 154064 ssh_runner.go:195] Run: grep 192.168.58.1 host.minikube.internal$ /etc/hosts
I0223 04:38:00.504165 154064 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\thost.minikube.internal$' "/etc/hosts"; echo "192.168.58.1 host.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I0223 04:38:00.513138 154064 certs.go:56] Setting up /home/jenkins/minikube-integration/15909-3701/.minikube/profiles/multinode-541903 for IP: 192.168.58.3
I0223 04:38:00.513163 154064 certs.go:186] acquiring lock for shared ca certs: {Name:mk899ab74bdb751a75c201c69d3c66668a7f7f94 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0223 04:38:00.513297 154064 certs.go:195] skipping minikubeCA CA generation: /home/jenkins/minikube-integration/15909-3701/.minikube/ca.key
I0223 04:38:00.513351 154064 certs.go:195] skipping proxyClientCA CA generation: /home/jenkins/minikube-integration/15909-3701/.minikube/proxy-client-ca.key
I0223 04:38:00.513368 154064 vm_assets.go:163] NewFileAsset: /home/jenkins/minikube-integration/15909-3701/.minikube/ca.crt -> /var/lib/minikube/certs/ca.crt
I0223 04:38:00.513384 154064 vm_assets.go:163] NewFileAsset: /home/jenkins/minikube-integration/15909-3701/.minikube/ca.key -> /var/lib/minikube/certs/ca.key
I0223 04:38:00.513397 154064 vm_assets.go:163] NewFileAsset: /home/jenkins/minikube-integration/15909-3701/.minikube/proxy-client-ca.crt -> /var/lib/minikube/certs/proxy-client-ca.crt
I0223 04:38:00.513414 154064 vm_assets.go:163] NewFileAsset: /home/jenkins/minikube-integration/15909-3701/.minikube/proxy-client-ca.key -> /var/lib/minikube/certs/proxy-client-ca.key
I0223 04:38:00.513482 154064 certs.go:401] found cert: /home/jenkins/minikube-integration/15909-3701/.minikube/certs/home/jenkins/minikube-integration/15909-3701/.minikube/certs/10376.pem (1338 bytes)
W0223 04:38:00.513519 154064 certs.go:397] ignoring /home/jenkins/minikube-integration/15909-3701/.minikube/certs/home/jenkins/minikube-integration/15909-3701/.minikube/certs/10376_empty.pem, impossibly tiny 0 bytes
I0223 04:38:00.513541 154064 certs.go:401] found cert: /home/jenkins/minikube-integration/15909-3701/.minikube/certs/home/jenkins/minikube-integration/15909-3701/.minikube/certs/ca-key.pem (1675 bytes)
I0223 04:38:00.513599 154064 certs.go:401] found cert: /home/jenkins/minikube-integration/15909-3701/.minikube/certs/home/jenkins/minikube-integration/15909-3701/.minikube/certs/ca.pem (1082 bytes)
I0223 04:38:00.513644 154064 certs.go:401] found cert: /home/jenkins/minikube-integration/15909-3701/.minikube/certs/home/jenkins/minikube-integration/15909-3701/.minikube/certs/cert.pem (1123 bytes)
I0223 04:38:00.513682 154064 certs.go:401] found cert: /home/jenkins/minikube-integration/15909-3701/.minikube/certs/home/jenkins/minikube-integration/15909-3701/.minikube/certs/key.pem (1675 bytes)
I0223 04:38:00.513735 154064 certs.go:401] found cert: /home/jenkins/minikube-integration/15909-3701/.minikube/files/etc/ssl/certs/home/jenkins/minikube-integration/15909-3701/.minikube/files/etc/ssl/certs/103762.pem (1708 bytes)
I0223 04:38:00.513774 154064 vm_assets.go:163] NewFileAsset: /home/jenkins/minikube-integration/15909-3701/.minikube/certs/10376.pem -> /usr/share/ca-certificates/10376.pem
I0223 04:38:00.513793 154064 vm_assets.go:163] NewFileAsset: /home/jenkins/minikube-integration/15909-3701/.minikube/files/etc/ssl/certs/103762.pem -> /usr/share/ca-certificates/103762.pem
I0223 04:38:00.513811 154064 vm_assets.go:163] NewFileAsset: /home/jenkins/minikube-integration/15909-3701/.minikube/ca.crt -> /usr/share/ca-certificates/minikubeCA.pem
I0223 04:38:00.514171 154064 ssh_runner.go:362] scp /home/jenkins/minikube-integration/15909-3701/.minikube/ca.crt --> /var/lib/minikube/certs/ca.crt (1111 bytes)
I0223 04:38:00.530664 154064 ssh_runner.go:362] scp /home/jenkins/minikube-integration/15909-3701/.minikube/ca.key --> /var/lib/minikube/certs/ca.key (1675 bytes)
I0223 04:38:00.547133 154064 ssh_runner.go:362] scp /home/jenkins/minikube-integration/15909-3701/.minikube/proxy-client-ca.crt --> /var/lib/minikube/certs/proxy-client-ca.crt (1119 bytes)
I0223 04:38:00.563529 154064 ssh_runner.go:362] scp /home/jenkins/minikube-integration/15909-3701/.minikube/proxy-client-ca.key --> /var/lib/minikube/certs/proxy-client-ca.key (1679 bytes)
I0223 04:38:00.579434 154064 ssh_runner.go:362] scp /home/jenkins/minikube-integration/15909-3701/.minikube/certs/10376.pem --> /usr/share/ca-certificates/10376.pem (1338 bytes)
I0223 04:38:00.595468 154064 ssh_runner.go:362] scp /home/jenkins/minikube-integration/15909-3701/.minikube/files/etc/ssl/certs/103762.pem --> /usr/share/ca-certificates/103762.pem (1708 bytes)
I0223 04:38:00.612064 154064 ssh_runner.go:362] scp /home/jenkins/minikube-integration/15909-3701/.minikube/ca.crt --> /usr/share/ca-certificates/minikubeCA.pem (1111 bytes)
I0223 04:38:00.628262 154064 ssh_runner.go:195] Run: openssl version
I0223 04:38:00.632636 154064 command_runner.go:130] > OpenSSL 1.1.1f 31 Mar 2020
I0223 04:38:00.632761 154064 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/10376.pem && ln -fs /usr/share/ca-certificates/10376.pem /etc/ssl/certs/10376.pem"
I0223 04:38:00.639669 154064 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/10376.pem
I0223 04:38:00.642469 154064 command_runner.go:130] > -rw-r--r-- 1 root root 1338 Feb 23 04:26 /usr/share/ca-certificates/10376.pem
I0223 04:38:00.642539 154064 certs.go:444] hashing: -rw-r--r-- 1 root root 1338 Feb 23 04:26 /usr/share/ca-certificates/10376.pem
I0223 04:38:00.642581 154064 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/10376.pem
I0223 04:38:00.646854 154064 command_runner.go:130] > 51391683
I0223 04:38:00.647027 154064 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/51391683.0 || ln -fs /etc/ssl/certs/10376.pem /etc/ssl/certs/51391683.0"
I0223 04:38:00.653863 154064 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/103762.pem && ln -fs /usr/share/ca-certificates/103762.pem /etc/ssl/certs/103762.pem"
I0223 04:38:00.660791 154064 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/103762.pem
I0223 04:38:00.663719 154064 command_runner.go:130] > -rw-r--r-- 1 root root 1708 Feb 23 04:26 /usr/share/ca-certificates/103762.pem
I0223 04:38:00.663789 154064 certs.go:444] hashing: -rw-r--r-- 1 root root 1708 Feb 23 04:26 /usr/share/ca-certificates/103762.pem
I0223 04:38:00.663836 154064 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/103762.pem
I0223 04:38:00.668130 154064 command_runner.go:130] > 3ec20f2e
I0223 04:38:00.668325 154064 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/3ec20f2e.0 || ln -fs /etc/ssl/certs/103762.pem /etc/ssl/certs/3ec20f2e.0"
I0223 04:38:00.675215 154064 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/minikubeCA.pem && ln -fs /usr/share/ca-certificates/minikubeCA.pem /etc/ssl/certs/minikubeCA.pem"
I0223 04:38:00.682094 154064 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/minikubeCA.pem
I0223 04:38:00.684817 154064 command_runner.go:130] > -rw-r--r-- 1 root root 1111 Feb 23 04:22 /usr/share/ca-certificates/minikubeCA.pem
I0223 04:38:00.684898 154064 certs.go:444] hashing: -rw-r--r-- 1 root root 1111 Feb 23 04:22 /usr/share/ca-certificates/minikubeCA.pem
I0223 04:38:00.684937 154064 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem
I0223 04:38:00.689413 154064 command_runner.go:130] > b5213941
I0223 04:38:00.689467 154064 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/b5213941.0 || ln -fs /etc/ssl/certs/minikubeCA.pem /etc/ssl/certs/b5213941.0"
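The values echoed by openssl here (51391683, 3ec20f2e, b5213941) are OpenSSL subject-name hashes; OpenSSL resolves CA certificates in /etc/ssl/certs via `<hash>.0` symlinks, which is what each `ln -fs` creates. A sketch of the same two steps, assuming the openssl binary is on PATH and the caller can write the certs directory:

```go
package main

import (
	"fmt"
	"os"
	"os/exec"
	"path/filepath"
	"strings"
)

// trustCert symlinks a CA certificate into certsDir under the
// <subject-hash>.0 name OpenSSL uses for lookups, mirroring the
// `openssl x509 -hash -noout` + `ln -fs` pair in the log.
func trustCert(certPath, certsDir string) error {
	out, err := exec.Command("openssl", "x509", "-hash", "-noout", "-in", certPath).Output()
	if err != nil {
		return err
	}
	link := filepath.Join(certsDir, strings.TrimSpace(string(out))+".0")
	os.Remove(link) // ignore error: the link may not exist yet
	return os.Symlink(certPath, link)
}

func main() {
	err := trustCert("/usr/share/ca-certificates/minikubeCA.pem", "/etc/ssl/certs")
	fmt.Println(err)
}
```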
I0223 04:38:00.696375 154064 ssh_runner.go:195] Run: docker info --format {{.CgroupDriver}}
I0223 04:38:00.717175 154064 command_runner.go:130] > cgroupfs
I0223 04:38:00.718262 154064 cni.go:84] Creating CNI manager for ""
I0223 04:38:00.718279 154064 cni.go:136] 2 nodes found, recommending kindnet
I0223 04:38:00.718292 154064 kubeadm.go:87] Using pod CIDR: 10.244.0.0/16
I0223 04:38:00.718316 154064 kubeadm.go:172] kubeadm options: {CertDir:/var/lib/minikube/certs ServiceCIDR:10.96.0.0/12 PodSubnet:10.244.0.0/16 AdvertiseAddress:192.168.58.3 APIServerPort:8443 KubernetesVersion:v1.26.1 EtcdDataDir:/var/lib/minikube/etcd EtcdExtraArgs:map[] ClusterName:multinode-541903 NodeName:multinode-541903-m02 DNSDomain:cluster.local CRISocket:/var/run/cri-dockerd.sock ImageRepository: ComponentOptions:[{Component:apiServer ExtraArgs:map[enable-admission-plugins:NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota] Pairs:map[certSANs:["127.0.0.1", "localhost", "192.168.58.2"]]} {Component:controllerManager ExtraArgs:map[allocate-node-cidrs:true leader-elect:false] Pairs:map[]} {Component:scheduler ExtraArgs:map[leader-elect:false] Pairs:map[]}] FeatureArgs:map[] NodeIP:192.168.58.3 CgroupDriver:cgroupfs ClientCAFile:/var/lib/minikube/certs/ca.crt StaticPodPath:
/etc/kubernetes/manifests ControlPlaneAddress:control-plane.minikube.internal KubeProxyOptions:map[] ResolvConfSearchRegression:false KubeletConfigOpts:map[hairpinMode:hairpin-veth runtimeRequestTimeout:15m]}
I0223 04:38:00.718451 154064 kubeadm.go:177] kubeadm config:
apiVersion: kubeadm.k8s.io/v1beta3
kind: InitConfiguration
localAPIEndpoint:
advertiseAddress: 192.168.58.3
bindPort: 8443
bootstrapTokens:
- groups:
- system:bootstrappers:kubeadm:default-node-token
ttl: 24h0m0s
usages:
- signing
- authentication
nodeRegistration:
criSocket: /var/run/cri-dockerd.sock
name: "multinode-541903-m02"
kubeletExtraArgs:
node-ip: 192.168.58.3
taints: []
---
apiVersion: kubeadm.k8s.io/v1beta3
kind: ClusterConfiguration
apiServer:
certSANs: ["127.0.0.1", "localhost", "192.168.58.2"]
extraArgs:
enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
controllerManager:
extraArgs:
allocate-node-cidrs: "true"
leader-elect: "false"
scheduler:
extraArgs:
leader-elect: "false"
certificatesDir: /var/lib/minikube/certs
clusterName: mk
controlPlaneEndpoint: control-plane.minikube.internal:8443
etcd:
local:
dataDir: /var/lib/minikube/etcd
extraArgs:
proxy-refresh-interval: "70000"
kubernetesVersion: v1.26.1
networking:
dnsDomain: cluster.local
podSubnet: "10.244.0.0/16"
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
authentication:
x509:
clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: cgroupfs
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"
nodefs.inodesFree: "0%"
imagefs.available: "0%"
failSwapOn: false
staticPodPath: /etc/kubernetes/manifests
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
maxPerCore: 0
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_established"
tcpEstablishedTimeout: 0s
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_close"
tcpCloseWaitTimeout: 0s
I0223 04:38:00.718505 154064 kubeadm.go:968] kubelet [Unit]
Wants=docker.socket
[Service]
ExecStart=
ExecStart=/var/lib/minikube/binaries/v1.26.1/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --config=/var/lib/kubelet/config.yaml --container-runtime=remote --container-runtime-endpoint=/var/run/cri-dockerd.sock --hostname-override=multinode-541903-m02 --image-service-endpoint=/var/run/cri-dockerd.sock --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.58.3
[Install]
config:
{KubernetesVersion:v1.26.1 ClusterName:multinode-541903 Namespace:default APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI: NodeIP: NodePort:8443 NodeName:}
I0223 04:38:00.718547 154064 ssh_runner.go:195] Run: sudo ls /var/lib/minikube/binaries/v1.26.1
I0223 04:38:00.724721 154064 command_runner.go:130] > kubeadm
I0223 04:38:00.724739 154064 command_runner.go:130] > kubectl
I0223 04:38:00.724744 154064 command_runner.go:130] > kubelet
I0223 04:38:00.725230 154064 binaries.go:44] Found k8s binaries, skipping transfer
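The binary transfer is skipped because kubeadm, kubectl and kubelet already exist under the versioned directory; the check reduces to a stat per expected name. A trivial sketch (the helper name is hypothetical):

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// k8sBinariesPresent reports whether all three expected binaries already
// sit under the versioned directory, so the transfer can be skipped.
func k8sBinariesPresent(dir string) bool {
	for _, name := range []string{"kubeadm", "kubectl", "kubelet"} {
		if _, err := os.Stat(filepath.Join(dir, name)); err != nil {
			return false
		}
	}
	return true
}

func main() {
	fmt.Println(k8sBinariesPresent("/var/lib/minikube/binaries/v1.26.1"))
}
```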
I0223 04:38:00.725283 154064 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/kubelet.service.d /lib/systemd/system
I0223 04:38:00.731867 154064 ssh_runner.go:362] scp memory --> /etc/systemd/system/kubelet.service.d/10-kubeadm.conf (452 bytes)
I0223 04:38:00.743990 154064 ssh_runner.go:362] scp memory --> /lib/systemd/system/kubelet.service (352 bytes)
I0223 04:38:00.755557 154064 ssh_runner.go:195] Run: grep 192.168.58.2 control-plane.minikube.internal$ /etc/hosts
I0223 04:38:00.758219 154064 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\tcontrol-plane.minikube.internal$' "/etc/hosts"; echo "192.168.58.2 control-plane.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I0223 04:38:00.766848 154064 host.go:66] Checking if "multinode-541903" exists ...
I0223 04:38:00.767040 154064 config.go:182] Loaded profile config "multinode-541903": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.26.1
I0223 04:38:00.767081 154064 start.go:301] JoinCluster: &{Name:multinode-541903 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.37-1676506612-15768@sha256:cc1cb283879fedae93096946a6953a50075ed680d467a47cbf669e0ed7d3aebc Memory:2200 CPUs:2 DiskSize:20000 VMDriver: Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:0 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.26.1 ClusterName:multinode-541903 Namespace:default APIServerName:minikubeCA APIServerNames:[] A
PIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI: NodeIP: NodePort:8443 NodeName:} Nodes:[{Name: IP:192.168.58.2 Port:8443 KubernetesVersion:v1.26.1 ContainerRuntime:docker ControlPlane:true Worker:true} {Name:m02 IP:192.168.58.3 Port:0 KubernetesVersion:v1.26.1 ContainerRuntime:docker ControlPlane:false Worker:true}] Addons:map[default-storageclass:true storage-provisioner:true] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:true ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVe
rsion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP:}
I0223 04:38:00.767159 154064 ssh_runner.go:195] Run: /bin/bash -c "sudo env PATH="/var/lib/minikube/binaries/v1.26.1:$PATH" kubeadm token create --print-join-command --ttl=0"
I0223 04:38:00.767194 154064 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" multinode-541903
I0223 04:38:00.831653 154064 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32855 SSHKeyPath:/home/jenkins/minikube-integration/15909-3701/.minikube/machines/multinode-541903/id_rsa Username:docker}
I0223 04:38:01.031751 154064 command_runner.go:130] > kubeadm join control-plane.minikube.internal:8443 --token hgeakg.fqoggq0ad93vz1r2 --discovery-token-ca-cert-hash sha256:7af6222537f3e512b57f8a6cd5cfb272dbfac3f8b48cc16883a30ea779c69b8f
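The --discovery-token-ca-cert-hash in the printed join command is kubeadm's CA pin: the SHA-256 of the cluster CA certificate's DER-encoded Subject Public Key Info, which lets the joining node authenticate the control plane before trusting it. A sketch that recomputes the value from ca.crt:

```go
package main

import (
	"crypto/sha256"
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"os"
)

// caCertHash computes kubeadm's discovery-token-ca-cert-hash:
// sha256 over the CA certificate's DER-encoded SubjectPublicKeyInfo.
func caCertHash(path string) (string, error) {
	pemBytes, err := os.ReadFile(path)
	if err != nil {
		return "", err
	}
	block, _ := pem.Decode(pemBytes)
	if block == nil {
		return "", fmt.Errorf("no PEM block in %s", path)
	}
	cert, err := x509.ParseCertificate(block.Bytes)
	if err != nil {
		return "", err
	}
	sum := sha256.Sum256(cert.RawSubjectPublicKeyInfo)
	return fmt.Sprintf("sha256:%x", sum), nil
}

func main() {
	h, err := caCertHash("/var/lib/minikube/certs/ca.crt")
	if err != nil {
		panic(err)
	}
	fmt.Println(h) // should match the hash in the printed join command
}
```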
I0223 04:38:01.035580 154064 start.go:322] trying to join worker node "m02" to cluster: &{Name:m02 IP:192.168.58.3 Port:0 KubernetesVersion:v1.26.1 ContainerRuntime:docker ControlPlane:false Worker:true}
I0223 04:38:01.035625 154064 ssh_runner.go:195] Run: /bin/bash -c "sudo env PATH="/var/lib/minikube/binaries/v1.26.1:$PATH" kubeadm join control-plane.minikube.internal:8443 --token hgeakg.fqoggq0ad93vz1r2 --discovery-token-ca-cert-hash sha256:7af6222537f3e512b57f8a6cd5cfb272dbfac3f8b48cc16883a30ea779c69b8f --ignore-preflight-errors=all --cri-socket /var/run/cri-dockerd.sock --node-name=multinode-541903-m02"
I0223 04:38:01.072361 154064 command_runner.go:130] > [preflight] Running pre-flight checks
I0223 04:38:01.098073 154064 command_runner.go:130] > [preflight] The system verification failed. Printing the output from the verification:
I0223 04:38:01.098103  154064 command_runner.go:130] > KERNEL_VERSION: 5.15.0-1029-gcp
I0223 04:38:01.098111  154064 command_runner.go:130] > OS: Linux
I0223 04:38:01.098119  154064 command_runner.go:130] > CGROUPS_CPU: enabled
I0223 04:38:01.098129  154064 command_runner.go:130] > CGROUPS_CPUACCT: enabled
I0223 04:38:01.098138  154064 command_runner.go:130] > CGROUPS_CPUSET: enabled
I0223 04:38:01.098148  154064 command_runner.go:130] > CGROUPS_DEVICES: enabled
I0223 04:38:01.098161  154064 command_runner.go:130] > CGROUPS_FREEZER: enabled
I0223 04:38:01.098169  154064 command_runner.go:130] > CGROUPS_MEMORY: enabled
I0223 04:38:01.098184  154064 command_runner.go:130] > CGROUPS_PIDS: enabled
I0223 04:38:01.098199  154064 command_runner.go:130] > CGROUPS_HUGETLB: enabled
I0223 04:38:01.098211  154064 command_runner.go:130] > CGROUPS_BLKIO: enabled
I0223 04:38:01.174036 154064 command_runner.go:130] > [preflight] Reading configuration from the cluster...
I0223 04:38:01.174066 154064 command_runner.go:130] > [preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -o yaml'
I0223 04:38:01.199278 154064 command_runner.go:130] > [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
I0223 04:38:01.201331 154064 command_runner.go:130] > [kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
I0223 04:38:01.201341 154064 command_runner.go:130] > [kubelet-start] Starting the kubelet
I0223 04:38:01.273298 154064 command_runner.go:130] > [kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...
I0223 04:38:02.787741 154064 command_runner.go:130] > This node has joined the cluster:
I0223 04:38:02.787769 154064 command_runner.go:130] > * Certificate signing request was sent to apiserver and a response was received.
I0223 04:38:02.787780 154064 command_runner.go:130] > * The Kubelet was informed of the new secure connection details.
I0223 04:38:02.787789 154064 command_runner.go:130] > Run 'kubectl get nodes' on the control-plane to see this node join the cluster.
I0223 04:38:02.790268 154064 command_runner.go:130] ! W0223 04:38:01.071998 1332 initconfiguration.go:119] Usage of CRI endpoints without URL scheme is deprecated and can cause kubelet errors in the future. Automatically prepending scheme "unix" to the "criSocket" with value "/var/run/cri-dockerd.sock". Please update your configuration!
I0223 04:38:02.790291 154064 command_runner.go:130] ! [WARNING SystemVerification]: failed to parse kernel config: unable to load kernel module: "configs", output: "modprobe: FATAL: Module configs not found in directory /lib/modules/5.15.0-1029-gcp\n", err: exit status 1
I0223 04:38:02.790299 154064 command_runner.go:130] ! [WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'
I0223 04:38:02.790323 154064 ssh_runner.go:235] Completed: /bin/bash -c "sudo env PATH="/var/lib/minikube/binaries/v1.26.1:$PATH" kubeadm join control-plane.minikube.internal:8443 --token hgeakg.fqoggq0ad93vz1r2 --discovery-token-ca-cert-hash sha256:7af6222537f3e512b57f8a6cd5cfb272dbfac3f8b48cc16883a30ea779c69b8f --ignore-preflight-errors=all --cri-socket /var/run/cri-dockerd.sock --node-name=multinode-541903-m02": (1.754681248s)
I0223 04:38:02.790345 154064 ssh_runner.go:195] Run: /bin/bash -c "sudo systemctl daemon-reload && sudo systemctl enable kubelet && sudo systemctl start kubelet"
I0223 04:38:02.962967 154064 command_runner.go:130] ! Created symlink /etc/systemd/system/multi-user.target.wants/kubelet.service → /lib/systemd/system/kubelet.service.
I0223 04:38:02.963005 154064 start.go:303] JoinCluster complete in 2.195922235s
I0223 04:38:02.963015 154064 cni.go:84] Creating CNI manager for ""
I0223 04:38:02.963020 154064 cni.go:136] 2 nodes found, recommending kindnet
I0223 04:38:02.963055 154064 ssh_runner.go:195] Run: stat /opt/cni/bin/portmap
I0223 04:38:02.966122 154064 command_runner.go:130] > File: /opt/cni/bin/portmap
I0223 04:38:02.966156 154064 command_runner.go:130] > Size: 2828728 Blocks: 5528 IO Block: 4096 regular file
I0223 04:38:02.966169 154064 command_runner.go:130] > Device: 36h/54d Inode: 1317857 Links: 1
I0223 04:38:02.966180 154064 command_runner.go:130] > Access: (0755/-rwxr-xr-x) Uid: ( 0/ root) Gid: ( 0/ root)
I0223 04:38:02.966192 154064 command_runner.go:130] > Access: 2022-05-18 18:39:21.000000000 +0000
I0223 04:38:02.966197 154064 command_runner.go:130] > Modify: 2022-05-18 18:39:21.000000000 +0000
I0223 04:38:02.966211 154064 command_runner.go:130] > Change: 2023-02-23 04:22:36.812251096 +0000
I0223 04:38:02.966219 154064 command_runner.go:130] > Birth: -
I0223 04:38:02.966281 154064 cni.go:181] applying CNI manifest using /var/lib/minikube/binaries/v1.26.1/kubectl ...
I0223 04:38:02.966295 154064 ssh_runner.go:362] scp memory --> /var/tmp/minikube/cni.yaml (2428 bytes)
I0223 04:38:02.978321 154064 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.26.1/kubectl apply --kubeconfig=/var/lib/minikube/kubeconfig -f /var/tmp/minikube/cni.yaml
I0223 04:38:03.145441 154064 command_runner.go:130] > clusterrole.rbac.authorization.k8s.io/kindnet unchanged
I0223 04:38:03.145468 154064 command_runner.go:130] > clusterrolebinding.rbac.authorization.k8s.io/kindnet unchanged
I0223 04:38:03.145477 154064 command_runner.go:130] > serviceaccount/kindnet unchanged
I0223 04:38:03.145489 154064 command_runner.go:130] > daemonset.apps/kindnet configured
I0223 04:38:03.145800 154064 loader.go:373] Config loaded from file: /home/jenkins/minikube-integration/15909-3701/kubeconfig
I0223 04:38:03.146001 154064 kapi.go:59] client config for multinode-541903: &rest.Config{Host:"https://192.168.58.2:8443", APIPath:"", ContentConfig:rest.ContentConfig{AcceptContentTypes:"", ContentType:"", GroupVersion:(*schema.GroupVersion)(nil), NegotiatedSerializer:runtime.NegotiatedSerializer(nil)}, Username:"", Password:"", BearerToken:"", BearerTokenFile:"", Impersonate:rest.ImpersonationConfig{UserName:"", UID:"", Groups:[]string(nil), Extra:map[string][]string(nil)}, AuthProvider:<nil>, AuthConfigPersister:rest.AuthProviderConfigPersister(nil), ExecProvider:<nil>, TLSClientConfig:rest.sanitizedTLSClientConfig{Insecure:false, ServerName:"", CertFile:"/home/jenkins/minikube-integration/15909-3701/.minikube/profiles/multinode-541903/client.crt", KeyFile:"/home/jenkins/minikube-integration/15909-3701/.minikube/profiles/multinode-541903/client.key", CAFile:"/home/jenkins/minikube-integration/15909-3701/.minikube/ca.crt", CertData:[]uint8(nil), KeyData:[]uint8(nil), CAData:[]uint8(nil), NextProt
os:[]string(nil)}, UserAgent:"", DisableCompression:false, Transport:http.RoundTripper(nil), WrapTransport:(transport.WrapperFunc)(0x299afe0), QPS:0, Burst:0, RateLimiter:flowcontrol.RateLimiter(nil), WarningHandler:rest.WarningHandler(nil), Timeout:0, Dial:(func(context.Context, string, string) (net.Conn, error))(nil), Proxy:(func(*http.Request) (*url.URL, error))(nil)}
I0223 04:38:03.146252 154064 round_trippers.go:463] GET https://192.168.58.2:8443/apis/apps/v1/namespaces/kube-system/deployments/coredns/scale
I0223 04:38:03.146261 154064 round_trippers.go:469] Request Headers:
I0223 04:38:03.146269 154064 round_trippers.go:473] Accept: application/json, */*
I0223 04:38:03.146278 154064 round_trippers.go:473] User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
I0223 04:38:03.147739 154064 round_trippers.go:574] Response Status: 200 OK in 1 milliseconds
I0223 04:38:03.147755 154064 round_trippers.go:577] Response Headers:
I0223 04:38:03.147762 154064 round_trippers.go:580] Audit-Id: 77f67e69-00c2-4cbd-a630-56d354208db8
I0223 04:38:03.147768 154064 round_trippers.go:580] Cache-Control: no-cache, private
I0223 04:38:03.147774 154064 round_trippers.go:580] Content-Type: application/json
I0223 04:38:03.147780 154064 round_trippers.go:580] X-Kubernetes-Pf-Flowschema-Uid: 0e806fdb-28a0-48db-9063-d67fdd19ddd0
I0223 04:38:03.147789 154064 round_trippers.go:580] X-Kubernetes-Pf-Prioritylevel-Uid: d9dafd77-df4a-4035-9904-af3d6a0be465
I0223 04:38:03.147795 154064 round_trippers.go:580] Content-Length: 291
I0223 04:38:03.147800 154064 round_trippers.go:580] Date: Thu, 23 Feb 2023 04:38:03 GMT
I0223 04:38:03.147826 154064 request.go:1171] Response Body: {"kind":"Scale","apiVersion":"autoscaling/v1","metadata":{"name":"coredns","namespace":"kube-system","uid":"7dc6990a-65df-4e2e-8c11-e636c86cc878","resourceVersion":"430","creationTimestamp":"2023-02-23T04:37:31Z"},"spec":{"replicas":2},"status":{"replicas":2,"selector":"k8s-app=kube-dns"}}
I0223 04:38:03.147949 154064 request.go:1171] Request Body: {"kind":"Scale","apiVersion":"autoscaling/v1","metadata":{"name":"coredns","namespace":"kube-system","uid":"7dc6990a-65df-4e2e-8c11-e636c86cc878","resourceVersion":"430","creationTimestamp":"2023-02-23T04:37:31Z"},"spec":{"replicas":1},"status":{"replicas":2,"selector":"k8s-app=kube-dns"}}
I0223 04:38:03.147984 154064 round_trippers.go:463] PUT https://192.168.58.2:8443/apis/apps/v1/namespaces/kube-system/deployments/coredns/scale
I0223 04:38:03.147990 154064 round_trippers.go:469] Request Headers:
I0223 04:38:03.147996 154064 round_trippers.go:473] User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
I0223 04:38:03.148005 154064 round_trippers.go:473] Accept: application/json, */*
I0223 04:38:03.148013 154064 round_trippers.go:473] Content-Type: application/json
I0223 04:38:03.152635 154064 round_trippers.go:574] Response Status: 200 OK in 4 milliseconds
I0223 04:38:03.152654 154064 round_trippers.go:577] Response Headers:
I0223 04:38:03.152661 154064 round_trippers.go:580] Audit-Id: 75d1263b-ec9d-4880-a47f-023c210cf6da
I0223 04:38:03.152667 154064 round_trippers.go:580] Cache-Control: no-cache, private
I0223 04:38:03.152673 154064 round_trippers.go:580] Content-Type: application/json
I0223 04:38:03.152687 154064 round_trippers.go:580] X-Kubernetes-Pf-Flowschema-Uid: 0e806fdb-28a0-48db-9063-d67fdd19ddd0
I0223 04:38:03.152695 154064 round_trippers.go:580] X-Kubernetes-Pf-Prioritylevel-Uid: d9dafd77-df4a-4035-9904-af3d6a0be465
I0223 04:38:03.152707 154064 round_trippers.go:580] Content-Length: 291
I0223 04:38:03.152719 154064 round_trippers.go:580] Date: Thu, 23 Feb 2023 04:38:03 GMT
I0223 04:38:03.152737 154064 request.go:1171] Response Body: {"kind":"Scale","apiVersion":"autoscaling/v1","metadata":{"name":"coredns","namespace":"kube-system","uid":"7dc6990a-65df-4e2e-8c11-e636c86cc878","resourceVersion":"466","creationTimestamp":"2023-02-23T04:37:31Z"},"spec":{"replicas":1},"status":{"replicas":2,"selector":"k8s-app=kube-dns"}}
I0223 04:38:03.653163 154064 round_trippers.go:463] GET https://192.168.58.2:8443/apis/apps/v1/namespaces/kube-system/deployments/coredns/scale
I0223 04:38:03.653183 154064 round_trippers.go:469] Request Headers:
I0223 04:38:03.653191 154064 round_trippers.go:473] Accept: application/json, */*
I0223 04:38:03.653198 154064 round_trippers.go:473] User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
I0223 04:38:03.654878 154064 round_trippers.go:574] Response Status: 200 OK in 1 milliseconds
I0223 04:38:03.654897 154064 round_trippers.go:577] Response Headers:
I0223 04:38:03.654905 154064 round_trippers.go:580] Content-Length: 291
I0223 04:38:03.654911 154064 round_trippers.go:580] Date: Thu, 23 Feb 2023 04:38:03 GMT
I0223 04:38:03.654919 154064 round_trippers.go:580] Audit-Id: 12b6ec3f-00c5-4221-b9bf-4a957283efec
I0223 04:38:03.654925 154064 round_trippers.go:580] Cache-Control: no-cache, private
I0223 04:38:03.654933 154064 round_trippers.go:580] Content-Type: application/json
I0223 04:38:03.654941 154064 round_trippers.go:580] X-Kubernetes-Pf-Flowschema-Uid: 0e806fdb-28a0-48db-9063-d67fdd19ddd0
I0223 04:38:03.654950 154064 round_trippers.go:580] X-Kubernetes-Pf-Prioritylevel-Uid: d9dafd77-df4a-4035-9904-af3d6a0be465
I0223 04:38:03.654973 154064 request.go:1171] Response Body: {"kind":"Scale","apiVersion":"autoscaling/v1","metadata":{"name":"coredns","namespace":"kube-system","uid":"7dc6990a-65df-4e2e-8c11-e636c86cc878","resourceVersion":"476","creationTimestamp":"2023-02-23T04:37:31Z"},"spec":{"replicas":1},"status":{"replicas":1,"selector":"k8s-app=kube-dns"}}
I0223 04:38:03.655063 154064 kapi.go:248] "coredns" deployment in "kube-system" namespace and "multinode-541903" context rescaled to 1 replicas
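The GET/PUT pair above rewrites the Deployment's autoscaling/v1 Scale subresource, dropping coredns from 2 replicas to 1 on this fresh two-node cluster. With client-go the same round trip is GetScale/UpdateScale; a sketch under that assumption (the kubeconfig path is illustrative, the harness loads its own):

```go
package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", "/home/jenkins/.kube/config") // path illustrative
	if err != nil {
		panic(err)
	}
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	ctx := context.Background()
	deployments := cs.AppsV1().Deployments("kube-system")
	scale, err := deployments.GetScale(ctx, "coredns", metav1.GetOptions{})
	if err != nil {
		panic(err)
	}
	scale.Spec.Replicas = 1 // same mutation as the PUT body in the log
	if _, err := deployments.UpdateScale(ctx, "coredns", scale, metav1.UpdateOptions{}); err != nil {
		panic(err)
	}
	fmt.Println("coredns rescaled to 1 replica")
}
```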
I0223 04:38:03.655087 154064 start.go:223] Will wait 6m0s for node &{Name:m02 IP:192.168.58.3 Port:0 KubernetesVersion:v1.26.1 ContainerRuntime:docker ControlPlane:false Worker:true}
I0223 04:38:03.657240 154064 out.go:177] * Verifying Kubernetes components...
I0223 04:38:03.658570 154064 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet
I0223 04:38:03.668005 154064 loader.go:373] Config loaded from file: /home/jenkins/minikube-integration/15909-3701/kubeconfig
I0223 04:38:03.668212 154064 kapi.go:59] client config for multinode-541903: &rest.Config{Host:"https://192.168.58.2:8443", APIPath:"", ContentConfig:rest.ContentConfig{AcceptContentTypes:"", ContentType:"", GroupVersion:(*schema.GroupVersion)(nil), NegotiatedSerializer:runtime.NegotiatedSerializer(nil)}, Username:"", Password:"", BearerToken:"", BearerTokenFile:"", Impersonate:rest.ImpersonationConfig{UserName:"", UID:"", Groups:[]string(nil), Extra:map[string][]string(nil)}, AuthProvider:<nil>, AuthConfigPersister:rest.AuthProviderConfigPersister(nil), ExecProvider:<nil>, TLSClientConfig:rest.sanitizedTLSClientConfig{Insecure:false, ServerName:"", CertFile:"/home/jenkins/minikube-integration/15909-3701/.minikube/profiles/multinode-541903/client.crt", KeyFile:"/home/jenkins/minikube-integration/15909-3701/.minikube/profiles/multinode-541903/client.key", CAFile:"/home/jenkins/minikube-integration/15909-3701/.minikube/ca.crt", CertData:[]uint8(nil), KeyData:[]uint8(nil), CAData:[]uint8(nil), NextProt
os:[]string(nil)}, UserAgent:"", DisableCompression:false, Transport:http.RoundTripper(nil), WrapTransport:(transport.WrapperFunc)(0x299afe0), QPS:0, Burst:0, RateLimiter:flowcontrol.RateLimiter(nil), WarningHandler:rest.WarningHandler(nil), Timeout:0, Dial:(func(context.Context, string, string) (net.Conn, error))(nil), Proxy:(func(*http.Request) (*url.URL, error))(nil)}
I0223 04:38:03.668441 154064 node_ready.go:35] waiting up to 6m0s for node "multinode-541903-m02" to be "Ready" ...
I0223 04:38:03.668496 154064 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-541903-m02
I0223 04:38:03.668506 154064 round_trippers.go:469] Request Headers:
I0223 04:38:03.668513 154064 round_trippers.go:473] Accept: application/json, */*
I0223 04:38:03.668519 154064 round_trippers.go:473] User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
I0223 04:38:03.670239 154064 round_trippers.go:574] Response Status: 200 OK in 1 milliseconds
I0223 04:38:03.670260 154064 round_trippers.go:577] Response Headers:
I0223 04:38:03.670271 154064 round_trippers.go:580] Audit-Id: 24d72d60-71af-4bfd-bddc-cc9c6f840a02
I0223 04:38:03.670280 154064 round_trippers.go:580] Cache-Control: no-cache, private
I0223 04:38:03.670289 154064 round_trippers.go:580] Content-Type: application/json
I0223 04:38:03.670295 154064 round_trippers.go:580] X-Kubernetes-Pf-Flowschema-Uid: 0e806fdb-28a0-48db-9063-d67fdd19ddd0
I0223 04:38:03.670301 154064 round_trippers.go:580] X-Kubernetes-Pf-Prioritylevel-Uid: d9dafd77-df4a-4035-9904-af3d6a0be465
I0223 04:38:03.670309 154064 round_trippers.go:580] Date: Thu, 23 Feb 2023 04:38:03 GMT
I0223 04:38:03.670438 154064 request.go:1171] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-541903-m02","uid":"706895f6-03be-417f-bb39-b234894374c9","resourceVersion":"464","creationTimestamp":"2023-02-23T04:38:02Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-541903-m02","kubernetes.io/os":"linux"},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"/var/run/cri-dockerd.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kube-controller-manager","operation":"Update","apiVersion":"v1","time":"2023-02-23T04:38:02Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{"f:node.alpha.kubernetes.io/ttl":{}}},"f:spec":{"f:podCIDR":{},"f:podCIDRs":{".":{},"v:\"10.244.1.0/24\"":{}}}}},{"manager":"kubeadm","operation":"Update","apiVersion":"v1","time":"2023-02-23T04:38:02Z","fieldsT
ype":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{"f:kubeadm.a [truncated 4059 chars]
I0223 04:38:03.670707 154064 node_ready.go:49] node "multinode-541903-m02" has status "Ready":"True"
I0223 04:38:03.670720 154064 node_ready.go:38] duration metric: took 2.265261ms waiting for node "multinode-541903-m02" to be "Ready" ...
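node_ready resolved immediately here, but in general it polls GET /api/v1/nodes/<name> until the Node reports a Ready condition with status True or the 6m budget runs out. A client-go sketch of that wait (kubeconfig path and poll interval are illustrative):

```go
package main

import (
	"context"
	"fmt"
	"time"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

// nodeReady mirrors the check in the log: a Node is "Ready" when its
// status carries a Ready condition whose status is True.
func nodeReady(n *corev1.Node) bool {
	for _, c := range n.Status.Conditions {
		if c.Type == corev1.NodeReady {
			return c.Status == corev1.ConditionTrue
		}
	}
	return false
}

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", "/home/jenkins/.kube/config") // path illustrative
	if err != nil {
		panic(err)
	}
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	deadline := time.Now().Add(6 * time.Minute) // same budget as the log
	for time.Now().Before(deadline) {
		n, err := cs.CoreV1().Nodes().Get(context.Background(), "multinode-541903-m02", metav1.GetOptions{})
		if err == nil && nodeReady(n) {
			fmt.Println("node is Ready")
			return
		}
		time.Sleep(2 * time.Second)
	}
	fmt.Println("timed out waiting for Ready")
}
```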
I0223 04:38:03.670727 154064 pod_ready.go:35] extra waiting up to 6m0s for all system-critical pods including labels [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] to be "Ready" ...
I0223 04:38:03.670777 154064 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/namespaces/kube-system/pods
I0223 04:38:03.670785 154064 round_trippers.go:469] Request Headers:
I0223 04:38:03.670792 154064 round_trippers.go:473] Accept: application/json, */*
I0223 04:38:03.670805 154064 round_trippers.go:473] User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
I0223 04:38:03.673509 154064 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
I0223 04:38:03.673523 154064 round_trippers.go:577] Response Headers:
I0223 04:38:03.673530 154064 round_trippers.go:580] X-Kubernetes-Pf-Prioritylevel-Uid: d9dafd77-df4a-4035-9904-af3d6a0be465
I0223 04:38:03.673535 154064 round_trippers.go:580] Date: Thu, 23 Feb 2023 04:38:03 GMT
I0223 04:38:03.673541 154064 round_trippers.go:580] Audit-Id: 82544d0a-b554-418f-9449-21f65793ffde
I0223 04:38:03.673547 154064 round_trippers.go:580] Cache-Control: no-cache, private
I0223 04:38:03.673557 154064 round_trippers.go:580] Content-Type: application/json
I0223 04:38:03.673562 154064 round_trippers.go:580] X-Kubernetes-Pf-Flowschema-Uid: 0e806fdb-28a0-48db-9063-d67fdd19ddd0
I0223 04:38:03.674264 154064 request.go:1171] Response Body: {"kind":"PodList","apiVersion":"v1","metadata":{"resourceVersion":"476"},"items":[{"metadata":{"name":"coredns-787d4945fb-m9kd9","generateName":"coredns-787d4945fb-","namespace":"kube-system","uid":"f98b834d-59f4-4ad0-8fc5-942542e67c77","resourceVersion":"469","creationTimestamp":"2023-02-23T04:37:44Z","deletionTimestamp":"2023-02-23T04:38:33Z","deletionGracePeriodSeconds":30,"labels":{"k8s-app":"kube-dns","pod-template-hash":"787d4945fb"},"ownerReferences":[{"apiVersion":"apps/v1","kind":"ReplicaSet","name":"coredns-787d4945fb","uid":"172875b3-c124-4a60-93b2-e8672eacefc6","controller":true,"blockOwnerDeletion":true}],"managedFields":[{"manager":"kube-controller-manager","operation":"Update","apiVersion":"v1","time":"2023-02-23T04:37:44Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:k8s-app":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"172875b3-c124-4a60-93b2-e8672eac
efc6\"}":{}}},"f:spec":{"f:affinity":{".":{},"f:podAntiAffinity":{".":{ [truncated 72760 chars]
I0223 04:38:03.676469 154064 pod_ready.go:78] waiting up to 6m0s for pod "coredns-787d4945fb-m9kd9" in "kube-system" namespace to be "Ready" ...
I0223 04:38:03.676531 154064 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/namespaces/kube-system/pods/coredns-787d4945fb-m9kd9
I0223 04:38:03.676541 154064 round_trippers.go:469] Request Headers:
I0223 04:38:03.676549 154064 round_trippers.go:473] Accept: application/json, */*
I0223 04:38:03.676557 154064 round_trippers.go:473] User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
I0223 04:38:03.678131 154064 round_trippers.go:574] Response Status: 200 OK in 1 milliseconds
I0223 04:38:03.678149 154064 round_trippers.go:577] Response Headers:
I0223 04:38:03.678158 154064 round_trippers.go:580] X-Kubernetes-Pf-Prioritylevel-Uid: d9dafd77-df4a-4035-9904-af3d6a0be465
I0223 04:38:03.678166 154064 round_trippers.go:580] Date: Thu, 23 Feb 2023 04:38:03 GMT
I0223 04:38:03.678175 154064 round_trippers.go:580] Audit-Id: 8a29a278-4e7b-4573-a3ac-ac8c78cfb256
I0223 04:38:03.678187 154064 round_trippers.go:580] Cache-Control: no-cache, private
I0223 04:38:03.678200 154064 round_trippers.go:580] Content-Type: application/json
I0223 04:38:03.678209 154064 round_trippers.go:580] X-Kubernetes-Pf-Flowschema-Uid: 0e806fdb-28a0-48db-9063-d67fdd19ddd0
I0223 04:38:03.678286 154064 request.go:1171] Response Body: {"kind":"Pod","apiVersion":"v1","metadata":{"name":"coredns-787d4945fb-m9kd9","generateName":"coredns-787d4945fb-","namespace":"kube-system","uid":"f98b834d-59f4-4ad0-8fc5-942542e67c77","resourceVersion":"469","creationTimestamp":"2023-02-23T04:37:44Z","deletionTimestamp":"2023-02-23T04:38:33Z","deletionGracePeriodSeconds":30,"labels":{"k8s-app":"kube-dns","pod-template-hash":"787d4945fb"},"ownerReferences":[{"apiVersion":"apps/v1","kind":"ReplicaSet","name":"coredns-787d4945fb","uid":"172875b3-c124-4a60-93b2-e8672eacefc6","controller":true,"blockOwnerDeletion":true}],"managedFields":[{"manager":"kube-controller-manager","operation":"Update","apiVersion":"v1","time":"2023-02-23T04:37:44Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:k8s-app":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"172875b3-c124-4a60-93b2-e8672eacefc6\"}":{}}},"f:spec":{"f:affinity":{".":{},"f:pod
AntiAffinity":{".":{},"f:preferredDuringSchedulingIgnoredDuringExecutio [truncated 6226 chars]
I0223 04:38:03.678633 154064 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-541903
I0223 04:38:03.678643 154064 round_trippers.go:469] Request Headers:
I0223 04:38:03.678649 154064 round_trippers.go:473] Accept: application/json, */*
I0223 04:38:03.678655 154064 round_trippers.go:473] User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
I0223 04:38:03.680267 154064 round_trippers.go:574] Response Status: 200 OK in 1 milliseconds
I0223 04:38:03.680284 154064 round_trippers.go:577] Response Headers:
I0223 04:38:03.680293 154064 round_trippers.go:580] Date: Thu, 23 Feb 2023 04:38:03 GMT
I0223 04:38:03.680301 154064 round_trippers.go:580] Audit-Id: 68ece9b1-5756-4ddf-9649-1306c7b0acb5
I0223 04:38:03.680310 154064 round_trippers.go:580] Cache-Control: no-cache, private
I0223 04:38:03.680318 154064 round_trippers.go:580] Content-Type: application/json
I0223 04:38:03.680331 154064 round_trippers.go:580] X-Kubernetes-Pf-Flowschema-Uid: 0e806fdb-28a0-48db-9063-d67fdd19ddd0
I0223 04:38:03.680340 154064 round_trippers.go:580] X-Kubernetes-Pf-Prioritylevel-Uid: d9dafd77-df4a-4035-9904-af3d6a0be465
I0223 04:38:03.680441 154064 request.go:1171] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-541903","uid":"d8ecdc0c-ca08-4762-8f06-41e51e5fca21","resourceVersion":"460","creationTimestamp":"2023-02-23T04:37:28Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-541903","kubernetes.io/os":"linux","minikube.k8s.io/commit":"66d56dc3ac28a702789778ac47e90f12526a0321","minikube.k8s.io/name":"multinode-541903","minikube.k8s.io/primary":"true","minikube.k8s.io/updated_at":"2023_02_23T04_37_32_0700","minikube.k8s.io/version":"v1.29.0","node-role.kubernetes.io/control-plane":"","node.kubernetes.io/exclude-from-external-load-balancers":""},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"unix:///var/run/cri-dockerd.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kubelet","operation":"Update","api
Version":"v1","time":"2023-02-23T04:37:28Z","fieldsType":"FieldsV1","fi [truncated 5161 chars]
I0223 04:38:03.680710 154064 pod_ready.go:92] pod "coredns-787d4945fb-m9kd9" in "kube-system" namespace has status "Ready":"True"
I0223 04:38:03.680720 154064 pod_ready.go:81] duration metric: took 4.233851ms waiting for pod "coredns-787d4945fb-m9kd9" in "kube-system" namespace to be "Ready" ...
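The per-pod waits that follow repeat the same pattern against /api/v1/namespaces/kube-system/pods/<name>, keyed on the PodReady condition. Stripped of the HTTP plumbing, the condition test reduces to this sketch:

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

// podReady is the test behind each pod_ready wait in the log: a pod
// counts as Ready when its PodReady condition has status True.
func podReady(p *corev1.Pod) bool {
	for _, c := range p.Status.Conditions {
		if c.Type == corev1.PodReady {
			return c.Status == corev1.ConditionTrue
		}
	}
	return false
}

func main() {
	fmt.Println(podReady(&corev1.Pod{})) // false: an empty pod has no conditions
}
```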
I0223 04:38:03.680727 154064 pod_ready.go:78] waiting up to 6m0s for pod "coredns-787d4945fb-qcmgw" in "kube-system" namespace to be "Ready" ...
I0223 04:38:03.680762 154064 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/namespaces/kube-system/pods/coredns-787d4945fb-qcmgw
I0223 04:38:03.680769 154064 round_trippers.go:469] Request Headers:
I0223 04:38:03.680776 154064 round_trippers.go:473] Accept: application/json, */*
I0223 04:38:03.680782 154064 round_trippers.go:473] User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
I0223 04:38:03.682339 154064 round_trippers.go:574] Response Status: 200 OK in 1 milliseconds
I0223 04:38:03.682361 154064 round_trippers.go:577] Response Headers:
I0223 04:38:03.682368 154064 round_trippers.go:580] Date: Thu, 23 Feb 2023 04:38:03 GMT
I0223 04:38:03.682374 154064 round_trippers.go:580] Audit-Id: b6720fdc-1716-48f1-828f-1fffc08a1542
I0223 04:38:03.682379 154064 round_trippers.go:580] Cache-Control: no-cache, private
I0223 04:38:03.682385 154064 round_trippers.go:580] Content-Type: application/json
I0223 04:38:03.682397 154064 round_trippers.go:580] X-Kubernetes-Pf-Flowschema-Uid: 0e806fdb-28a0-48db-9063-d67fdd19ddd0
I0223 04:38:03.682413 154064 round_trippers.go:580] X-Kubernetes-Pf-Prioritylevel-Uid: d9dafd77-df4a-4035-9904-af3d6a0be465
I0223 04:38:03.682494 154064 request.go:1171] Response Body: {"kind":"Pod","apiVersion":"v1","metadata":{"name":"coredns-787d4945fb-qcmgw","generateName":"coredns-787d4945fb-","namespace":"kube-system","uid":"326c565c-307c-4df8-9dc7-e3937a6ed2f6","resourceVersion":"417","creationTimestamp":"2023-02-23T04:37:44Z","labels":{"k8s-app":"kube-dns","pod-template-hash":"787d4945fb"},"ownerReferences":[{"apiVersion":"apps/v1","kind":"ReplicaSet","name":"coredns-787d4945fb","uid":"172875b3-c124-4a60-93b2-e8672eacefc6","controller":true,"blockOwnerDeletion":true}],"managedFields":[{"manager":"kube-controller-manager","operation":"Update","apiVersion":"v1","time":"2023-02-23T04:37:44Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:k8s-app":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"172875b3-c124-4a60-93b2-e8672eacefc6\"}":{}}},"f:spec":{"f:affinity":{".":{},"f:podAntiAffinity":{".":{},"f:preferredDuringSchedulingIgnoredDuringExecution":{
}}},"f:containers":{"k:{\"name\":\"coredns\"}":{".":{},"f:args":{},"f:i [truncated 6151 chars]
I0223 04:38:03.682845 154064 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-541903
I0223 04:38:03.682857 154064 round_trippers.go:469] Request Headers:
I0223 04:38:03.682863 154064 round_trippers.go:473] Accept: application/json, */*
I0223 04:38:03.682869 154064 round_trippers.go:473] User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
I0223 04:38:03.684167 154064 round_trippers.go:574] Response Status: 200 OK in 1 milliseconds
I0223 04:38:03.684185 154064 round_trippers.go:577] Response Headers:
I0223 04:38:03.684193 154064 round_trippers.go:580] Audit-Id: 3979ef9d-a81e-4f34-b7f4-caa1dbdea04e
I0223 04:38:03.684199 154064 round_trippers.go:580] Cache-Control: no-cache, private
I0223 04:38:03.684206 154064 round_trippers.go:580] Content-Type: application/json
I0223 04:38:03.684212 154064 round_trippers.go:580] X-Kubernetes-Pf-Flowschema-Uid: 0e806fdb-28a0-48db-9063-d67fdd19ddd0
I0223 04:38:03.684221 154064 round_trippers.go:580] X-Kubernetes-Pf-Prioritylevel-Uid: d9dafd77-df4a-4035-9904-af3d6a0be465
I0223 04:38:03.684227 154064 round_trippers.go:580] Date: Thu, 23 Feb 2023 04:38:03 GMT
I0223 04:38:03.684302 154064 request.go:1171] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-541903","uid":"d8ecdc0c-ca08-4762-8f06-41e51e5fca21","resourceVersion":"460","creationTimestamp":"2023-02-23T04:37:28Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-541903","kubernetes.io/os":"linux","minikube.k8s.io/commit":"66d56dc3ac28a702789778ac47e90f12526a0321","minikube.k8s.io/name":"multinode-541903","minikube.k8s.io/primary":"true","minikube.k8s.io/updated_at":"2023_02_23T04_37_32_0700","minikube.k8s.io/version":"v1.29.0","node-role.kubernetes.io/control-plane":"","node.kubernetes.io/exclude-from-external-load-balancers":""},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"unix:///var/run/cri-dockerd.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kubelet","operation":"Update","api
Version":"v1","time":"2023-02-23T04:37:28Z","fieldsType":"FieldsV1","fi [truncated 5161 chars]
I0223 04:38:03.684559 154064 pod_ready.go:92] pod "coredns-787d4945fb-qcmgw" in "kube-system" namespace has status "Ready":"True"
I0223 04:38:03.684569 154064 pod_ready.go:81] duration metric: took 3.837381ms waiting for pod "coredns-787d4945fb-qcmgw" in "kube-system" namespace to be "Ready" ...
I0223 04:38:03.684576 154064 pod_ready.go:78] waiting up to 6m0s for pod "etcd-multinode-541903" in "kube-system" namespace to be "Ready" ...
I0223 04:38:03.684606 154064 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/namespaces/kube-system/pods/etcd-multinode-541903
I0223 04:38:03.684613 154064 round_trippers.go:469] Request Headers:
I0223 04:38:03.684620 154064 round_trippers.go:473] Accept: application/json, */*
I0223 04:38:03.684626 154064 round_trippers.go:473] User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
I0223 04:38:03.685881 154064 round_trippers.go:574] Response Status: 200 OK in 1 milliseconds
I0223 04:38:03.685899 154064 round_trippers.go:577] Response Headers:
I0223 04:38:03.685909 154064 round_trippers.go:580] Audit-Id: 557f5e22-4866-40df-8086-94a7633aae50
I0223 04:38:03.685918 154064 round_trippers.go:580] Cache-Control: no-cache, private
I0223 04:38:03.685927 154064 round_trippers.go:580] Content-Type: application/json
I0223 04:38:03.685936 154064 round_trippers.go:580] X-Kubernetes-Pf-Flowschema-Uid: 0e806fdb-28a0-48db-9063-d67fdd19ddd0
I0223 04:38:03.685952 154064 round_trippers.go:580] X-Kubernetes-Pf-Prioritylevel-Uid: d9dafd77-df4a-4035-9904-af3d6a0be465
I0223 04:38:03.685962 154064 round_trippers.go:580] Date: Thu, 23 Feb 2023 04:38:03 GMT
I0223 04:38:03.686067 154064 request.go:1171] Response Body: {"kind":"Pod","apiVersion":"v1","metadata":{"name":"etcd-multinode-541903","namespace":"kube-system","uid":"d4f461e5-807f-4ab5-9619-f9678e053114","resourceVersion":"274","creationTimestamp":"2023-02-23T04:37:32Z","labels":{"component":"etcd","tier":"control-plane"},"annotations":{"kubeadm.kubernetes.io/etcd.advertise-client-urls":"https://192.168.58.2:2379","kubernetes.io/config.hash":"6d6c313fb5d82868974d1b5864c9ad55","kubernetes.io/config.mirror":"6d6c313fb5d82868974d1b5864c9ad55","kubernetes.io/config.seen":"2023-02-23T04:37:31.811093863Z","kubernetes.io/config.source":"file"},"ownerReferences":[{"apiVersion":"v1","kind":"Node","name":"multinode-541903","uid":"d8ecdc0c-ca08-4762-8f06-41e51e5fca21","controller":true}],"managedFields":[{"manager":"kubelet","operation":"Update","apiVersion":"v1","time":"2023-02-23T04:37:32Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{".":{},"f:kubeadm.kubernetes.io/etcd.advertise-cl
ient-urls":{},"f:kubernetes.io/config.hash":{},"f:kubernetes.io/config. [truncated 5836 chars]
I0223 04:38:03.686369 154064 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-541903
I0223 04:38:03.686380 154064 round_trippers.go:469] Request Headers:
I0223 04:38:03.686387 154064 round_trippers.go:473] Accept: application/json, */*
I0223 04:38:03.686393 154064 round_trippers.go:473] User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
I0223 04:38:03.687739 154064 round_trippers.go:574] Response Status: 200 OK in 1 milliseconds
I0223 04:38:03.687752 154064 round_trippers.go:577] Response Headers:
I0223 04:38:03.687759 154064 round_trippers.go:580] Audit-Id: d188ce24-1532-422f-b691-a86edf298535
I0223 04:38:03.687765 154064 round_trippers.go:580] Cache-Control: no-cache, private
I0223 04:38:03.687770 154064 round_trippers.go:580] Content-Type: application/json
I0223 04:38:03.687775 154064 round_trippers.go:580] X-Kubernetes-Pf-Flowschema-Uid: 0e806fdb-28a0-48db-9063-d67fdd19ddd0
I0223 04:38:03.687784 154064 round_trippers.go:580] X-Kubernetes-Pf-Prioritylevel-Uid: d9dafd77-df4a-4035-9904-af3d6a0be465
I0223 04:38:03.687789 154064 round_trippers.go:580] Date: Thu, 23 Feb 2023 04:38:03 GMT
I0223 04:38:03.687856 154064 request.go:1171] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-541903","uid":"d8ecdc0c-ca08-4762-8f06-41e51e5fca21","resourceVersion":"460","creationTimestamp":"2023-02-23T04:37:28Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-541903","kubernetes.io/os":"linux","minikube.k8s.io/commit":"66d56dc3ac28a702789778ac47e90f12526a0321","minikube.k8s.io/name":"multinode-541903","minikube.k8s.io/primary":"true","minikube.k8s.io/updated_at":"2023_02_23T04_37_32_0700","minikube.k8s.io/version":"v1.29.0","node-role.kubernetes.io/control-plane":"","node.kubernetes.io/exclude-from-external-load-balancers":""},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"unix:///var/run/cri-dockerd.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kubelet","operation":"Update","api
Version":"v1","time":"2023-02-23T04:37:28Z","fieldsType":"FieldsV1","fi [truncated 5161 chars]
I0223 04:38:03.688152 154064 pod_ready.go:92] pod "etcd-multinode-541903" in "kube-system" namespace has status "Ready":"True"
I0223 04:38:03.688163 154064 pod_ready.go:81] duration metric: took 3.582986ms waiting for pod "etcd-multinode-541903" in "kube-system" namespace to be "Ready" ...
I0223 04:38:03.688176 154064 pod_ready.go:78] waiting up to 6m0s for pod "kube-apiserver-multinode-541903" in "kube-system" namespace to be "Ready" ...
I0223 04:38:03.688209 154064 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/namespaces/kube-system/pods/kube-apiserver-multinode-541903
I0223 04:38:03.688217 154064 round_trippers.go:469] Request Headers:
I0223 04:38:03.688224 154064 round_trippers.go:473] Accept: application/json, */*
I0223 04:38:03.688230 154064 round_trippers.go:473] User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
I0223 04:38:03.689584 154064 round_trippers.go:574] Response Status: 200 OK in 1 milliseconds
I0223 04:38:03.689603 154064 round_trippers.go:577] Response Headers:
I0223 04:38:03.689612 154064 round_trippers.go:580] Audit-Id: b4eb098b-b341-465f-9485-e24ed27a2de7
I0223 04:38:03.689621 154064 round_trippers.go:580] Cache-Control: no-cache, private
I0223 04:38:03.689631 154064 round_trippers.go:580] Content-Type: application/json
I0223 04:38:03.689646 154064 round_trippers.go:580] X-Kubernetes-Pf-Flowschema-Uid: 0e806fdb-28a0-48db-9063-d67fdd19ddd0
I0223 04:38:03.689656 154064 round_trippers.go:580] X-Kubernetes-Pf-Prioritylevel-Uid: d9dafd77-df4a-4035-9904-af3d6a0be465
I0223 04:38:03.689669 154064 round_trippers.go:580] Date: Thu, 23 Feb 2023 04:38:03 GMT
I0223 04:38:03.689787 154064 request.go:1171] Response Body: {"kind":"Pod","apiVersion":"v1","metadata":{"name":"kube-apiserver-multinode-541903","namespace":"kube-system","uid":"4dac4fc4-a548-48fb-a9d3-2caab1386063","resourceVersion":"377","creationTimestamp":"2023-02-23T04:37:32Z","labels":{"component":"kube-apiserver","tier":"control-plane"},"annotations":{"kubeadm.kubernetes.io/kube-apiserver.advertise-address.endpoint":"192.168.58.2:8443","kubernetes.io/config.hash":"2445aa6163a4ba977c3760378d3466e2","kubernetes.io/config.mirror":"2445aa6163a4ba977c3760378d3466e2","kubernetes.io/config.seen":"2023-02-23T04:37:31.811114466Z","kubernetes.io/config.source":"file"},"ownerReferences":[{"apiVersion":"v1","kind":"Node","name":"multinode-541903","uid":"d8ecdc0c-ca08-4762-8f06-41e51e5fca21","controller":true}],"managedFields":[{"manager":"kubelet","operation":"Update","apiVersion":"v1","time":"2023-02-23T04:37:32Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{".":{},"f:kubeadm.kube
rnetes.io/kube-apiserver.advertise-address.endpoint":{},"f:kubernetes.i [truncated 8222 chars]
I0223 04:38:03.690161 154064 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-541903
I0223 04:38:03.690173 154064 round_trippers.go:469] Request Headers:
I0223 04:38:03.690180 154064 round_trippers.go:473] Accept: application/json, */*
I0223 04:38:03.690186 154064 round_trippers.go:473] User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
I0223 04:38:03.691522 154064 round_trippers.go:574] Response Status: 200 OK in 1 milliseconds
I0223 04:38:03.691541 154064 round_trippers.go:577] Response Headers:
I0223 04:38:03.691558 154064 round_trippers.go:580] Date: Thu, 23 Feb 2023 04:38:03 GMT
I0223 04:38:03.691571 154064 round_trippers.go:580] Audit-Id: 01587fe8-61a8-4c7a-acbf-b8bb23827b58
I0223 04:38:03.691584 154064 round_trippers.go:580] Cache-Control: no-cache, private
I0223 04:38:03.691596 154064 round_trippers.go:580] Content-Type: application/json
I0223 04:38:03.691608 154064 round_trippers.go:580] X-Kubernetes-Pf-Flowschema-Uid: 0e806fdb-28a0-48db-9063-d67fdd19ddd0
I0223 04:38:03.691620 154064 round_trippers.go:580] X-Kubernetes-Pf-Prioritylevel-Uid: d9dafd77-df4a-4035-9904-af3d6a0be465
I0223 04:38:03.691685 154064 request.go:1171] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-541903","uid":"d8ecdc0c-ca08-4762-8f06-41e51e5fca21","resourceVersion":"460","creationTimestamp":"2023-02-23T04:37:28Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-541903","kubernetes.io/os":"linux","minikube.k8s.io/commit":"66d56dc3ac28a702789778ac47e90f12526a0321","minikube.k8s.io/name":"multinode-541903","minikube.k8s.io/primary":"true","minikube.k8s.io/updated_at":"2023_02_23T04_37_32_0700","minikube.k8s.io/version":"v1.29.0","node-role.kubernetes.io/control-plane":"","node.kubernetes.io/exclude-from-external-load-balancers":""},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"unix:///var/run/cri-dockerd.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kubelet","operation":"Update","api
Version":"v1","time":"2023-02-23T04:37:28Z","fieldsType":"FieldsV1","fi [truncated 5161 chars]
I0223 04:38:03.691975 154064 pod_ready.go:92] pod "kube-apiserver-multinode-541903" in "kube-system" namespace has status "Ready":"True"
I0223 04:38:03.691986 154064 pod_ready.go:81] duration metric: took 3.802802ms waiting for pod "kube-apiserver-multinode-541903" in "kube-system" namespace to be "Ready" ...
I0223 04:38:03.691992 154064 pod_ready.go:78] waiting up to 6m0s for pod "kube-controller-manager-multinode-541903" in "kube-system" namespace to be "Ready" ...
I0223 04:38:03.869326 154064 request.go:622] Waited for 177.286896ms due to client-side throttling, not priority and fairness, request: GET:https://192.168.58.2:8443/api/v1/namespaces/kube-system/pods/kube-controller-manager-multinode-541903
I0223 04:38:03.869388 154064 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/namespaces/kube-system/pods/kube-controller-manager-multinode-541903
I0223 04:38:03.869393 154064 round_trippers.go:469] Request Headers:
I0223 04:38:03.869400 154064 round_trippers.go:473] Accept: application/json, */*
I0223 04:38:03.869410 154064 round_trippers.go:473] User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
I0223 04:38:03.871243 154064 round_trippers.go:574] Response Status: 200 OK in 1 milliseconds
I0223 04:38:03.871265 154064 round_trippers.go:577] Response Headers:
I0223 04:38:03.871275 154064 round_trippers.go:580] Cache-Control: no-cache, private
I0223 04:38:03.871284 154064 round_trippers.go:580] Content-Type: application/json
I0223 04:38:03.871298 154064 round_trippers.go:580] X-Kubernetes-Pf-Flowschema-Uid: 0e806fdb-28a0-48db-9063-d67fdd19ddd0
I0223 04:38:03.871312 154064 round_trippers.go:580] X-Kubernetes-Pf-Prioritylevel-Uid: d9dafd77-df4a-4035-9904-af3d6a0be465
I0223 04:38:03.871324 154064 round_trippers.go:580] Date: Thu, 23 Feb 2023 04:38:03 GMT
I0223 04:38:03.871334 154064 round_trippers.go:580] Audit-Id: ebe8197d-5b61-4113-b50f-9f2c13f49704
I0223 04:38:03.871445 154064 request.go:1171] Response Body: {"kind":"Pod","apiVersion":"v1","metadata":{"name":"kube-controller-manager-multinode-541903","namespace":"kube-system","uid":"e94928dd-b122-471e-b5df-f9df230f4d39","resourceVersion":"301","creationTimestamp":"2023-02-23T04:37:31Z","labels":{"component":"kube-controller-manager","tier":"control-plane"},"annotations":{"kubernetes.io/config.hash":"ab29de2731c8470962019cdd999d6ffc","kubernetes.io/config.mirror":"ab29de2731c8470962019cdd999d6ffc","kubernetes.io/config.seen":"2023-02-23T04:37:21.312034503Z","kubernetes.io/config.source":"file"},"ownerReferences":[{"apiVersion":"v1","kind":"Node","name":"multinode-541903","uid":"d8ecdc0c-ca08-4762-8f06-41e51e5fca21","controller":true}],"managedFields":[{"manager":"kubelet","operation":"Update","apiVersion":"v1","time":"2023-02-23T04:37:31Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{".":{},"f:kubernetes.io/config.hash":{},"f:kubernetes.io/config.mirror":{},"f:kubernetes.i
o/config.seen":{},"f:kubernetes.io/config.source":{}},"f:labels":{".":{ [truncated 7797 chars]
I0223 04:38:04.069080 154064 request.go:622] Waited for 197.219048ms due to client-side throttling, not priority and fairness, request: GET:https://192.168.58.2:8443/api/v1/nodes/multinode-541903
I0223 04:38:04.069138 154064 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-541903
I0223 04:38:04.069145 154064 round_trippers.go:469] Request Headers:
I0223 04:38:04.069156 154064 round_trippers.go:473] Accept: application/json, */*
I0223 04:38:04.069170 154064 round_trippers.go:473] User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
I0223 04:38:04.071317 154064 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
I0223 04:38:04.071376 154064 round_trippers.go:577] Response Headers:
I0223 04:38:04.071440 154064 round_trippers.go:580] Date: Thu, 23 Feb 2023 04:38:04 GMT
I0223 04:38:04.071457 154064 round_trippers.go:580] Audit-Id: 64578c14-d7fa-48d0-9ebf-8732b5c47065
I0223 04:38:04.071492 154064 round_trippers.go:580] Cache-Control: no-cache, private
I0223 04:38:04.071514 154064 round_trippers.go:580] Content-Type: application/json
I0223 04:38:04.071530 154064 round_trippers.go:580] X-Kubernetes-Pf-Flowschema-Uid: 0e806fdb-28a0-48db-9063-d67fdd19ddd0
I0223 04:38:04.071545 154064 round_trippers.go:580] X-Kubernetes-Pf-Prioritylevel-Uid: d9dafd77-df4a-4035-9904-af3d6a0be465
I0223 04:38:04.071709 154064 request.go:1171] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-541903","uid":"d8ecdc0c-ca08-4762-8f06-41e51e5fca21","resourceVersion":"460","creationTimestamp":"2023-02-23T04:37:28Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-541903","kubernetes.io/os":"linux","minikube.k8s.io/commit":"66d56dc3ac28a702789778ac47e90f12526a0321","minikube.k8s.io/name":"multinode-541903","minikube.k8s.io/primary":"true","minikube.k8s.io/updated_at":"2023_02_23T04_37_32_0700","minikube.k8s.io/version":"v1.29.0","node-role.kubernetes.io/control-plane":"","node.kubernetes.io/exclude-from-external-load-balancers":""},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"unix:///var/run/cri-dockerd.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kubelet","operation":"Update","api
Version":"v1","time":"2023-02-23T04:37:28Z","fieldsType":"FieldsV1","fi [truncated 5161 chars]
I0223 04:38:04.072172 154064 pod_ready.go:92] pod "kube-controller-manager-multinode-541903" in "kube-system" namespace has status "Ready":"True"
I0223 04:38:04.072192 154064 pod_ready.go:81] duration metric: took 380.192598ms waiting for pod "kube-controller-manager-multinode-541903" in "kube-system" namespace to be "Ready" ...
I0223 04:38:04.072204 154064 pod_ready.go:78] waiting up to 6m0s for pod "kube-proxy-b9nwm" in "kube-system" namespace to be "Ready" ...
I0223 04:38:04.269611 154064 request.go:622] Waited for 197.345097ms due to client-side throttling, not priority and fairness, request: GET:https://192.168.58.2:8443/api/v1/namespaces/kube-system/pods/kube-proxy-b9nwm
I0223 04:38:04.269669 154064 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/namespaces/kube-system/pods/kube-proxy-b9nwm
I0223 04:38:04.269676 154064 round_trippers.go:469] Request Headers:
I0223 04:38:04.269687 154064 round_trippers.go:473] User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
I0223 04:38:04.269699 154064 round_trippers.go:473] Accept: application/json, */*
I0223 04:38:04.271998 154064 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
I0223 04:38:04.272022 154064 round_trippers.go:577] Response Headers:
I0223 04:38:04.272085 154064 round_trippers.go:580] Content-Type: application/json
I0223 04:38:04.272101 154064 round_trippers.go:580] X-Kubernetes-Pf-Flowschema-Uid: 0e806fdb-28a0-48db-9063-d67fdd19ddd0
I0223 04:38:04.272110 154064 round_trippers.go:580] X-Kubernetes-Pf-Prioritylevel-Uid: d9dafd77-df4a-4035-9904-af3d6a0be465
I0223 04:38:04.272119 154064 round_trippers.go:580] Date: Thu, 23 Feb 2023 04:38:04 GMT
I0223 04:38:04.272131 154064 round_trippers.go:580] Audit-Id: 37ee586d-b438-4ba4-9379-8aeb9e48cb88
I0223 04:38:04.272145 154064 round_trippers.go:580] Cache-Control: no-cache, private
I0223 04:38:04.272285 154064 request.go:1171] Response Body: {"kind":"Pod","apiVersion":"v1","metadata":{"name":"kube-proxy-b9nwm","generateName":"kube-proxy-","namespace":"kube-system","uid":"aa99444f-90a3-4ea0-98ff-ccd3241d4a2c","resourceVersion":"387","creationTimestamp":"2023-02-23T04:37:44Z","labels":{"controller-revision-hash":"6bc4695d8c","k8s-app":"kube-proxy","pod-template-generation":"1"},"ownerReferences":[{"apiVersion":"apps/v1","kind":"DaemonSet","name":"kube-proxy","uid":"20ec477f-9776-4ad2-ac05-cf498eae4952","controller":true,"blockOwnerDeletion":true}],"managedFields":[{"manager":"kube-controller-manager","operation":"Update","apiVersion":"v1","time":"2023-02-23T04:37:44Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:controller-revision-hash":{},"f:k8s-app":{},"f:pod-template-generation":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"20ec477f-9776-4ad2-ac05-cf498eae4952\"}":{}}},"f:spec":{"f:affinity":{".":{},"f:nodeAffinity":{".":{},"f:r
equiredDuringSchedulingIgnoredDuringExecution":{}}},"f:containers":{"k: [truncated 5529 chars]
I0223 04:38:04.469561 154064 request.go:622] Waited for 196.767742ms due to client-side throttling, not priority and fairness, request: GET:https://192.168.58.2:8443/api/v1/nodes/multinode-541903
I0223 04:38:04.469624 154064 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-541903
I0223 04:38:04.469631 154064 round_trippers.go:469] Request Headers:
I0223 04:38:04.469645 154064 round_trippers.go:473] User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
I0223 04:38:04.469656 154064 round_trippers.go:473] Accept: application/json, */*
I0223 04:38:04.471191 154064 round_trippers.go:574] Response Status: 200 OK in 1 milliseconds
I0223 04:38:04.471210 154064 round_trippers.go:577] Response Headers:
I0223 04:38:04.471217 154064 round_trippers.go:580] Audit-Id: 3b6327b1-9486-4cf7-8734-db972d10023f
I0223 04:38:04.471223 154064 round_trippers.go:580] Cache-Control: no-cache, private
I0223 04:38:04.471228 154064 round_trippers.go:580] Content-Type: application/json
I0223 04:38:04.471234 154064 round_trippers.go:580] X-Kubernetes-Pf-Flowschema-Uid: 0e806fdb-28a0-48db-9063-d67fdd19ddd0
I0223 04:38:04.471239 154064 round_trippers.go:580] X-Kubernetes-Pf-Prioritylevel-Uid: d9dafd77-df4a-4035-9904-af3d6a0be465
I0223 04:38:04.471244 154064 round_trippers.go:580] Date: Thu, 23 Feb 2023 04:38:04 GMT
I0223 04:38:04.471318 154064 request.go:1171] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-541903","uid":"d8ecdc0c-ca08-4762-8f06-41e51e5fca21","resourceVersion":"460","creationTimestamp":"2023-02-23T04:37:28Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-541903","kubernetes.io/os":"linux","minikube.k8s.io/commit":"66d56dc3ac28a702789778ac47e90f12526a0321","minikube.k8s.io/name":"multinode-541903","minikube.k8s.io/primary":"true","minikube.k8s.io/updated_at":"2023_02_23T04_37_32_0700","minikube.k8s.io/version":"v1.29.0","node-role.kubernetes.io/control-plane":"","node.kubernetes.io/exclude-from-external-load-balancers":""},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"unix:///var/run/cri-dockerd.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kubelet","operation":"Update","api
Version":"v1","time":"2023-02-23T04:37:28Z","fieldsType":"FieldsV1","fi [truncated 5161 chars]
I0223 04:38:04.471614 154064 pod_ready.go:92] pod "kube-proxy-b9nwm" in "kube-system" namespace has status "Ready":"True"
I0223 04:38:04.471626 154064 pod_ready.go:81] duration metric: took 399.417135ms waiting for pod "kube-proxy-b9nwm" in "kube-system" namespace to be "Ready" ...
I0223 04:38:04.471634 154064 pod_ready.go:78] waiting up to 6m0s for pod "kube-proxy-g9qpb" in "kube-system" namespace to be "Ready" ...
I0223 04:38:04.669047 154064 request.go:622] Waited for 197.355269ms due to client-side throttling, not priority and fairness, request: GET:https://192.168.58.2:8443/api/v1/namespaces/kube-system/pods/kube-proxy-g9qpb
I0223 04:38:04.669107 154064 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/namespaces/kube-system/pods/kube-proxy-g9qpb
I0223 04:38:04.669114 154064 round_trippers.go:469] Request Headers:
I0223 04:38:04.669127 154064 round_trippers.go:473] Accept: application/json, */*
I0223 04:38:04.669143 154064 round_trippers.go:473] User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
I0223 04:38:04.671378 154064 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
I0223 04:38:04.671412 154064 round_trippers.go:577] Response Headers:
I0223 04:38:04.671422 154064 round_trippers.go:580] X-Kubernetes-Pf-Prioritylevel-Uid: d9dafd77-df4a-4035-9904-af3d6a0be465
I0223 04:38:04.671431 154064 round_trippers.go:580] Date: Thu, 23 Feb 2023 04:38:04 GMT
I0223 04:38:04.671449 154064 round_trippers.go:580] Audit-Id: db29bd8e-f766-42c9-b614-fc317ff8042f
I0223 04:38:04.671467 154064 round_trippers.go:580] Cache-Control: no-cache, private
I0223 04:38:04.671479 154064 round_trippers.go:580] Content-Type: application/json
I0223 04:38:04.671487 154064 round_trippers.go:580] X-Kubernetes-Pf-Flowschema-Uid: 0e806fdb-28a0-48db-9063-d67fdd19ddd0
I0223 04:38:04.671575 154064 request.go:1171] Response Body: {"kind":"Pod","apiVersion":"v1","metadata":{"name":"kube-proxy-g9qpb","generateName":"kube-proxy-","namespace":"kube-system","uid":"7da15e12-3257-40f2-8a07-baf4953c3a14","resourceVersion":"479","creationTimestamp":"2023-02-23T04:38:02Z","labels":{"controller-revision-hash":"6bc4695d8c","k8s-app":"kube-proxy","pod-template-generation":"1"},"ownerReferences":[{"apiVersion":"apps/v1","kind":"DaemonSet","name":"kube-proxy","uid":"20ec477f-9776-4ad2-ac05-cf498eae4952","controller":true,"blockOwnerDeletion":true}],"managedFields":[{"manager":"kube-controller-manager","operation":"Update","apiVersion":"v1","time":"2023-02-23T04:38:02Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:controller-revision-hash":{},"f:k8s-app":{},"f:pod-template-generation":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"20ec477f-9776-4ad2-ac05-cf498eae4952\"}":{}}},"f:spec":{"f:affinity":{".":{},"f:nodeAffinity":{".":{},"f:r
equiredDuringSchedulingIgnoredDuringExecution":{}}},"f:containers":{"k: [truncated 5548 chars]
I0223 04:38:04.869393 154064 request.go:622] Waited for 197.354763ms due to client-side throttling, not priority and fairness, request: GET:https://192.168.58.2:8443/api/v1/nodes/multinode-541903-m02
I0223 04:38:04.869466 154064 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-541903-m02
I0223 04:38:04.869475 154064 round_trippers.go:469] Request Headers:
I0223 04:38:04.869483 154064 round_trippers.go:473] User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
I0223 04:38:04.869493 154064 round_trippers.go:473] Accept: application/json, */*
I0223 04:38:04.871489 154064 round_trippers.go:574] Response Status: 200 OK in 1 milliseconds
I0223 04:38:04.871513 154064 round_trippers.go:577] Response Headers:
I0223 04:38:04.871523 154064 round_trippers.go:580] Audit-Id: d7e0511b-cbe2-42f5-ab77-6c2e03b0a2ec
I0223 04:38:04.871532 154064 round_trippers.go:580] Cache-Control: no-cache, private
I0223 04:38:04.871541 154064 round_trippers.go:580] Content-Type: application/json
I0223 04:38:04.871551 154064 round_trippers.go:580] X-Kubernetes-Pf-Flowschema-Uid: 0e806fdb-28a0-48db-9063-d67fdd19ddd0
I0223 04:38:04.871564 154064 round_trippers.go:580] X-Kubernetes-Pf-Prioritylevel-Uid: d9dafd77-df4a-4035-9904-af3d6a0be465
I0223 04:38:04.871575 154064 round_trippers.go:580] Date: Thu, 23 Feb 2023 04:38:04 GMT
I0223 04:38:04.871666 154064 request.go:1171] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-541903-m02","uid":"706895f6-03be-417f-bb39-b234894374c9","resourceVersion":"464","creationTimestamp":"2023-02-23T04:38:02Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-541903-m02","kubernetes.io/os":"linux"},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"/var/run/cri-dockerd.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kube-controller-manager","operation":"Update","apiVersion":"v1","time":"2023-02-23T04:38:02Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{"f:node.alpha.kubernetes.io/ttl":{}}},"f:spec":{"f:podCIDR":{},"f:podCIDRs":{".":{},"v:\"10.244.1.0/24\"":{}}}}},{"manager":"kubeadm","operation":"Update","apiVersion":"v1","time":"2023-02-23T04:38:02Z","fieldsT
ype":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{"f:kubeadm.a [truncated 4059 chars]
I0223 04:38:05.372787 154064 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/namespaces/kube-system/pods/kube-proxy-g9qpb
I0223 04:38:05.372806 154064 round_trippers.go:469] Request Headers:
I0223 04:38:05.372813 154064 round_trippers.go:473] Accept: application/json, */*
I0223 04:38:05.372820 154064 round_trippers.go:473] User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
I0223 04:38:05.374832 154064 round_trippers.go:574] Response Status: 200 OK in 1 milliseconds
I0223 04:38:05.374850 154064 round_trippers.go:577] Response Headers:
I0223 04:38:05.374857 154064 round_trippers.go:580] X-Kubernetes-Pf-Prioritylevel-Uid: d9dafd77-df4a-4035-9904-af3d6a0be465
I0223 04:38:05.374863 154064 round_trippers.go:580] Date: Thu, 23 Feb 2023 04:38:05 GMT
I0223 04:38:05.374869 154064 round_trippers.go:580] Audit-Id: dcf7f7a9-f31d-483f-9a43-3ff80d9833e5
I0223 04:38:05.374874 154064 round_trippers.go:580] Cache-Control: no-cache, private
I0223 04:38:05.374882 154064 round_trippers.go:580] Content-Type: application/json
I0223 04:38:05.374890 154064 round_trippers.go:580] X-Kubernetes-Pf-Flowschema-Uid: 0e806fdb-28a0-48db-9063-d67fdd19ddd0
I0223 04:38:05.374987 154064 request.go:1171] Response Body: {"kind":"Pod","apiVersion":"v1","metadata":{"name":"kube-proxy-g9qpb","generateName":"kube-proxy-","namespace":"kube-system","uid":"7da15e12-3257-40f2-8a07-baf4953c3a14","resourceVersion":"486","creationTimestamp":"2023-02-23T04:38:02Z","labels":{"controller-revision-hash":"6bc4695d8c","k8s-app":"kube-proxy","pod-template-generation":"1"},"ownerReferences":[{"apiVersion":"apps/v1","kind":"DaemonSet","name":"kube-proxy","uid":"20ec477f-9776-4ad2-ac05-cf498eae4952","controller":true,"blockOwnerDeletion":true}],"managedFields":[{"manager":"kube-controller-manager","operation":"Update","apiVersion":"v1","time":"2023-02-23T04:38:02Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:controller-revision-hash":{},"f:k8s-app":{},"f:pod-template-generation":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"20ec477f-9776-4ad2-ac05-cf498eae4952\"}":{}}},"f:spec":{"f:affinity":{".":{},"f:nodeAffinity":{".":{},"f:r
equiredDuringSchedulingIgnoredDuringExecution":{}}},"f:containers":{"k: [truncated 5537 chars]
I0223 04:38:05.375379 154064 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-541903-m02
I0223 04:38:05.375391 154064 round_trippers.go:469] Request Headers:
I0223 04:38:05.375398 154064 round_trippers.go:473] Accept: application/json, */*
I0223 04:38:05.375404 154064 round_trippers.go:473] User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
I0223 04:38:05.377041 154064 round_trippers.go:574] Response Status: 200 OK in 1 milliseconds
I0223 04:38:05.377056 154064 round_trippers.go:577] Response Headers:
I0223 04:38:05.377063 154064 round_trippers.go:580] X-Kubernetes-Pf-Prioritylevel-Uid: d9dafd77-df4a-4035-9904-af3d6a0be465
I0223 04:38:05.377069 154064 round_trippers.go:580] Date: Thu, 23 Feb 2023 04:38:05 GMT
I0223 04:38:05.377074 154064 round_trippers.go:580] Audit-Id: d8ff18d3-02b7-4ff0-9dae-47c3b3c8cd0d
I0223 04:38:05.377079 154064 round_trippers.go:580] Cache-Control: no-cache, private
I0223 04:38:05.377090 154064 round_trippers.go:580] Content-Type: application/json
I0223 04:38:05.377101 154064 round_trippers.go:580] X-Kubernetes-Pf-Flowschema-Uid: 0e806fdb-28a0-48db-9063-d67fdd19ddd0
I0223 04:38:05.377197 154064 request.go:1171] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-541903-m02","uid":"706895f6-03be-417f-bb39-b234894374c9","resourceVersion":"464","creationTimestamp":"2023-02-23T04:38:02Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-541903-m02","kubernetes.io/os":"linux"},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"/var/run/cri-dockerd.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kube-controller-manager","operation":"Update","apiVersion":"v1","time":"2023-02-23T04:38:02Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{"f:node.alpha.kubernetes.io/ttl":{}}},"f:spec":{"f:podCIDR":{},"f:podCIDRs":{".":{},"v:\"10.244.1.0/24\"":{}}}}},{"manager":"kubeadm","operation":"Update","apiVersion":"v1","time":"2023-02-23T04:38:02Z","fieldsT
ype":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{"f:kubeadm.a [truncated 4059 chars]
I0223 04:38:05.377431 154064 pod_ready.go:92] pod "kube-proxy-g9qpb" in "kube-system" namespace has status "Ready":"True"
I0223 04:38:05.377448 154064 pod_ready.go:81] duration metric: took 905.804482ms waiting for pod "kube-proxy-g9qpb" in "kube-system" namespace to be "Ready" ...
I0223 04:38:05.377456 154064 pod_ready.go:78] waiting up to 6m0s for pod "kube-scheduler-multinode-541903" in "kube-system" namespace to be "Ready" ...
I0223 04:38:05.468754 154064 request.go:622] Waited for 91.241913ms due to client-side throttling, not priority and fairness, request: GET:https://192.168.58.2:8443/api/v1/namespaces/kube-system/pods/kube-scheduler-multinode-541903
I0223 04:38:05.468803 154064 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/namespaces/kube-system/pods/kube-scheduler-multinode-541903
I0223 04:38:05.468808 154064 round_trippers.go:469] Request Headers:
I0223 04:38:05.468815 154064 round_trippers.go:473] Accept: application/json, */*
I0223 04:38:05.468825 154064 round_trippers.go:473] User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
I0223 04:38:05.470769 154064 round_trippers.go:574] Response Status: 200 OK in 1 milliseconds
I0223 04:38:05.470792 154064 round_trippers.go:577] Response Headers:
I0223 04:38:05.470802 154064 round_trippers.go:580] Audit-Id: 77298027-38f8-488d-b92c-81fb7b968c84
I0223 04:38:05.470808 154064 round_trippers.go:580] Cache-Control: no-cache, private
I0223 04:38:05.470816 154064 round_trippers.go:580] Content-Type: application/json
I0223 04:38:05.470830 154064 round_trippers.go:580] X-Kubernetes-Pf-Flowschema-Uid: 0e806fdb-28a0-48db-9063-d67fdd19ddd0
I0223 04:38:05.470837 154064 round_trippers.go:580] X-Kubernetes-Pf-Prioritylevel-Uid: d9dafd77-df4a-4035-9904-af3d6a0be465
I0223 04:38:05.470845 154064 round_trippers.go:580] Date: Thu, 23 Feb 2023 04:38:05 GMT
I0223 04:38:05.470955 154064 request.go:1171] Response Body: {"kind":"Pod","apiVersion":"v1","metadata":{"name":"kube-scheduler-multinode-541903","namespace":"kube-system","uid":"a9afc7a6-806a-415e-b3b0-747daf5498e8","resourceVersion":"382","creationTimestamp":"2023-02-23T04:37:32Z","labels":{"component":"kube-scheduler","tier":"control-plane"},"annotations":{"kubernetes.io/config.hash":"1101f4c1eb93d10ee8052cff434fb92a","kubernetes.io/config.mirror":"1101f4c1eb93d10ee8052cff434fb92a","kubernetes.io/config.seen":"2023-02-23T04:37:31.811116783Z","kubernetes.io/config.source":"file"},"ownerReferences":[{"apiVersion":"v1","kind":"Node","name":"multinode-541903","uid":"d8ecdc0c-ca08-4762-8f06-41e51e5fca21","controller":true}],"managedFields":[{"manager":"kubelet","operation":"Update","apiVersion":"v1","time":"2023-02-23T04:37:32Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{".":{},"f:kubernetes.io/config.hash":{},"f:kubernetes.io/config.mirror":{},"f:kubernetes.io/config.seen":{},
"f:kubernetes.io/config.source":{}},"f:labels":{".":{},"f:component":{} [truncated 4679 chars]
I0223 04:38:05.668578 154064 request.go:622] Waited for 197.273862ms due to client-side throttling, not priority and fairness, request: GET:https://192.168.58.2:8443/api/v1/nodes/multinode-541903
I0223 04:38:05.668631 154064 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes/multinode-541903
I0223 04:38:05.668636 154064 round_trippers.go:469] Request Headers:
I0223 04:38:05.668644 154064 round_trippers.go:473] Accept: application/json, */*
I0223 04:38:05.668651 154064 round_trippers.go:473] User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
I0223 04:38:05.670679 154064 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
I0223 04:38:05.670707 154064 round_trippers.go:577] Response Headers:
I0223 04:38:05.670717 154064 round_trippers.go:580] X-Kubernetes-Pf-Prioritylevel-Uid: d9dafd77-df4a-4035-9904-af3d6a0be465
I0223 04:38:05.670726 154064 round_trippers.go:580] Date: Thu, 23 Feb 2023 04:38:05 GMT
I0223 04:38:05.670738 154064 round_trippers.go:580] Audit-Id: 08bb251e-bbe8-4880-9c83-ec8d269c2548
I0223 04:38:05.670751 154064 round_trippers.go:580] Cache-Control: no-cache, private
I0223 04:38:05.670762 154064 round_trippers.go:580] Content-Type: application/json
I0223 04:38:05.670775 154064 round_trippers.go:580] X-Kubernetes-Pf-Flowschema-Uid: 0e806fdb-28a0-48db-9063-d67fdd19ddd0
I0223 04:38:05.670873 154064 request.go:1171] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"multinode-541903","uid":"d8ecdc0c-ca08-4762-8f06-41e51e5fca21","resourceVersion":"460","creationTimestamp":"2023-02-23T04:37:28Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-541903","kubernetes.io/os":"linux","minikube.k8s.io/commit":"66d56dc3ac28a702789778ac47e90f12526a0321","minikube.k8s.io/name":"multinode-541903","minikube.k8s.io/primary":"true","minikube.k8s.io/updated_at":"2023_02_23T04_37_32_0700","minikube.k8s.io/version":"v1.29.0","node-role.kubernetes.io/control-plane":"","node.kubernetes.io/exclude-from-external-load-balancers":""},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"unix:///var/run/cri-dockerd.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kubelet","operation":"Update","api
Version":"v1","time":"2023-02-23T04:37:28Z","fieldsType":"FieldsV1","fi [truncated 5161 chars]
I0223 04:38:05.671167 154064 pod_ready.go:92] pod "kube-scheduler-multinode-541903" in "kube-system" namespace has status "Ready":"True"
I0223 04:38:05.671182 154064 pod_ready.go:81] duration metric: took 293.719936ms waiting for pod "kube-scheduler-multinode-541903" in "kube-system" namespace to be "Ready" ...
I0223 04:38:05.671196 154064 pod_ready.go:38] duration metric: took 2.000458133s for extra waiting for all system-critical and pods with labels [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] to be "Ready" ...
I0223 04:38:05.671222 154064 system_svc.go:44] waiting for kubelet service to be running ....
I0223 04:38:05.671274 154064 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet
I0223 04:38:05.680587 154064 system_svc.go:56] duration metric: took 9.359914ms WaitForService to wait for kubelet.
I0223 04:38:05.680610 154064 kubeadm.go:578] duration metric: took 2.025499184s to wait for : map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] ...
I0223 04:38:05.680626 154064 node_conditions.go:102] verifying NodePressure condition ...
I0223 04:38:05.869065 154064 request.go:622] Waited for 188.370368ms due to client-side throttling, not priority and fairness, request: GET:https://192.168.58.2:8443/api/v1/nodes
I0223 04:38:05.869134 154064 round_trippers.go:463] GET https://192.168.58.2:8443/api/v1/nodes
I0223 04:38:05.869142 154064 round_trippers.go:469] Request Headers:
I0223 04:38:05.869152 154064 round_trippers.go:473] Accept: application/json, */*
I0223 04:38:05.869177 154064 round_trippers.go:473] User-Agent: minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format
I0223 04:38:05.872929 154064 round_trippers.go:574] Response Status: 200 OK in 3 milliseconds
I0223 04:38:05.872968 154064 round_trippers.go:577] Response Headers:
I0223 04:38:05.872979 154064 round_trippers.go:580] Content-Type: application/json
I0223 04:38:05.872988 154064 round_trippers.go:580] X-Kubernetes-Pf-Flowschema-Uid: 0e806fdb-28a0-48db-9063-d67fdd19ddd0
I0223 04:38:05.872998 154064 round_trippers.go:580] X-Kubernetes-Pf-Prioritylevel-Uid: d9dafd77-df4a-4035-9904-af3d6a0be465
I0223 04:38:05.873009 154064 round_trippers.go:580] Date: Thu, 23 Feb 2023 04:38:05 GMT
I0223 04:38:05.873022 154064 round_trippers.go:580] Audit-Id: a569f42e-7103-4782-9ef0-c32c7dc90de6
I0223 04:38:05.873036 154064 round_trippers.go:580] Cache-Control: no-cache, private
I0223 04:38:05.873253 154064 request.go:1171] Response Body: {"kind":"NodeList","apiVersion":"v1","metadata":{"resourceVersion":"492"},"items":[{"metadata":{"name":"multinode-541903","uid":"d8ecdc0c-ca08-4762-8f06-41e51e5fca21","resourceVersion":"460","creationTimestamp":"2023-02-23T04:37:28Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"multinode-541903","kubernetes.io/os":"linux","minikube.k8s.io/commit":"66d56dc3ac28a702789778ac47e90f12526a0321","minikube.k8s.io/name":"multinode-541903","minikube.k8s.io/primary":"true","minikube.k8s.io/updated_at":"2023_02_23T04_37_32_0700","minikube.k8s.io/version":"v1.29.0","node-role.kubernetes.io/control-plane":"","node.kubernetes.io/exclude-from-external-load-balancers":""},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"unix:///var/run/cri-dockerd.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFiel
ds":[{"manager":"kubelet","operation":"Update","apiVersion":"v1","time" [truncated 10265 chars]
I0223 04:38:05.873728 154064 node_conditions.go:122] node storage ephemeral capacity is 304681132Ki
I0223 04:38:05.873745 154064 node_conditions.go:123] node cpu capacity is 8
I0223 04:38:05.873757 154064 node_conditions.go:122] node storage ephemeral capacity is 304681132Ki
I0223 04:38:05.873768 154064 node_conditions.go:123] node cpu capacity is 8
I0223 04:38:05.873775 154064 node_conditions.go:105] duration metric: took 193.144418ms to run NodePressure ...
I0223 04:38:05.873791 154064 start.go:228] waiting for startup goroutines ...
I0223 04:38:05.873824 154064 start.go:242] writing updated cluster config ...
I0223 04:38:05.874082 154064 ssh_runner.go:195] Run: rm -f paused
I0223 04:38:05.920223 154064 start.go:555] kubectl: 1.26.1, cluster: 1.26.1 (minor skew: 0)
I0223 04:38:05.922674 154064 out.go:177] * Done! kubectl is now configured to use "multinode-541903" cluster and "default" namespace by default
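The pod_ready.go lines above are a readiness poll: for each system pod the client GETs the pod, checks its Ready condition, then GETs the owning node before moving on to the next pod. The request.go "Waited for ... due to client-side throttling, not priority and fairness" lines come from client-go's client-side rate limiter (by default roughly 5 requests/s with a small burst), not from server-side API Priority and Fairness. Below is a minimal sketch of such a poll using client-go; it is not minikube's actual pod_ready.go, and the kubeconfig path, namespace, and pod name are placeholders:

```go
package main

import (
	"context"
	"fmt"
	"time"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

// waitPodReady polls until the named pod reports condition Ready=True,
// mirroring what the pod_ready.go entries in the log record.
func waitPodReady(cs kubernetes.Interface, ns, name string, timeout time.Duration) error {
	return wait.PollImmediate(500*time.Millisecond, timeout, func() (bool, error) {
		pod, err := cs.CoreV1().Pods(ns).Get(context.Background(), name, metav1.GetOptions{})
		if err != nil {
			return false, nil // transient errors count as "not ready yet"
		}
		for _, c := range pod.Status.Conditions {
			if c.Type == corev1.PodReady {
				return c.Status == corev1.ConditionTrue, nil
			}
		}
		return false, nil
	})
}

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	// 6m0s matches the per-pod timeout used in the log above.
	if err := waitPodReady(cs, "kube-system", "etcd-multinode-541903", 6*time.Minute); err != nil {
		panic(err)
	}
	fmt.Println("pod is Ready")
}
```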
*
* ==> Docker <==
* -- Logs begin at Thu 2023-02-23 04:37:13 UTC, end at Thu 2023-02-23 04:38:11 UTC. --
Feb 23 04:37:16 multinode-541903 dockerd[942]: time="2023-02-23T04:37:16.982738821Z" level=info msg="[core] [Channel #4] Channel authority set to \"localhost\"" module=grpc
Feb 23 04:37:16 multinode-541903 dockerd[942]: time="2023-02-23T04:37:16.982775819Z" level=info msg="[core] [Channel #4] Resolver state updated: {\n \"Addresses\": [\n {\n \"Addr\": \"/run/containerd/containerd.sock\",\n \"ServerName\": \"\",\n \"Attributes\": {},\n \"BalancerAttributes\": null,\n \"Type\": 0,\n \"Metadata\": null\n }\n ],\n \"ServiceConfig\": null,\n \"Attributes\": null\n} (resolver returned new addresses)" module=grpc
Feb 23 04:37:16 multinode-541903 dockerd[942]: time="2023-02-23T04:37:16.982793339Z" level=info msg="[core] [Channel #4] Channel switches to new LB policy \"pick_first\"" module=grpc
Feb 23 04:37:16 multinode-541903 dockerd[942]: time="2023-02-23T04:37:16.982807879Z" level=info msg="[core] [Channel #4 SubChannel #5] Subchannel created" module=grpc
Feb 23 04:37:16 multinode-541903 dockerd[942]: time="2023-02-23T04:37:16.982845179Z" level=info msg="[core] [Channel #4 SubChannel #5] Subchannel Connectivity change to CONNECTING" module=grpc
Feb 23 04:37:16 multinode-541903 dockerd[942]: time="2023-02-23T04:37:16.982878002Z" level=info msg="[core] [Channel #4 SubChannel #5] Subchannel picks a new address \"/run/containerd/containerd.sock\" to connect" module=grpc
Feb 23 04:37:16 multinode-541903 dockerd[942]: time="2023-02-23T04:37:16.983098589Z" level=info msg="[core] [Channel #4] Channel Connectivity change to CONNECTING" module=grpc
Feb 23 04:37:16 multinode-541903 dockerd[942]: time="2023-02-23T04:37:16.983777256Z" level=info msg="[core] [Channel #4 SubChannel #5] Subchannel Connectivity change to READY" module=grpc
Feb 23 04:37:16 multinode-541903 dockerd[942]: time="2023-02-23T04:37:16.983930395Z" level=info msg="[core] [Channel #4] Channel Connectivity change to READY" module=grpc
Feb 23 04:37:16 multinode-541903 dockerd[942]: time="2023-02-23T04:37:16.988244981Z" level=info msg="[graphdriver] trying configured driver: overlay2"
Feb 23 04:37:16 multinode-541903 dockerd[942]: time="2023-02-23T04:37:16.999235215Z" level=info msg="Loading containers: start."
Feb 23 04:37:17 multinode-541903 dockerd[942]: time="2023-02-23T04:37:17.071369686Z" level=info msg="Default bridge (docker0) is assigned with an IP address 172.17.0.0/16. Daemon option --bip can be used to set a preferred IP address"
Feb 23 04:37:17 multinode-541903 dockerd[942]: time="2023-02-23T04:37:17.102366076Z" level=info msg="Loading containers: done."
Feb 23 04:37:17 multinode-541903 dockerd[942]: time="2023-02-23T04:37:17.110653262Z" level=info msg="Docker daemon" commit=bc3805a graphdriver=overlay2 version=23.0.1
Feb 23 04:37:17 multinode-541903 dockerd[942]: time="2023-02-23T04:37:17.110697470Z" level=info msg="Daemon has completed initialization"
Feb 23 04:37:17 multinode-541903 dockerd[942]: time="2023-02-23T04:37:17.122862766Z" level=info msg="[core] [Server #7] Server created" module=grpc
Feb 23 04:37:17 multinode-541903 systemd[1]: Started Docker Application Container Engine.
Feb 23 04:37:17 multinode-541903 dockerd[942]: time="2023-02-23T04:37:17.127337213Z" level=info msg="API listen on [::]:2376"
Feb 23 04:37:17 multinode-541903 dockerd[942]: time="2023-02-23T04:37:17.133971980Z" level=info msg="API listen on /var/run/docker.sock"
Feb 23 04:37:59 multinode-541903 dockerd[942]: time="2023-02-23T04:37:59.139178100Z" level=info msg="ignoring event" container=6f11eb4875809ce6bbb9d5742076520cfd230818f0d79ecc123f25f18fce3942 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Feb 23 04:37:59 multinode-541903 dockerd[942]: time="2023-02-23T04:37:59.228420110Z" level=info msg="ignoring event" container=ca10a6f336621cc67a6988c31ad42c6441445f59a300e11b9b54572277c12fac module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Feb 23 04:37:59 multinode-541903 dockerd[942]: time="2023-02-23T04:37:59.528691310Z" level=info msg="ignoring event" container=0facd63c6ea8ef2ec8fd655a685c5e7a6416c8c499c223615119367ca64a802e module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Feb 23 04:37:59 multinode-541903 dockerd[942]: time="2023-02-23T04:37:59.604361149Z" level=info msg="ignoring event" container=02857e7f4f8084590de20a19df06dbd8f414afb16fdc0d542a9009bd55a32d6e module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Feb 23 04:38:08 multinode-541903 dockerd[942]: time="2023-02-23T04:38:08.935646204Z" level=info msg="ignoring event" container=4108fc9720e226598b1c0ea677f1bb8f0ddc683227d0d3547d2c95e3ae9f71fc module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Feb 23 04:38:09 multinode-541903 dockerd[942]: time="2023-02-23T04:38:09.020901590Z" level=info msg="ignoring event" container=1c6fda1d2125d0389ae4d3ed4c9f1afa7af50eed2907bfe6eb3298616f4c2761 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
*
* ==> container status <==
* CONTAINER IMAGE CREATED STATE NAME ATTEMPT POD ID
04762ca59750b gcr.io/k8s-minikube/busybox@sha256:9afb80db71730dbb303fe00765cbf34bddbdc6b66e49897fc2e1861967584b12 4 seconds ago Running busybox 0 f98aa7349c865
eec447878870c 5185b96f0becf 12 seconds ago Running coredns 1 014f49b5e0368
4ca71ab194adc kindest/kindnetd@sha256:273469d84ede51824194a31f6a405e3d3686b8b87cd161ea40f6bc3ff8e04ffe 24 seconds ago Running kindnet-cni 0 77d965caffd4c
086a9642a939b 6e38f40d628db 25 seconds ago Running storage-provisioner 0 6df28d0fb52c3
0facd63c6ea8e 5185b96f0becf 25 seconds ago Exited coredns 0 02857e7f4f808
fd945f388f718 46a6bb3c77ce0 27 seconds ago Running kube-proxy 0 56fb935a5fb67
d30142adc440c e9c08e11b07f6 45 seconds ago Running kube-controller-manager 0 0394a1ad457b8
c291796ca0d8f fce326961ae2d 45 seconds ago Running etcd 0 ac8a1e3db22b3
a726dda492314 655493523f607 45 seconds ago Running kube-scheduler 0 7e70f0fc730b2
f5bcc90b919ad deb04688c4a35 45 seconds ago Running kube-apiserver 0 a55f5c40c5f28
*
* ==> coredns [0facd63c6ea8] <==
* [INFO] plugin/kubernetes: waiting for Kubernetes API before starting server
[INFO] plugin/kubernetes: waiting for Kubernetes API before starting server
[INFO] SIGTERM: Shutting down servers then terminating
[INFO] plugin/kubernetes: waiting for Kubernetes API before starting server
[INFO] plugin/kubernetes: waiting for Kubernetes API before starting server
[INFO] plugin/kubernetes: waiting for Kubernetes API before starting server
[INFO] plugin/kubernetes: waiting for Kubernetes API before starting server
[INFO] plugin/kubernetes: waiting for Kubernetes API before starting server
[INFO] plugin/kubernetes: waiting for Kubernetes API before starting server
[INFO] plugin/kubernetes: waiting for Kubernetes API before starting server
[WARNING] plugin/kubernetes: starting server with unsynced Kubernetes API
.:53
[INFO] plugin/reload: Running configuration SHA512 = 591cf328cccc12bc490481273e738df59329c62c0b729d94e8b61db9961c2fa5f046dd37f1cf888b953814040d180f52594972691cd6ff41be96639138a43908
CoreDNS-1.9.3
linux/amd64, go1.18.2, 45b0a11
[INFO] plugin/health: Going into lameduck mode for 5s
[WARNING] plugin/kubernetes: Kubernetes API connection failure: Get "https://10.96.0.1:443/version": dial tcp 10.96.0.1:443: connect: network is unreachable
[ERROR] plugin/errors: 2 5466950289786134975.3426005732708472887. HINFO: dial udp 192.168.58.1:53: connect: network is unreachable
[ERROR] plugin/errors: 2 5466950289786134975.3426005732708472887. HINFO: dial udp 192.168.58.1:53: connect: network is unreachable
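This coredns instance could not reach either the API server (10.96.0.1:443) or the host resolver (192.168.58.1:53) before being terminated, so any lookup routed to it would fail regardless of whether the record exists. When debugging this kind of failure it can help to query the cluster DNS service IP directly rather than whatever /etc/resolv.conf points at, which separates "no route to the DNS service" from "name does not exist". A minimal Go sketch, to be run from a pod or node that should have a route to the service network; the service IP 10.96.0.10 is the kube-dns address in this cluster (see the PTR queries in the next block), and the lookup name is just an example:

```go
package main

import (
	"context"
	"fmt"
	"net"
	"time"
)

func main() {
	// Resolver pinned to the cluster DNS service, bypassing /etc/resolv.conf.
	r := &net.Resolver{
		PreferGo: true,
		Dial: func(ctx context.Context, network, _ string) (net.Conn, error) {
			d := net.Dialer{Timeout: 2 * time.Second}
			return d.DialContext(ctx, network, "10.96.0.10:53")
		},
	}
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	addrs, err := r.LookupHost(ctx, "kubernetes.default.svc.cluster.local")
	if err != nil {
		// A routing problem surfaces here as e.g. "connect: network is unreachable".
		fmt.Println("lookup failed:", err)
		return
	}
	fmt.Println("resolved:", addrs)
}
```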
*
* ==> coredns [eec447878870] <==
* .:53
[INFO] plugin/reload: Running configuration SHA512 = 75e5db48a73272e2c90919c8256e5cca0293ae0ed689e2ed44f1254a9589c3d004cb3e693d059116718c47e9305987b828b11b2735a1cefa59e4a9489dda5cee
CoreDNS-1.9.3
linux/amd64, go1.18.2, 45b0a11
[INFO] 127.0.0.1:59715 - 52923 "HINFO IN 6959884868566700602.57516180172716048. udp 55 false 512" NXDOMAIN qr,rd,ra 55 0.006656998s
[INFO] 10.244.0.4:50932 - 2 "PTR IN 10.0.96.10.in-addr.arpa. udp 41 false 512" NOERROR qr,aa,rd 116 0.000263374s
[INFO] 10.244.0.4:39533 - 3 "AAAA IN kubernetes.io. udp 31 false 512" NOERROR qr,rd,ra 31 0.037565927s
[INFO] 10.244.0.4:37036 - 4 "A IN kubernetes.io. udp 31 false 512" NOERROR qr,rd,ra 60 0.01091867s
[INFO] 10.244.0.4:55378 - 5 "PTR IN 148.40.75.147.in-addr.arpa. udp 44 false 512" NXDOMAIN qr,rd,ra 44 0.00854614s
[INFO] 10.244.0.4:58497 - 2 "PTR IN 10.0.96.10.in-addr.arpa. udp 41 false 512" NOERROR qr,aa,rd 116 0.000155831s
[INFO] 10.244.0.4:55032 - 3 "AAAA IN kubernetes.default. udp 36 false 512" NXDOMAIN qr,rd,ra 36 0.007141681s
[INFO] 10.244.0.4:44304 - 4 "AAAA IN kubernetes.default.default.svc.cluster.local. udp 62 false 512" NXDOMAIN qr,aa,rd 155 0.000181098s
[INFO] 10.244.0.4:44316 - 5 "AAAA IN kubernetes.default.svc.cluster.local. udp 54 false 512" NOERROR qr,aa,rd 147 0.000140356s
[INFO] 10.244.0.4:52606 - 6 "A IN kubernetes.default. udp 36 false 512" NXDOMAIN qr,rd,ra 36 0.004894136s
[INFO] 10.244.0.4:44975 - 7 "A IN kubernetes.default.default.svc.cluster.local. udp 62 false 512" NXDOMAIN qr,aa,rd 155 0.00011015s
[INFO] 10.244.0.4:44530 - 8 "A IN kubernetes.default.svc.cluster.local. udp 54 false 512" NOERROR qr,aa,rd 106 0.00014463s
[INFO] 10.244.0.4:37716 - 9 "PTR IN 1.0.96.10.in-addr.arpa. udp 40 false 512" NOERROR qr,aa,rd 112 0.000125151s
[INFO] 10.244.0.4:34018 - 2 "PTR IN 10.0.96.10.in-addr.arpa. udp 41 false 512" NOERROR qr,aa,rd 116 0.000149591s
[INFO] 10.244.0.4:45536 - 3 "AAAA IN kubernetes.default.svc.cluster.local. udp 54 false 512" NOERROR qr,aa,rd 147 0.000112286s
[INFO] 10.244.0.4:46158 - 4 "A IN kubernetes.default.svc.cluster.local. udp 54 false 512" NOERROR qr,aa,rd 106 0.000089777s
[INFO] 10.244.0.4:33255 - 5 "PTR IN 1.0.96.10.in-addr.arpa. udp 40 false 512" NOERROR qr,aa,rd 112 0.000106232s
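The "10.0.96.10.in-addr.arpa." queries in this block are reverse (PTR) lookups of the DNS service IP itself: for IPv4, the PTR name is the address's four octets in reverse order under in-addr.arpa, so 10.96.0.10 becomes 10.0.96.10.in-addr.arpa. A tiny illustration of the mapping:

```go
package main

import (
	"fmt"
	"net"
)

// ptrName builds the in-addr.arpa name used for IPv4 reverse lookups:
// the octets of the address in reverse order.
func ptrName(ip net.IP) string {
	v4 := ip.To4()
	return fmt.Sprintf("%d.%d.%d.%d.in-addr.arpa.", v4[3], v4[2], v4[1], v4[0])
}

func main() {
	fmt.Println(ptrName(net.ParseIP("10.96.0.10"))) // 10.0.96.10.in-addr.arpa.
}
```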
*
* ==> describe nodes <==
* Name: multinode-541903
Roles: control-plane
Labels: beta.kubernetes.io/arch=amd64
beta.kubernetes.io/os=linux
kubernetes.io/arch=amd64
kubernetes.io/hostname=multinode-541903
kubernetes.io/os=linux
minikube.k8s.io/commit=66d56dc3ac28a702789778ac47e90f12526a0321
minikube.k8s.io/name=multinode-541903
minikube.k8s.io/primary=true
minikube.k8s.io/updated_at=2023_02_23T04_37_32_0700
minikube.k8s.io/version=v1.29.0
node-role.kubernetes.io/control-plane=
node.kubernetes.io/exclude-from-external-load-balancers=
Annotations: kubeadm.alpha.kubernetes.io/cri-socket: unix:///var/run/cri-dockerd.sock
node.alpha.kubernetes.io/ttl: 0
volumes.kubernetes.io/controller-managed-attach-detach: true
CreationTimestamp: Thu, 23 Feb 2023 04:37:28 +0000
Taints: <none>
Unschedulable: false
Lease:
HolderIdentity: multinode-541903
AcquireTime: <unset>
RenewTime: Thu, 23 Feb 2023 04:38:02 +0000
Conditions:
Type Status LastHeartbeatTime LastTransitionTime Reason Message
---- ------ ----------------- ------------------ ------ -------
MemoryPressure False Thu, 23 Feb 2023 04:38:02 +0000 Thu, 23 Feb 2023 04:37:27 +0000 KubeletHasSufficientMemory kubelet has sufficient memory available
DiskPressure False Thu, 23 Feb 2023 04:38:02 +0000 Thu, 23 Feb 2023 04:37:27 +0000 KubeletHasNoDiskPressure kubelet has no disk pressure
PIDPressure False Thu, 23 Feb 2023 04:38:02 +0000 Thu, 23 Feb 2023 04:37:27 +0000 KubeletHasSufficientPID kubelet has sufficient PID available
Ready True Thu, 23 Feb 2023 04:38:02 +0000 Thu, 23 Feb 2023 04:37:32 +0000 KubeletReady kubelet is posting ready status
Addresses:
InternalIP: 192.168.58.2
Hostname: multinode-541903
Capacity:
cpu: 8
ephemeral-storage: 304681132Ki
hugepages-1Gi: 0
hugepages-2Mi: 0
memory: 32871740Ki
pods: 110
Allocatable:
cpu: 8
ephemeral-storage: 304681132Ki
hugepages-1Gi: 0
hugepages-2Mi: 0
memory: 32871740Ki
pods: 110
System Info:
Machine ID: d2c8c90d305d4c21867ffd1b1748456b
System UUID: 0a6db97e-c1ce-4256-b2a4-fa5731d9603c
Boot ID: 41cdd137-5e6d-47c9-932e-81b157c8e132
Kernel Version: 5.15.0-1029-gcp
OS Image: Ubuntu 20.04.5 LTS
Operating System: linux
Architecture: amd64
Container Runtime Version: docker://23.0.1
Kubelet Version: v1.26.1
Kube-Proxy Version: v1.26.1
PodCIDR: 10.244.0.0/24
PodCIDRs: 10.244.0.0/24
Non-terminated Pods: (9 in total)
Namespace Name CPU Requests CPU Limits Memory Requests Memory Limits Age
--------- ---- ------------ ---------- --------------- ------------- ---
default busybox-6b86dd6d48-x6jgm 0 (0%) 0 (0%) 0 (0%) 0 (0%) 5s
kube-system coredns-787d4945fb-qcmgw 100m (1%) 0 (0%) 70Mi (0%) 170Mi (0%) 27s
kube-system etcd-multinode-541903 100m (1%) 0 (0%) 100Mi (0%) 0 (0%) 39s
kube-system kindnet-gnlxp 100m (1%) 100m (1%) 50Mi (0%) 50Mi (0%) 27s
kube-system kube-apiserver-multinode-541903 250m (3%) 0 (0%) 0 (0%) 0 (0%) 39s
kube-system kube-controller-manager-multinode-541903 200m (2%) 0 (0%) 0 (0%) 0 (0%) 40s
kube-system kube-proxy-b9nwm 0 (0%) 0 (0%) 0 (0%) 0 (0%) 27s
kube-system kube-scheduler-multinode-541903 100m (1%) 0 (0%) 0 (0%) 0 (0%) 39s
kube-system storage-provisioner 0 (0%) 0 (0%) 0 (0%) 0 (0%) 26s
Allocated resources:
(Total limits may be over 100 percent, i.e., overcommitted.)
Resource Requests Limits
-------- -------- ------
cpu 850m (10%) 100m (1%)
memory 220Mi (0%) 220Mi (0%)
ephemeral-storage 0 (0%) 0 (0%)
hugepages-1Gi 0 (0%) 0 (0%)
hugepages-2Mi 0 (0%) 0 (0%)
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal Starting 26s kube-proxy
Normal Starting 40s kubelet Starting kubelet.
Normal NodeHasSufficientMemory 40s kubelet Node multinode-541903 status is now: NodeHasSufficientMemory
Normal NodeHasNoDiskPressure 40s kubelet Node multinode-541903 status is now: NodeHasNoDiskPressure
Normal NodeHasSufficientPID 40s kubelet Node multinode-541903 status is now: NodeHasSufficientPID
Normal NodeNotReady 40s kubelet Node multinode-541903 status is now: NodeNotReady
Normal NodeAllocatableEnforced 39s kubelet Updated Node Allocatable limit across pods
Normal NodeReady 39s kubelet Node multinode-541903 status is now: NodeReady
Normal RegisteredNode 28s node-controller Node multinode-541903 event: Registered Node multinode-541903 in Controller
Name: multinode-541903-m02
Roles: <none>
Labels: beta.kubernetes.io/arch=amd64
beta.kubernetes.io/os=linux
kubernetes.io/arch=amd64
kubernetes.io/hostname=multinode-541903-m02
kubernetes.io/os=linux
Annotations: kubeadm.alpha.kubernetes.io/cri-socket: /var/run/cri-dockerd.sock
node.alpha.kubernetes.io/ttl: 0
volumes.kubernetes.io/controller-managed-attach-detach: true
CreationTimestamp: Thu, 23 Feb 2023 04:38:02 +0000
Taints: <none>
Unschedulable: false
Lease: Failed to get lease: leases.coordination.k8s.io "multinode-541903-m02" not found
Conditions:
Type Status LastHeartbeatTime LastTransitionTime Reason Message
---- ------ ----------------- ------------------ ------ -------
MemoryPressure False Thu, 23 Feb 2023 04:38:02 +0000 Thu, 23 Feb 2023 04:38:01 +0000 KubeletHasSufficientMemory kubelet has sufficient memory available
DiskPressure False Thu, 23 Feb 2023 04:38:02 +0000 Thu, 23 Feb 2023 04:38:01 +0000 KubeletHasNoDiskPressure kubelet has no disk pressure
PIDPressure False Thu, 23 Feb 2023 04:38:02 +0000 Thu, 23 Feb 2023 04:38:01 +0000 KubeletHasSufficientPID kubelet has sufficient PID available
Ready True Thu, 23 Feb 2023 04:38:02 +0000 Thu, 23 Feb 2023 04:38:02 +0000 KubeletReady kubelet is posting ready status
Addresses:
InternalIP: 192.168.58.3
Hostname: multinode-541903-m02
Capacity:
cpu: 8
ephemeral-storage: 304681132Ki
hugepages-1Gi: 0
hugepages-2Mi: 0
memory: 32871740Ki
pods: 110
Allocatable:
cpu: 8
ephemeral-storage: 304681132Ki
hugepages-1Gi: 0
hugepages-2Mi: 0
memory: 32871740Ki
pods: 110
System Info:
Machine ID: d2c8c90d305d4c21867ffd1b1748456b
System UUID: 05539598-01dc-4456-ba7e-32ad34500ddd
Boot ID: 41cdd137-5e6d-47c9-932e-81b157c8e132
Kernel Version: 5.15.0-1029-gcp
OS Image: Ubuntu 20.04.5 LTS
Operating System: linux
Architecture: amd64
Container Runtime Version: docker://23.0.1
Kubelet Version: v1.26.1
Kube-Proxy Version: v1.26.1
PodCIDR: 10.244.1.0/24
PodCIDRs: 10.244.1.0/24
Non-terminated Pods: (3 in total)
Namespace Name CPU Requests CPU Limits Memory Requests Memory Limits Age
--------- ---- ------------ ---------- --------------- ------------- ---
default busybox-6b86dd6d48-dtw6s 0 (0%) 0 (0%) 0 (0%) 0 (0%) 5s
kube-system kindnet-7mcb2 100m (1%) 100m (1%) 50Mi (0%) 50Mi (0%) 9s
kube-system kube-proxy-g9qpb 0 (0%) 0 (0%) 0 (0%) 0 (0%) 9s
Allocated resources:
(Total limits may be over 100 percent, i.e., overcommitted.)
Resource Requests Limits
-------- -------- ------
cpu 100m (1%) 100m (1%)
memory 50Mi (0%) 50Mi (0%)
ephemeral-storage 0 (0%) 0 (0%)
hugepages-1Gi 0 (0%) 0 (0%)
hugepages-2Mi 0 (0%) 0 (0%)
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal Starting 7s kube-proxy
Normal Starting 10s kubelet Starting kubelet.
Normal NodeHasSufficientMemory 10s (x2 over 10s) kubelet Node multinode-541903-m02 status is now: NodeHasSufficientMemory
Normal NodeHasNoDiskPressure 10s (x2 over 10s) kubelet Node multinode-541903-m02 status is now: NodeHasNoDiskPressure
Normal NodeHasSufficientPID 10s (x2 over 10s) kubelet Node multinode-541903-m02 status is now: NodeHasSufficientPID
Normal NodeAllocatableEnforced 10s kubelet Updated Node Allocatable limit across pods
Normal NodeReady 9s kubelet Node multinode-541903-m02 status is now: NodeReady
Normal RegisteredNode 8s node-controller Node multinode-541903-m02 event: Registered Node multinode-541903-m02 in Controller
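The node_conditions.go lines in the run log ("node storage ephemeral capacity is 304681132Ki", "node cpu capacity is 8") read the same Capacity and Conditions fields shown in the describe output above, once per node. A minimal client-go sketch that lists nodes and prints those fields; this is an illustration, not minikube's node_conditions.go, and the kubeconfig path is an assumption:

```go
package main

import (
	"context"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	nodes, err := cs.CoreV1().Nodes().List(context.Background(), metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	for _, n := range nodes.Items {
		eph := n.Status.Capacity[corev1.ResourceEphemeralStorage]
		cpu := n.Status.Capacity[corev1.ResourceCPU]
		fmt.Printf("%s: ephemeral-storage=%s cpu=%s\n", n.Name, eph.String(), cpu.String())
		for _, c := range n.Status.Conditions {
			// MemoryPressure, DiskPressure, and PIDPressure should all be False
			// on a healthy node, matching the Conditions tables above.
			switch c.Type {
			case corev1.NodeMemoryPressure, corev1.NodeDiskPressure, corev1.NodePIDPressure:
				fmt.Printf("  %s=%s\n", c.Type, c.Status)
			}
		}
	}
}
```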
*
* ==> dmesg <==
* [ +0.008739] FS-Cache: O-key=[8] '7aa00f0200000000'
[ +0.006315] FS-Cache: N-cookie c=0000000d [p=00000003 fl=2 nc=0 na=1]
[ +0.007943] FS-Cache: N-cookie d=00000000be8bfdab{9p.inode} n=00000000155d6f34
[ +0.008747] FS-Cache: N-key=[8] '7aa00f0200000000'
[ +3.008494] FS-Cache: Duplicate cookie detected
[ +0.004682] FS-Cache: O-cookie c=00000006 [p=00000003 fl=226 nc=0 na=1]
[ +0.006745] FS-Cache: O-cookie d=00000000be8bfdab{9p.inode} n=0000000096e3f4f6
[ +0.007360] FS-Cache: O-key=[8] '79a00f0200000000'
[ +0.004951] FS-Cache: N-cookie c=0000000f [p=00000003 fl=2 nc=0 na=1]
[ +0.006573] FS-Cache: N-cookie d=00000000be8bfdab{9p.inode} n=0000000058c77510
[ +0.007359] FS-Cache: N-key=[8] '79a00f0200000000'
[ +0.534321] FS-Cache: Duplicate cookie detected
[ +0.004699] FS-Cache: O-cookie c=00000009 [p=00000003 fl=226 nc=0 na=1]
[ +0.006767] FS-Cache: O-cookie d=00000000be8bfdab{9p.inode} n=0000000067cf95b5
[ +0.007350] FS-Cache: O-key=[8] '87a00f0200000000'
[ +0.004927] FS-Cache: N-cookie c=00000010 [p=00000003 fl=2 nc=0 na=1]
[ +0.006586] FS-Cache: N-cookie d=00000000be8bfdab{9p.inode} n=0000000096b81734
[ +0.007350] FS-Cache: N-key=[8] '87a00f0200000000'
[ +8.026583] IPv4: martian source 10.244.0.1 from 10.244.0.14, on dev eth0
[ +0.000008] ll header: 00000000: ff ff ff ff ff ff fe a7 25 28 cf 87 08 06
[Feb23 04:30] kmem.limit_in_bytes is deprecated and will be removed. Please report your usecase to linux-mm@kvack.org if you depend on this functionality.
[Feb23 04:32] IPv4: martian source 10.244.0.1 from 10.244.0.3, on dev eth0
[ +0.000006] ll header: 00000000: ff ff ff ff ff ff c2 b7 0e f6 05 fb 08 06
[Feb23 04:36] IPv4: martian source 10.244.0.1 from 10.244.0.2, on dev eth0
[ +0.000008] ll header: 00000000: ff ff ff ff ff ff c6 41 4f 6e 45 d7 08 06
*
* ==> etcd [c291796ca0d8] <==
* {"level":"info","ts":"2023-02-23T04:37:26.410Z","caller":"membership/cluster.go:421","msg":"added member","cluster-id":"3a56e4ca95e2355c","local-member-id":"b2c6679ac05f2cf1","added-peer-id":"b2c6679ac05f2cf1","added-peer-peer-urls":["https://192.168.58.2:2380"]}
{"level":"info","ts":"2023-02-23T04:37:26.412Z","caller":"embed/etcd.go:687","msg":"starting with client TLS","tls-info":"cert = /var/lib/minikube/certs/etcd/server.crt, key = /var/lib/minikube/certs/etcd/server.key, client-cert=, client-key=, trusted-ca = /var/lib/minikube/certs/etcd/ca.crt, client-cert-auth = true, crl-file = ","cipher-suites":[]}
{"level":"info","ts":"2023-02-23T04:37:26.412Z","caller":"embed/etcd.go:275","msg":"now serving peer/client/metrics","local-member-id":"b2c6679ac05f2cf1","initial-advertise-peer-urls":["https://192.168.58.2:2380"],"listen-peer-urls":["https://192.168.58.2:2380"],"advertise-client-urls":["https://192.168.58.2:2379"],"listen-client-urls":["https://127.0.0.1:2379","https://192.168.58.2:2379"],"listen-metrics-urls":["http://127.0.0.1:2381"]}
{"level":"info","ts":"2023-02-23T04:37:26.412Z","caller":"embed/etcd.go:762","msg":"serving metrics","address":"http://127.0.0.1:2381"}
{"level":"info","ts":"2023-02-23T04:37:26.412Z","caller":"embed/etcd.go:586","msg":"serving peer traffic","address":"192.168.58.2:2380"}
{"level":"info","ts":"2023-02-23T04:37:26.412Z","caller":"embed/etcd.go:558","msg":"cmux::serve","address":"192.168.58.2:2380"}
{"level":"info","ts":"2023-02-23T04:37:26.600Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"b2c6679ac05f2cf1 is starting a new election at term 1"}
{"level":"info","ts":"2023-02-23T04:37:26.600Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"b2c6679ac05f2cf1 became pre-candidate at term 1"}
{"level":"info","ts":"2023-02-23T04:37:26.600Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"b2c6679ac05f2cf1 received MsgPreVoteResp from b2c6679ac05f2cf1 at term 1"}
{"level":"info","ts":"2023-02-23T04:37:26.600Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"b2c6679ac05f2cf1 became candidate at term 2"}
{"level":"info","ts":"2023-02-23T04:37:26.600Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"b2c6679ac05f2cf1 received MsgVoteResp from b2c6679ac05f2cf1 at term 2"}
{"level":"info","ts":"2023-02-23T04:37:26.600Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"b2c6679ac05f2cf1 became leader at term 2"}
{"level":"info","ts":"2023-02-23T04:37:26.600Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"raft.node: b2c6679ac05f2cf1 elected leader b2c6679ac05f2cf1 at term 2"}
{"level":"info","ts":"2023-02-23T04:37:26.601Z","caller":"etcdserver/server.go:2563","msg":"setting up initial cluster version using v2 API","cluster-version":"3.5"}
{"level":"info","ts":"2023-02-23T04:37:26.602Z","caller":"embed/serve.go:100","msg":"ready to serve client requests"}
{"level":"info","ts":"2023-02-23T04:37:26.602Z","caller":"etcdserver/server.go:2054","msg":"published local member to cluster through raft","local-member-id":"b2c6679ac05f2cf1","local-member-attributes":"{Name:multinode-541903 ClientURLs:[https://192.168.58.2:2379]}","request-path":"/0/members/b2c6679ac05f2cf1/attributes","cluster-id":"3a56e4ca95e2355c","publish-timeout":"7s"}
{"level":"info","ts":"2023-02-23T04:37:26.602Z","caller":"embed/serve.go:100","msg":"ready to serve client requests"}
{"level":"info","ts":"2023-02-23T04:37:26.602Z","caller":"membership/cluster.go:584","msg":"set initial cluster version","cluster-id":"3a56e4ca95e2355c","local-member-id":"b2c6679ac05f2cf1","cluster-version":"3.5"}
{"level":"info","ts":"2023-02-23T04:37:26.602Z","caller":"api/capability.go:75","msg":"enabled capabilities for version","cluster-version":"3.5"}
{"level":"info","ts":"2023-02-23T04:37:26.602Z","caller":"etcdmain/main.go:44","msg":"notifying init daemon"}
{"level":"info","ts":"2023-02-23T04:37:26.602Z","caller":"etcdserver/server.go:2587","msg":"cluster version is updated","cluster-version":"3.5"}
{"level":"info","ts":"2023-02-23T04:37:26.602Z","caller":"etcdmain/main.go:50","msg":"successfully notified init daemon"}
{"level":"info","ts":"2023-02-23T04:37:26.603Z","caller":"embed/serve.go:198","msg":"serving client traffic securely","address":"127.0.0.1:2379"}
{"level":"info","ts":"2023-02-23T04:37:26.603Z","caller":"embed/serve.go:198","msg":"serving client traffic securely","address":"192.168.58.2:2379"}
{"level":"info","ts":"2023-02-23T04:37:52.887Z","caller":"traceutil/trace.go:171","msg":"trace[158574339] transaction","detail":"{read_only:false; response_revision:400; number_of_response:1; }","duration":"148.144815ms","start":"2023-02-23T04:37:52.739Z","end":"2023-02-23T04:37:52.887Z","steps":["trace[158574339] 'process raft request' (duration: 147.972226ms)"],"step_count":1}
*
* ==> kernel <==
* 04:38:11 up 20 min, 0 users, load average: 1.49, 1.79, 1.13
Linux multinode-541903 5.15.0-1029-gcp #36~20.04.1-Ubuntu SMP Tue Jan 24 16:54:15 UTC 2023 x86_64 x86_64 x86_64 GNU/Linux
PRETTY_NAME="Ubuntu 20.04.5 LTS"
*
* ==> kindnet [4ca71ab194ad] <==
* I0223 04:37:47.387652 1 main.go:102] connected to apiserver: https://10.96.0.1:443
I0223 04:37:47.387707 1 main.go:107] hostIP = 192.168.58.2
podIP = 192.168.58.2
I0223 04:37:47.387829 1 main.go:116] setting mtu 1500 for CNI
I0223 04:37:47.387850 1 main.go:146] kindnetd IP family: "ipv4"
I0223 04:37:47.387863 1 main.go:150] noMask IPv4 subnets: [10.244.0.0/16]
I0223 04:37:47.689752 1 main.go:223] Handling node with IPs: map[192.168.58.2:{}]
I0223 04:37:47.689779 1 main.go:227] handling current node
I0223 04:37:57.703417 1 main.go:223] Handling node with IPs: map[192.168.58.2:{}]
I0223 04:37:57.703445 1 main.go:227] handling current node
I0223 04:38:07.715132 1 main.go:223] Handling node with IPs: map[192.168.58.2:{}]
I0223 04:38:07.715156 1 main.go:227] handling current node
I0223 04:38:07.715165 1 main.go:223] Handling node with IPs: map[192.168.58.3:{}]
I0223 04:38:07.715170 1 main.go:250] Node multinode-541903-m02 has CIDR [10.244.1.0/24]
I0223 04:38:07.715325 1 routes.go:62] Adding route {Ifindex: 0 Dst: 10.244.1.0/24 Src: <nil> Gw: 192.168.58.3 Flags: [] Table: 0}
*
* ==> kube-apiserver [f5bcc90b919a] <==
* I0223 04:37:28.817415 1 cache.go:39] Caches are synced for APIServiceRegistrationController controller
I0223 04:37:28.817458 1 shared_informer.go:280] Caches are synced for configmaps
I0223 04:37:28.817535 1 apf_controller.go:366] Running API Priority and Fairness config worker
I0223 04:37:28.817542 1 cache.go:39] Caches are synced for autoregister controller
I0223 04:37:28.817550 1 apf_controller.go:369] Running API Priority and Fairness periodic rebalancing process
I0223 04:37:28.817752 1 shared_informer.go:280] Caches are synced for cluster_authentication_trust_controller
I0223 04:37:28.817941 1 shared_informer.go:280] Caches are synced for crd-autoregister
I0223 04:37:28.820167 1 controller.go:615] quota admission added evaluator for: namespaces
I0223 04:37:28.999437 1 controller.go:615] quota admission added evaluator for: leases.coordination.k8s.io
I0223 04:37:29.513008 1 controller.go:132] OpenAPI AggregationController: action for item k8s_internal_local_delegation_chain_0000000000: Nothing (removed from the queue).
I0223 04:37:29.721826 1 storage_scheduling.go:95] created PriorityClass system-node-critical with value 2000001000
I0223 04:37:29.726165 1 storage_scheduling.go:95] created PriorityClass system-cluster-critical with value 2000000000
I0223 04:37:29.726181 1 storage_scheduling.go:111] all system priority classes are created successfully or already exist.
I0223 04:37:30.080127 1 controller.go:615] quota admission added evaluator for: roles.rbac.authorization.k8s.io
I0223 04:37:30.111030 1 controller.go:615] quota admission added evaluator for: rolebindings.rbac.authorization.k8s.io
I0223 04:37:30.200128 1 alloc.go:327] "allocated clusterIPs" service="default/kubernetes" clusterIPs=map[IPv4:10.96.0.1]
W0223 04:37:30.204940 1 lease.go:251] Resetting endpoints for master service "kubernetes" to [192.168.58.2]
I0223 04:37:30.205697 1 controller.go:615] quota admission added evaluator for: endpoints
I0223 04:37:30.209333 1 controller.go:615] quota admission added evaluator for: endpointslices.discovery.k8s.io
I0223 04:37:30.749571 1 controller.go:615] quota admission added evaluator for: serviceaccounts
I0223 04:37:31.745691 1 controller.go:615] quota admission added evaluator for: deployments.apps
I0223 04:37:31.755494 1 alloc.go:327] "allocated clusterIPs" service="kube-system/kube-dns" clusterIPs=map[IPv4:10.96.0.10]
I0223 04:37:31.762509 1 controller.go:615] quota admission added evaluator for: daemonsets.apps
I0223 04:37:44.357578 1 controller.go:615] quota admission added evaluator for: controllerrevisions.apps
I0223 04:37:44.458695 1 controller.go:615] quota admission added evaluator for: replicasets.apps
*
* ==> kube-controller-manager [d30142adc440] <==
* I0223 04:37:43.799244 1 event.go:294] "Event occurred" object="multinode-541903" fieldPath="" kind="Node" apiVersion="v1" type="Normal" reason="RegisteredNode" message="Node multinode-541903 event: Registered Node multinode-541903 in Controller"
I0223 04:37:43.799261 1 node_lifecycle_controller.go:1254] Controller detected that zone is now in state Normal.
I0223 04:37:43.799188 1 taint_manager.go:206] "Starting NoExecuteTaintManager"
I0223 04:37:43.799307 1 taint_manager.go:211] "Sending events to api server"
I0223 04:37:43.811540 1 shared_informer.go:280] Caches are synced for resource quota
I0223 04:37:44.133026 1 shared_informer.go:280] Caches are synced for garbage collector
I0223 04:37:44.147166 1 shared_informer.go:280] Caches are synced for garbage collector
I0223 04:37:44.147180 1 garbagecollector.go:163] Garbage collector: all resource monitors have synced. Proceeding to collect garbage
I0223 04:37:44.364387 1 event.go:294] "Event occurred" object="kube-system/kube-proxy" fieldPath="" kind="DaemonSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: kube-proxy-b9nwm"
I0223 04:37:44.366432 1 event.go:294] "Event occurred" object="kube-system/kindnet" fieldPath="" kind="DaemonSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: kindnet-gnlxp"
I0223 04:37:44.462398 1 event.go:294] "Event occurred" object="kube-system/coredns" fieldPath="" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled up replica set coredns-787d4945fb to 2"
I0223 04:37:44.616321 1 event.go:294] "Event occurred" object="kube-system/coredns-787d4945fb" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: coredns-787d4945fb-qcmgw"
I0223 04:37:44.625047 1 event.go:294] "Event occurred" object="kube-system/coredns-787d4945fb" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: coredns-787d4945fb-m9kd9"
W0223 04:38:02.041773 1 actual_state_of_world.go:541] Failed to update statusUpdateNeeded field in actual state of world: Failed to set statusUpdateNeeded to needed true, because nodeName="multinode-541903-m02" does not exist
I0223 04:38:02.046486 1 range_allocator.go:372] Set node multinode-541903-m02 PodCIDR to [10.244.1.0/24]
I0223 04:38:02.049800 1 event.go:294] "Event occurred" object="kube-system/kindnet" fieldPath="" kind="DaemonSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: kindnet-7mcb2"
I0223 04:38:02.052128 1 event.go:294] "Event occurred" object="kube-system/kube-proxy" fieldPath="" kind="DaemonSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: kube-proxy-g9qpb"
W0223 04:38:02.757504 1 topologycache.go:232] Can't get CPU or zone information for multinode-541903-m02 node
I0223 04:38:03.158468 1 event.go:294] "Event occurred" object="kube-system/coredns" fieldPath="" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled down replica set coredns-787d4945fb to 1 from 2"
I0223 04:38:03.162911 1 event.go:294] "Event occurred" object="kube-system/coredns-787d4945fb" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulDelete" message="Deleted pod: coredns-787d4945fb-m9kd9"
W0223 04:38:03.802220 1 node_lifecycle_controller.go:1053] Missing timestamp for Node multinode-541903-m02. Assuming now as a timestamp.
I0223 04:38:03.802290 1 event.go:294] "Event occurred" object="multinode-541903-m02" fieldPath="" kind="Node" apiVersion="v1" type="Normal" reason="RegisteredNode" message="Node multinode-541903-m02 event: Registered Node multinode-541903-m02 in Controller"
I0223 04:38:06.920914 1 event.go:294] "Event occurred" object="default/busybox" fieldPath="" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled up replica set busybox-6b86dd6d48 to 2"
I0223 04:38:06.927722 1 event.go:294] "Event occurred" object="default/busybox-6b86dd6d48" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: busybox-6b86dd6d48-dtw6s"
I0223 04:38:06.934424 1 event.go:294] "Event occurred" object="default/busybox-6b86dd6d48" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: busybox-6b86dd6d48-x6jgm"
*
* ==> kube-proxy [fd945f388f71] <==
* I0223 04:37:45.314934 1 node.go:163] Successfully retrieved node IP: 192.168.58.2
I0223 04:37:45.315002 1 server_others.go:109] "Detected node IP" address="192.168.58.2"
I0223 04:37:45.315018 1 server_others.go:535] "Using iptables proxy"
I0223 04:37:45.406416 1 server_others.go:176] "Using iptables Proxier"
I0223 04:37:45.406453 1 server_others.go:183] "kube-proxy running in dual-stack mode" ipFamily=IPv4
I0223 04:37:45.406464 1 server_others.go:184] "Creating dualStackProxier for iptables"
I0223 04:37:45.406497 1 server_others.go:465] "Detect-local-mode set to ClusterCIDR, but no IPv6 cluster CIDR defined, , defaulting to no-op detect-local for IPv6"
I0223 04:37:45.406532 1 proxier.go:242] "Setting route_localnet=1 to allow node-ports on localhost; to change this either disable iptables.localhostNodePorts (--iptables-localhost-nodeports) or set nodePortAddresses (--nodeport-addresses) to filter loopback addresses"
I0223 04:37:45.406855 1 server.go:655] "Version info" version="v1.26.1"
I0223 04:37:45.406873 1 server.go:657] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
I0223 04:37:45.408359 1 config.go:317] "Starting service config controller"
I0223 04:37:45.408380 1 shared_informer.go:273] Waiting for caches to sync for service config
I0223 04:37:45.408406 1 config.go:226] "Starting endpoint slice config controller"
I0223 04:37:45.408409 1 shared_informer.go:273] Waiting for caches to sync for endpoint slice config
I0223 04:37:45.409175 1 config.go:444] "Starting node config controller"
I0223 04:37:45.409197 1 shared_informer.go:273] Waiting for caches to sync for node config
I0223 04:37:45.508615 1 shared_informer.go:280] Caches are synced for service config
I0223 04:37:45.508615 1 shared_informer.go:280] Caches are synced for endpoint slice config
I0223 04:37:45.509728 1 shared_informer.go:280] Caches are synced for node config
*
* ==> kube-scheduler [a726dda49231] <==
* W0223 04:37:28.804357 1 reflector.go:424] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.Service: services is forbidden: User "system:kube-scheduler" cannot list resource "services" in API group "" at the cluster scope
W0223 04:37:28.804374 1 reflector.go:424] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.CSIDriver: csidrivers.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csidrivers" in API group "storage.k8s.io" at the cluster scope
E0223 04:37:28.804394 1 reflector.go:140] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: csidrivers.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csidrivers" in API group "storage.k8s.io" at the cluster scope
W0223 04:37:28.804375 1 reflector.go:424] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.ReplicaSet: replicasets.apps is forbidden: User "system:kube-scheduler" cannot list resource "replicasets" in API group "apps" at the cluster scope
E0223 04:37:28.804412 1 reflector.go:140] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.ReplicaSet: failed to list *v1.ReplicaSet: replicasets.apps is forbidden: User "system:kube-scheduler" cannot list resource "replicasets" in API group "apps" at the cluster scope
E0223 04:37:28.804362 1 reflector.go:140] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.PersistentVolumeClaim: failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumeclaims" in API group "" at the cluster scope
E0223 04:37:28.804344 1 reflector.go:140] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.PodDisruptionBudget: failed to list *v1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User "system:kube-scheduler" cannot list resource "poddisruptionbudgets" in API group "policy" at the cluster scope
E0223 04:37:28.804375 1 reflector.go:140] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.Service: failed to list *v1.Service: services is forbidden: User "system:kube-scheduler" cannot list resource "services" in API group "" at the cluster scope
W0223 04:37:28.804531 1 reflector.go:424] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope
E0223 04:37:28.804557 1 reflector.go:140] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.StorageClass: failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope
W0223 04:37:29.810957 1 reflector.go:424] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.Pod: pods is forbidden: User "system:kube-scheduler" cannot list resource "pods" in API group "" at the cluster scope
E0223 04:37:29.810998 1 reflector.go:140] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.Pod: failed to list *v1.Pod: pods is forbidden: User "system:kube-scheduler" cannot list resource "pods" in API group "" at the cluster scope
W0223 04:37:29.829148 1 reflector.go:424] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.CSIDriver: csidrivers.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csidrivers" in API group "storage.k8s.io" at the cluster scope
E0223 04:37:29.829179 1 reflector.go:140] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: csidrivers.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csidrivers" in API group "storage.k8s.io" at the cluster scope
W0223 04:37:29.832175 1 reflector.go:424] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.ReplicationController: replicationcontrollers is forbidden: User "system:kube-scheduler" cannot list resource "replicationcontrollers" in API group "" at the cluster scope
E0223 04:37:29.832207 1 reflector.go:140] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.ReplicationController: failed to list *v1.ReplicationController: replicationcontrollers is forbidden: User "system:kube-scheduler" cannot list resource "replicationcontrollers" in API group "" at the cluster scope
W0223 04:37:29.858074 1 reflector.go:424] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumes" in API group "" at the cluster scope
E0223 04:37:29.858105 1 reflector.go:140] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.PersistentVolume: failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumes" in API group "" at the cluster scope
W0223 04:37:29.877891 1 reflector.go:424] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.Node: nodes is forbidden: User "system:kube-scheduler" cannot list resource "nodes" in API group "" at the cluster scope
E0223 04:37:29.877916 1 reflector.go:140] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.Node: failed to list *v1.Node: nodes is forbidden: User "system:kube-scheduler" cannot list resource "nodes" in API group "" at the cluster scope
W0223 04:37:29.938434 1 reflector.go:424] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope
E0223 04:37:29.938483 1 reflector.go:140] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.StorageClass: failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope
W0223 04:37:29.954330 1 reflector.go:424] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.Service: services is forbidden: User "system:kube-scheduler" cannot list resource "services" in API group "" at the cluster scope
E0223 04:37:29.954355 1 reflector.go:140] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.Service: failed to list *v1.Service: services is forbidden: User "system:kube-scheduler" cannot list resource "services" in API group "" at the cluster scope
I0223 04:37:30.202404 1 shared_informer.go:280] Caches are synced for client-ca::kube-system::extension-apiserver-authentication::client-ca-file
*
* ==> kubelet <==
* -- Logs begin at Thu 2023-02-23 04:37:13 UTC, end at Thu 2023-02-23 04:38:11 UTC. --
Feb 23 04:37:49 multinode-541903 kubelet[2326]: I0223 04:37:49.527654 2326 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="kube-system/coredns-787d4945fb-qcmgw" podStartSLOduration=5.527601147 pod.CreationTimestamp="2023-02-23 04:37:44 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2023-02-23 04:37:49.52695016 +0000 UTC m=+17.801058512" watchObservedRunningTime="2023-02-23 04:37:49.527601147 +0000 UTC m=+17.801709502"
Feb 23 04:37:49 multinode-541903 kubelet[2326]: I0223 04:37:49.527841 2326 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="kube-system/storage-provisioner" podStartSLOduration=4.527811693 pod.CreationTimestamp="2023-02-23 04:37:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2023-02-23 04:37:49.125719114 +0000 UTC m=+17.399827466" watchObservedRunningTime="2023-02-23 04:37:49.527811693 +0000 UTC m=+17.801920046"
Feb 23 04:37:52 multinode-541903 kubelet[2326]: I0223 04:37:52.234039 2326 kuberuntime_manager.go:1114] "Updating runtime config through cri with podcidr" CIDR="10.244.0.0/24"
Feb 23 04:37:52 multinode-541903 kubelet[2326]: I0223 04:37:52.234854 2326 kubelet_network.go:61] "Updating Pod CIDR" originalPodCIDR="" newPodCIDR="10.244.0.0/24"
Feb 23 04:37:59 multinode-541903 kubelet[2326]: I0223 04:37:59.710417 2326 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ca10a6f336621cc67a6988c31ad42c6441445f59a300e11b9b54572277c12fac"
Feb 23 04:38:00 multinode-541903 kubelet[2326]: I0223 04:38:00.882553 2326 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="02857e7f4f8084590de20a19df06dbd8f414afb16fdc0d542a9009bd55a32d6e"
Feb 23 04:38:00 multinode-541903 kubelet[2326]: I0223 04:38:00.897722 2326 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="kube-system/kindnet-gnlxp" podStartSLOduration=-9.223372019957108e+09 pod.CreationTimestamp="2023-02-23 04:37:44 +0000 UTC" firstStartedPulling="2023-02-23 04:37:45.288063338 +0000 UTC m=+13.562171686" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2023-02-23 04:37:49.928874988 +0000 UTC m=+18.202983340" watchObservedRunningTime="2023-02-23 04:38:00.897668919 +0000 UTC m=+29.171777271"
Feb 23 04:38:06 multinode-541903 kubelet[2326]: I0223 04:38:06.938386 2326 topology_manager.go:210] "Topology Admit Handler"
Feb 23 04:38:07 multinode-541903 kubelet[2326]: I0223 04:38:07.066364 2326 reconciler_common.go:253] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-65fph\" (UniqueName: \"kubernetes.io/projected/2b1dfa52-a48e-426a-855a-0eae31fc2650-kube-api-access-65fph\") pod \"busybox-6b86dd6d48-x6jgm\" (UID: \"2b1dfa52-a48e-426a-855a-0eae31fc2650\") " pod="default/busybox-6b86dd6d48-x6jgm"
Feb 23 04:38:08 multinode-541903 kubelet[2326]: I0223 04:38:08.970861 2326 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="default/busybox-6b86dd6d48-x6jgm" podStartSLOduration=-9.223372033883957e+09 pod.CreationTimestamp="2023-02-23 04:38:06 +0000 UTC" firstStartedPulling="2023-02-23 04:38:07.487969346 +0000 UTC m=+35.762077715" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2023-02-23 04:38:08.970688403 +0000 UTC m=+37.244796755" watchObservedRunningTime="2023-02-23 04:38:08.970818247 +0000 UTC m=+37.244926599"
Feb 23 04:38:09 multinode-541903 kubelet[2326]: I0223 04:38:09.176599 2326 reconciler_common.go:169] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/f98b834d-59f4-4ad0-8fc5-942542e67c77-config-volume\") pod \"f98b834d-59f4-4ad0-8fc5-942542e67c77\" (UID: \"f98b834d-59f4-4ad0-8fc5-942542e67c77\") "
Feb 23 04:38:09 multinode-541903 kubelet[2326]: I0223 04:38:09.176650 2326 reconciler_common.go:169] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v8dxr\" (UniqueName: \"kubernetes.io/projected/f98b834d-59f4-4ad0-8fc5-942542e67c77-kube-api-access-v8dxr\") pod \"f98b834d-59f4-4ad0-8fc5-942542e67c77\" (UID: \"f98b834d-59f4-4ad0-8fc5-942542e67c77\") "
Feb 23 04:38:09 multinode-541903 kubelet[2326]: W0223 04:38:09.176873 2326 empty_dir.go:525] Warning: Failed to clear quota on /var/lib/kubelet/pods/f98b834d-59f4-4ad0-8fc5-942542e67c77/volumes/kubernetes.io~configmap/config-volume: clearQuota called, but quotas disabled
Feb 23 04:38:09 multinode-541903 kubelet[2326]: I0223 04:38:09.177078 2326 operation_generator.go:890] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f98b834d-59f4-4ad0-8fc5-942542e67c77-config-volume" (OuterVolumeSpecName: "config-volume") pod "f98b834d-59f4-4ad0-8fc5-942542e67c77" (UID: "f98b834d-59f4-4ad0-8fc5-942542e67c77"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Feb 23 04:38:09 multinode-541903 kubelet[2326]: I0223 04:38:09.178490 2326 operation_generator.go:890] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f98b834d-59f4-4ad0-8fc5-942542e67c77-kube-api-access-v8dxr" (OuterVolumeSpecName: "kube-api-access-v8dxr") pod "f98b834d-59f4-4ad0-8fc5-942542e67c77" (UID: "f98b834d-59f4-4ad0-8fc5-942542e67c77"). InnerVolumeSpecName "kube-api-access-v8dxr". PluginName "kubernetes.io/projected", VolumeGidValue ""
Feb 23 04:38:09 multinode-541903 kubelet[2326]: I0223 04:38:09.277789 2326 reconciler_common.go:295] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/f98b834d-59f4-4ad0-8fc5-942542e67c77-config-volume\") on node \"multinode-541903\" DevicePath \"\""
Feb 23 04:38:09 multinode-541903 kubelet[2326]: I0223 04:38:09.277831 2326 reconciler_common.go:295] "Volume detached for volume \"kube-api-access-v8dxr\" (UniqueName: \"kubernetes.io/projected/f98b834d-59f4-4ad0-8fc5-942542e67c77-kube-api-access-v8dxr\") on node \"multinode-541903\" DevicePath \"\""
Feb 23 04:38:09 multinode-541903 kubelet[2326]: I0223 04:38:09.975543 2326 scope.go:115] "RemoveContainer" containerID="4108fc9720e226598b1c0ea677f1bb8f0ddc683227d0d3547d2c95e3ae9f71fc"
Feb 23 04:38:10 multinode-541903 kubelet[2326]: I0223 04:38:10.000557 2326 scope.go:115] "RemoveContainer" containerID="6f11eb4875809ce6bbb9d5742076520cfd230818f0d79ecc123f25f18fce3942"
Feb 23 04:38:10 multinode-541903 kubelet[2326]: I0223 04:38:10.016076 2326 scope.go:115] "RemoveContainer" containerID="4108fc9720e226598b1c0ea677f1bb8f0ddc683227d0d3547d2c95e3ae9f71fc"
Feb 23 04:38:10 multinode-541903 kubelet[2326]: E0223 04:38:10.016756 2326 remote_runtime.go:415] "ContainerStatus from runtime service failed" err="rpc error: code = Unknown desc = Error: No such container: 4108fc9720e226598b1c0ea677f1bb8f0ddc683227d0d3547d2c95e3ae9f71fc" containerID="4108fc9720e226598b1c0ea677f1bb8f0ddc683227d0d3547d2c95e3ae9f71fc"
Feb 23 04:38:10 multinode-541903 kubelet[2326]: I0223 04:38:10.016807 2326 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={Type:docker ID:4108fc9720e226598b1c0ea677f1bb8f0ddc683227d0d3547d2c95e3ae9f71fc} err="failed to get container status \"4108fc9720e226598b1c0ea677f1bb8f0ddc683227d0d3547d2c95e3ae9f71fc\": rpc error: code = Unknown desc = Error: No such container: 4108fc9720e226598b1c0ea677f1bb8f0ddc683227d0d3547d2c95e3ae9f71fc"
Feb 23 04:38:10 multinode-541903 kubelet[2326]: I0223 04:38:10.016830 2326 scope.go:115] "RemoveContainer" containerID="6f11eb4875809ce6bbb9d5742076520cfd230818f0d79ecc123f25f18fce3942"
Feb 23 04:38:10 multinode-541903 kubelet[2326]: E0223 04:38:10.017448 2326 remote_runtime.go:415] "ContainerStatus from runtime service failed" err="rpc error: code = Unknown desc = Error: No such container: 6f11eb4875809ce6bbb9d5742076520cfd230818f0d79ecc123f25f18fce3942" containerID="6f11eb4875809ce6bbb9d5742076520cfd230818f0d79ecc123f25f18fce3942"
Feb 23 04:38:10 multinode-541903 kubelet[2326]: I0223 04:38:10.017485 2326 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={Type:docker ID:6f11eb4875809ce6bbb9d5742076520cfd230818f0d79ecc123f25f18fce3942} err="failed to get container status \"6f11eb4875809ce6bbb9d5742076520cfd230818f0d79ecc123f25f18fce3942\": rpc error: code = Unknown desc = Error: No such container: 6f11eb4875809ce6bbb9d5742076520cfd230818f0d79ecc123f25f18fce3942"
*
* ==> storage-provisioner [086a9642a939] <==
* I0223 04:37:46.702078 1 storage_provisioner.go:116] Initializing the minikube storage provisioner...
I0223 04:37:46.709622 1 storage_provisioner.go:141] Storage provisioner initialized, now starting service!
I0223 04:37:46.709654 1 leaderelection.go:243] attempting to acquire leader lease kube-system/k8s.io-minikube-hostpath...
I0223 04:37:46.715506 1 leaderelection.go:253] successfully acquired lease kube-system/k8s.io-minikube-hostpath
I0223 04:37:46.715574 1 event.go:282] Event(v1.ObjectReference{Kind:"Endpoints", Namespace:"kube-system", Name:"k8s.io-minikube-hostpath", UID:"4a35914a-9e87-4b33-886a-f39e4dd1d90e", APIVersion:"v1", ResourceVersion:"375", FieldPath:""}): type: 'Normal' reason: 'LeaderElection' multinode-541903_5dca9e66-b165-492e-a55e-54ef5c08754c became leader
I0223 04:37:46.715649 1 controller.go:835] Starting provisioner controller k8s.io/minikube-hostpath_multinode-541903_5dca9e66-b165-492e-a55e-54ef5c08754c!
I0223 04:37:46.815826 1 controller.go:884] Started provisioner controller k8s.io/minikube-hostpath_multinode-541903_5dca9e66-b165-492e-a55e-54ef5c08754c!
-- /stdout --
helpers_test.go:254: (dbg) Run: out/minikube-linux-amd64 status --format={{.APIServer}} -p multinode-541903 -n multinode-541903
helpers_test.go:261: (dbg) Run: kubectl --context multinode-541903 get po -o=jsonpath={.items[*].metadata.name} -A --field-selector=status.phase!=Running
helpers_test.go:285: <<< TestMultiNode/serial/DeployApp2Nodes FAILED: end of post-mortem logs <<<
helpers_test.go:286: ---------------------/post-mortem---------------------------------
--- FAIL: TestMultiNode/serial/DeployApp2Nodes (5.98s)