=== RUN TestDockerEnvContainerd
docker_test.go:170: running with containerd true linux arm64
docker_test.go:181: (dbg) Run: out/minikube-linux-arm64 start -p dockerenv-940737 --driver=docker --container-runtime=containerd
docker_test.go:181: (dbg) Done: out/minikube-linux-arm64 start -p dockerenv-940737 --driver=docker --container-runtime=containerd: (29.121038399s)
docker_test.go:189: (dbg) Run: /bin/bash -c "out/minikube-linux-arm64 docker-env --ssh-host --ssh-add -p dockerenv-940737"
docker_test.go:189: (dbg) Done: /bin/bash -c "out/minikube-linux-arm64 docker-env --ssh-host --ssh-add -p dockerenv-940737": (1.132297345s)
docker_test.go:220: (dbg) Run: /bin/bash -c "SSH_AUTH_SOCK="/tmp/ssh-avXcaEN8zW6w/agent.604851" SSH_AGENT_PID="604852" DOCKER_HOST=ssh://docker@127.0.0.1:33509 docker version"
docker_test.go:243: (dbg) Run: /bin/bash -c "SSH_AUTH_SOCK="/tmp/ssh-avXcaEN8zW6w/agent.604851" SSH_AGENT_PID="604852" DOCKER_HOST=ssh://docker@127.0.0.1:33509 DOCKER_BUILDKIT=0 docker build -t local/minikube-dockerenv-containerd-test:latest testdata/docker-env"
docker_test.go:243: (dbg) Non-zero exit: /bin/bash -c "SSH_AUTH_SOCK="/tmp/ssh-avXcaEN8zW6w/agent.604851" SSH_AGENT_PID="604852" DOCKER_HOST=ssh://docker@127.0.0.1:33509 DOCKER_BUILDKIT=0 docker build -t local/minikube-dockerenv-containerd-test:latest testdata/docker-env": exit status 1 (1.155771901s)
-- stdout --
Sending build context to Docker daemon 2.048kB
-- /stdout --
** stderr **
DEPRECATED: The legacy builder is deprecated and will be removed in a future release.
BuildKit is currently disabled; enable it by removing the DOCKER_BUILDKIT=0
environment-variable.
Error response from daemon: exit status 1
** /stderr **
docker_test.go:245: failed to build images, error: exit status 1, output:
-- stdout --
Sending build context to Docker daemon 2.048kB
-- /stdout --
** stderr **
DEPRECATED: The legacy builder is deprecated and will be removed in a future release.
BuildKit is currently disabled; enable it by removing the DOCKER_BUILDKIT=0
environment-variable.
Error response from daemon: exit status 1
** /stderr **
docker_test.go:250: (dbg) Run: /bin/bash -c "SSH_AUTH_SOCK="/tmp/ssh-avXcaEN8zW6w/agent.604851" SSH_AGENT_PID="604852" DOCKER_HOST=ssh://docker@127.0.0.1:33509 docker image ls"
docker_test.go:255: failed to detect image 'local/minikube-dockerenv-containerd-test' in output of docker image ls
panic.go:631: *** TestDockerEnvContainerd FAILED at 2025-05-10 17:27:33.942285098 +0000 UTC m=+525.432608105
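(For reference, not part of the captured log: the failing step is a legacy, non-BuildKit docker build pointed over SSH at the dockerd inside the kic container. A minimal manual reproduction sketch, assuming the dockerenv-940737 profile from this run is still up and out/minikube-linux-arm64 is available; the SSH host port, 33509 in this run, is assigned fresh each time:)
eval "$(out/minikube-linux-arm64 docker-env --ssh-host --ssh-add -p dockerenv-940737)"   # exports DOCKER_HOST=ssh://docker@127.0.0.1:<port> and loads the profile key into an ssh-agent
DOCKER_BUILDKIT=0 docker build -t local/minikube-dockerenv-containerd-test:latest testdata/docker-env   # the build that exited with status 1 above
docker image ls   # the test then greps this listing for local/minikube-dockerenv-containerd-test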
helpers_test.go:222: -----------------------post-mortem--------------------------------
helpers_test.go:230: ======> post-mortem[TestDockerEnvContainerd]: docker inspect <======
helpers_test.go:231: (dbg) Run: docker inspect dockerenv-940737
helpers_test.go:235: (dbg) docker inspect dockerenv-940737:
-- stdout --
[
{
"Id": "c1c9d1926fbc51109bf73958424ea306d70779d30655e2f4aecb4ca703d2b633",
"Created": "2025-05-10T17:26:56.667792995Z",
"Path": "/usr/local/bin/entrypoint",
"Args": [
"/sbin/init"
],
"State": {
"Status": "running",
"Running": true,
"Paused": false,
"Restarting": false,
"OOMKilled": false,
"Dead": false,
"Pid": 602346,
"ExitCode": 0,
"Error": "",
"StartedAt": "2025-05-10T17:26:56.728000706Z",
"FinishedAt": "0001-01-01T00:00:00Z"
},
"Image": "sha256:8d0051f61c70b78ce0d27f52d63d179360406349b05070abb34de548473ac66d",
"ResolvConfPath": "/var/lib/docker/containers/c1c9d1926fbc51109bf73958424ea306d70779d30655e2f4aecb4ca703d2b633/resolv.conf",
"HostnamePath": "/var/lib/docker/containers/c1c9d1926fbc51109bf73958424ea306d70779d30655e2f4aecb4ca703d2b633/hostname",
"HostsPath": "/var/lib/docker/containers/c1c9d1926fbc51109bf73958424ea306d70779d30655e2f4aecb4ca703d2b633/hosts",
"LogPath": "/var/lib/docker/containers/c1c9d1926fbc51109bf73958424ea306d70779d30655e2f4aecb4ca703d2b633/c1c9d1926fbc51109bf73958424ea306d70779d30655e2f4aecb4ca703d2b633-json.log",
"Name": "/dockerenv-940737",
"RestartCount": 0,
"Driver": "overlay2",
"Platform": "linux",
"MountLabel": "",
"ProcessLabel": "",
"AppArmorProfile": "unconfined",
"ExecIDs": null,
"HostConfig": {
"Binds": [
"/lib/modules:/lib/modules:ro",
"dockerenv-940737:/var"
],
"ContainerIDFile": "",
"LogConfig": {
"Type": "json-file",
"Config": {}
},
"NetworkMode": "dockerenv-940737",
"PortBindings": {
"22/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"2376/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"32443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"5000/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"8443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
]
},
"RestartPolicy": {
"Name": "no",
"MaximumRetryCount": 0
},
"AutoRemove": false,
"VolumeDriver": "",
"VolumesFrom": null,
"ConsoleSize": [
0,
0
],
"CapAdd": null,
"CapDrop": null,
"CgroupnsMode": "host",
"Dns": [],
"DnsOptions": [],
"DnsSearch": [],
"ExtraHosts": null,
"GroupAdd": null,
"IpcMode": "private",
"Cgroup": "",
"Links": null,
"OomScoreAdj": 0,
"PidMode": "",
"Privileged": true,
"PublishAllPorts": false,
"ReadonlyRootfs": false,
"SecurityOpt": [
"seccomp=unconfined",
"apparmor=unconfined",
"label=disable"
],
"Tmpfs": {
"/run": "",
"/tmp": ""
},
"UTSMode": "",
"UsernsMode": "",
"ShmSize": 67108864,
"Runtime": "runc",
"Isolation": "",
"CpuShares": 0,
"Memory": 2306867200,
"NanoCpus": 2000000000,
"CgroupParent": "",
"BlkioWeight": 0,
"BlkioWeightDevice": [],
"BlkioDeviceReadBps": [],
"BlkioDeviceWriteBps": [],
"BlkioDeviceReadIOps": [],
"BlkioDeviceWriteIOps": [],
"CpuPeriod": 0,
"CpuQuota": 0,
"CpuRealtimePeriod": 0,
"CpuRealtimeRuntime": 0,
"CpusetCpus": "",
"CpusetMems": "",
"Devices": [],
"DeviceCgroupRules": null,
"DeviceRequests": null,
"MemoryReservation": 0,
"MemorySwap": 4613734400,
"MemorySwappiness": null,
"OomKillDisable": false,
"PidsLimit": null,
"Ulimits": [],
"CpuCount": 0,
"CpuPercent": 0,
"IOMaximumIOps": 0,
"IOMaximumBandwidth": 0,
"MaskedPaths": null,
"ReadonlyPaths": null
},
"GraphDriver": {
"Data": {
"ID": "c1c9d1926fbc51109bf73958424ea306d70779d30655e2f4aecb4ca703d2b633",
"LowerDir": "/var/lib/docker/overlay2/910434af93154f9ad5e8510e0ac8fee29eb3c02dfaca4d62a3434f2d62bdfe80-init/diff:/var/lib/docker/overlay2/f53757f84525a314b7d3e39a04ce4e97d75f5bc16e7ee6805bc71f60238b3710/diff",
"MergedDir": "/var/lib/docker/overlay2/910434af93154f9ad5e8510e0ac8fee29eb3c02dfaca4d62a3434f2d62bdfe80/merged",
"UpperDir": "/var/lib/docker/overlay2/910434af93154f9ad5e8510e0ac8fee29eb3c02dfaca4d62a3434f2d62bdfe80/diff",
"WorkDir": "/var/lib/docker/overlay2/910434af93154f9ad5e8510e0ac8fee29eb3c02dfaca4d62a3434f2d62bdfe80/work"
},
"Name": "overlay2"
},
"Mounts": [
{
"Type": "bind",
"Source": "/lib/modules",
"Destination": "/lib/modules",
"Mode": "ro",
"RW": false,
"Propagation": "rprivate"
},
{
"Type": "volume",
"Name": "dockerenv-940737",
"Source": "/var/lib/docker/volumes/dockerenv-940737/_data",
"Destination": "/var",
"Driver": "local",
"Mode": "z",
"RW": true,
"Propagation": ""
}
],
"Config": {
"Hostname": "dockerenv-940737",
"Domainname": "",
"User": "",
"AttachStdin": false,
"AttachStdout": false,
"AttachStderr": false,
"ExposedPorts": {
"22/tcp": {},
"2376/tcp": {},
"32443/tcp": {},
"5000/tcp": {},
"8443/tcp": {}
},
"Tty": true,
"OpenStdin": false,
"StdinOnce": false,
"Env": [
"container=docker",
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
],
"Cmd": null,
"Image": "gcr.io/k8s-minikube/kicbase-builds:v0.0.46-1746731792-20718@sha256:074d9afa1e8827ea0e101248fc55098d304814b5d8bf485882a81afc90084155",
"Volumes": null,
"WorkingDir": "/",
"Entrypoint": [
"/usr/local/bin/entrypoint",
"/sbin/init"
],
"OnBuild": null,
"Labels": {
"created_by.minikube.sigs.k8s.io": "true",
"mode.minikube.sigs.k8s.io": "dockerenv-940737",
"name.minikube.sigs.k8s.io": "dockerenv-940737",
"role.minikube.sigs.k8s.io": ""
},
"StopSignal": "SIGRTMIN+3"
},
"NetworkSettings": {
"Bridge": "",
"SandboxID": "aa655f6017abf72f99be434c8b8bb13d04f63b180db47d05c4c2451b4a2b525d",
"SandboxKey": "/var/run/docker/netns/aa655f6017ab",
"Ports": {
"22/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33509"
}
],
"2376/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33510"
}
],
"32443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33513"
}
],
"5000/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33511"
}
],
"8443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33512"
}
]
},
"HairpinMode": false,
"LinkLocalIPv6Address": "",
"LinkLocalIPv6PrefixLen": 0,
"SecondaryIPAddresses": null,
"SecondaryIPv6Addresses": null,
"EndpointID": "",
"Gateway": "",
"GlobalIPv6Address": "",
"GlobalIPv6PrefixLen": 0,
"IPAddress": "",
"IPPrefixLen": 0,
"IPv6Gateway": "",
"MacAddress": "",
"Networks": {
"dockerenv-940737": {
"IPAMConfig": {
"IPv4Address": "192.168.49.2"
},
"Links": null,
"Aliases": null,
"MacAddress": "16:01:fb:41:9e:10",
"DriverOpts": null,
"GwPriority": 0,
"NetworkID": "ae0bacdc3ac5b3524d444215fc195e76a03a75cf5cd2a148a0e9741922d9361a",
"EndpointID": "c2c5b9cb3a290efeda2540d4f5e147cd05f14e98b91e3effc201546717d0afa7",
"Gateway": "192.168.49.1",
"IPAddress": "192.168.49.2",
"IPPrefixLen": 24,
"IPv6Gateway": "",
"GlobalIPv6Address": "",
"GlobalIPv6PrefixLen": 0,
"DNSNames": [
"dockerenv-940737",
"c1c9d1926fbc"
]
}
}
}
}
]
-- /stdout --
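(For orientation, not part of the harness output: the ssh://docker@127.0.0.1:33509 endpoint used by the failed build is resolved from the 22/tcp entry under NetworkSettings.Ports in the inspect dump above; minikube reads it with the same Go template that appears later in these logs, e.g.:)
docker container inspect -f '{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}' dockerenv-940737   # prints 33509 for this run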
helpers_test.go:239: (dbg) Run: out/minikube-linux-arm64 status --format={{.Host}} -p dockerenv-940737 -n dockerenv-940737
helpers_test.go:244: <<< TestDockerEnvContainerd FAILED: start of post-mortem logs <<<
helpers_test.go:245: ======> post-mortem[TestDockerEnvContainerd]: minikube logs <======
helpers_test.go:247: (dbg) Run: out/minikube-linux-arm64 -p dockerenv-940737 logs -n 25
helpers_test.go:247: (dbg) Done: out/minikube-linux-arm64 -p dockerenv-940737 logs -n 25: (1.259036246s)
helpers_test.go:252: TestDockerEnvContainerd logs:
-- stdout --
==> Audit <==
|------------|---------------------------------------------------------------------------------------------|------------------|---------|---------|---------------------|---------------------|
| Command | Args | Profile | User | Version | Start Time | End Time |
|------------|---------------------------------------------------------------------------------------------|------------------|---------|---------|---------------------|---------------------|
| addons | addons-901642 addons disable | addons-901642 | jenkins | v1.35.0 | 10 May 25 17:24 UTC | 10 May 25 17:24 UTC |
| | gcp-auth --alsologtostderr | | | | | |
| | -v=1 | | | | | |
| addons | enable headlamp | addons-901642 | jenkins | v1.35.0 | 10 May 25 17:24 UTC | 10 May 25 17:24 UTC |
| | -p addons-901642 | | | | | |
| | --alsologtostderr -v=1 | | | | | |
| addons | addons-901642 addons disable | addons-901642 | jenkins | v1.35.0 | 10 May 25 17:24 UTC | 10 May 25 17:24 UTC |
| | headlamp --alsologtostderr | | | | | |
| | -v=1 | | | | | |
| ip | addons-901642 ip | addons-901642 | jenkins | v1.35.0 | 10 May 25 17:24 UTC | 10 May 25 17:24 UTC |
| addons | addons-901642 addons disable | addons-901642 | jenkins | v1.35.0 | 10 May 25 17:24 UTC | 10 May 25 17:24 UTC |
| | registry --alsologtostderr | | | | | |
| | -v=1 | | | | | |
| addons | addons-901642 addons | addons-901642 | jenkins | v1.35.0 | 10 May 25 17:25 UTC | 10 May 25 17:25 UTC |
| | disable metrics-server | | | | | |
| | --alsologtostderr -v=1 | | | | | |
| addons | addons-901642 addons | addons-901642 | jenkins | v1.35.0 | 10 May 25 17:25 UTC | 10 May 25 17:25 UTC |
| | disable inspektor-gadget | | | | | |
| | --alsologtostderr -v=1 | | | | | |
| ssh | addons-901642 ssh curl -s | addons-901642 | jenkins | v1.35.0 | 10 May 25 17:25 UTC | 10 May 25 17:25 UTC |
| | http://127.0.0.1/ -H 'Host: | | | | | |
| | nginx.example.com' | | | | | |
| ip | addons-901642 ip | addons-901642 | jenkins | v1.35.0 | 10 May 25 17:25 UTC | 10 May 25 17:25 UTC |
| addons | addons-901642 addons disable | addons-901642 | jenkins | v1.35.0 | 10 May 25 17:25 UTC | 10 May 25 17:25 UTC |
| | ingress-dns --alsologtostderr | | | | | |
| | -v=1 | | | | | |
| addons | addons-901642 addons | addons-901642 | jenkins | v1.35.0 | 10 May 25 17:25 UTC | 10 May 25 17:25 UTC |
| | disable volumesnapshots | | | | | |
| | --alsologtostderr -v=1 | | | | | |
| addons | addons-901642 addons disable | addons-901642 | jenkins | v1.35.0 | 10 May 25 17:25 UTC | 10 May 25 17:25 UTC |
| | ingress --alsologtostderr -v=1 | | | | | |
| addons | addons-901642 addons | addons-901642 | jenkins | v1.35.0 | 10 May 25 17:25 UTC | 10 May 25 17:25 UTC |
| | disable csi-hostpath-driver | | | | | |
| | --alsologtostderr -v=1 | | | | | |
| addons | addons-901642 addons | addons-901642 | jenkins | v1.35.0 | 10 May 25 17:25 UTC | 10 May 25 17:25 UTC |
| | disable nvidia-device-plugin | | | | | |
| | --alsologtostderr -v=1 | | | | | |
| addons | addons-901642 addons disable | addons-901642 | jenkins | v1.35.0 | 10 May 25 17:25 UTC | 10 May 25 17:25 UTC |
| | yakd --alsologtostderr -v=1 | | | | | |
| ssh | addons-901642 ssh cat | addons-901642 | jenkins | v1.35.0 | 10 May 25 17:25 UTC | 10 May 25 17:25 UTC |
| | /opt/local-path-provisioner/pvc-efa37df7-607e-46cc-9ade-33bb5dca9c33_default_test-pvc/file1 | | | | | |
| addons | addons-901642 addons disable | addons-901642 | jenkins | v1.35.0 | 10 May 25 17:25 UTC | 10 May 25 17:26 UTC |
| | storage-provisioner-rancher | | | | | |
| | --alsologtostderr -v=1 | | | | | |
| addons | addons-901642 addons | addons-901642 | jenkins | v1.35.0 | 10 May 25 17:25 UTC | 10 May 25 17:25 UTC |
| | disable cloud-spanner | | | | | |
| | --alsologtostderr -v=1 | | | | | |
| stop | -p addons-901642 | addons-901642 | jenkins | v1.35.0 | 10 May 25 17:26 UTC | 10 May 25 17:26 UTC |
| addons | enable dashboard -p | addons-901642 | jenkins | v1.35.0 | 10 May 25 17:26 UTC | 10 May 25 17:26 UTC |
| | addons-901642 | | | | | |
| addons | disable dashboard -p | addons-901642 | jenkins | v1.35.0 | 10 May 25 17:26 UTC | 10 May 25 17:26 UTC |
| | addons-901642 | | | | | |
| addons | disable gvisor -p | addons-901642 | jenkins | v1.35.0 | 10 May 25 17:26 UTC | 10 May 25 17:26 UTC |
| | addons-901642 | | | | | |
| delete | -p addons-901642 | addons-901642 | jenkins | v1.35.0 | 10 May 25 17:26 UTC | 10 May 25 17:26 UTC |
| start | -p dockerenv-940737 | dockerenv-940737 | jenkins | v1.35.0 | 10 May 25 17:26 UTC | 10 May 25 17:27 UTC |
| | --driver=docker | | | | | |
| | --container-runtime=containerd | | | | | |
| docker-env | --ssh-host --ssh-add -p | dockerenv-940737 | jenkins | v1.35.0 | 10 May 25 17:27 UTC | 10 May 25 17:27 UTC |
| | dockerenv-940737 | | | | | |
|------------|---------------------------------------------------------------------------------------------|------------------|---------|---------|---------------------|---------------------|
==> Last Start <==
Log file created at: 2025/05/10 17:26:51
Running on machine: ip-172-31-31-251
Binary: Built with gc go1.24.0 for linux/arm64
Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg
I0510 17:26:51.281147 601963 out.go:345] Setting OutFile to fd 1 ...
I0510 17:26:51.281320 601963 out.go:392] TERM=,COLORTERM=, which probably does not support color
I0510 17:26:51.281324 601963 out.go:358] Setting ErrFile to fd 2...
I0510 17:26:51.281328 601963 out.go:392] TERM=,COLORTERM=, which probably does not support color
I0510 17:26:51.281603 601963 root.go:338] Updating PATH: /home/jenkins/minikube-integration/20720-576361/.minikube/bin
I0510 17:26:51.282009 601963 out.go:352] Setting JSON to false
I0510 17:26:51.282916 601963 start.go:130] hostinfo: {"hostname":"ip-172-31-31-251","uptime":11356,"bootTime":1746886656,"procs":158,"os":"linux","platform":"ubuntu","platformFamily":"debian","platformVersion":"20.04","kernelVersion":"5.15.0-1083-aws","kernelArch":"aarch64","virtualizationSystem":"","virtualizationRole":"","hostId":"982e3628-3742-4b3e-bb63-ac1b07660ec7"}
I0510 17:26:51.282969 601963 start.go:140] virtualization:
I0510 17:26:51.287388 601963 out.go:177] * [dockerenv-940737] minikube v1.35.0 on Ubuntu 20.04 (arm64)
I0510 17:26:51.291209 601963 notify.go:220] Checking for updates...
I0510 17:26:51.291756 601963 out.go:177] - MINIKUBE_LOCATION=20720
I0510 17:26:51.295294 601963 out.go:177] - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
I0510 17:26:51.299738 601963 out.go:177] - KUBECONFIG=/home/jenkins/minikube-integration/20720-576361/kubeconfig
I0510 17:26:51.303050 601963 out.go:177] - MINIKUBE_HOME=/home/jenkins/minikube-integration/20720-576361/.minikube
I0510 17:26:51.306159 601963 out.go:177] - MINIKUBE_BIN=out/minikube-linux-arm64
I0510 17:26:51.309807 601963 out.go:177] - MINIKUBE_FORCE_SYSTEMD=
I0510 17:26:51.313006 601963 driver.go:404] Setting default libvirt URI to qemu:///system
I0510 17:26:51.346665 601963 docker.go:123] docker version: linux-28.1.1:Docker Engine - Community
I0510 17:26:51.346780 601963 cli_runner.go:164] Run: docker system info --format "{{json .}}"
I0510 17:26:51.425850 601963 info.go:266] docker info: {ID:EOU5:DNGX:XN6V:L2FZ:UXRM:5TWK:EVUR:KC2F:GT7Z:Y4O4:GB77:5PD3 Containers:1 ContainersRunning:1 ContainersPaused:0 ContainersStopped:0 Images:2 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:false BridgeNfIP6Tables:false Debug:false NFd:36 OomKillDisable:true NGoroutines:52 SystemTime:2025-05-10 17:26:51.416759966 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1083-aws OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Architecture:aarch64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[::1/128 127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:2 MemTotal:8214831104 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ip-172-31-31-251 Labels:[] ExperimentalBuild:false ServerVersion:28.1.1 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:05044ec0a9a75232cad458027ca83437aae3f4da Expected:} RuncCommit:{ID:v1.2.5-0-g59923ef Expected:} InitCommit:{ID:de40ad0 Expected:} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.23.0] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.35.1]] Warnings:<nil>}}
I0510 17:26:51.425949 601963 docker.go:318] overlay module found
I0510 17:26:51.429393 601963 out.go:177] * Using the docker driver based on user configuration
I0510 17:26:51.432458 601963 start.go:304] selected driver: docker
I0510 17:26:51.432480 601963 start.go:908] validating driver "docker" against <nil>
I0510 17:26:51.432492 601963 start.go:919] status for docker: {Installed:true Healthy:true Running:false NeedsImprovement:false Error:<nil> Reason: Fix: Doc: Version:}
I0510 17:26:51.432611 601963 cli_runner.go:164] Run: docker system info --format "{{json .}}"
I0510 17:26:51.483955 601963 info.go:266] docker info: {ID:EOU5:DNGX:XN6V:L2FZ:UXRM:5TWK:EVUR:KC2F:GT7Z:Y4O4:GB77:5PD3 Containers:1 ContainersRunning:1 ContainersPaused:0 ContainersStopped:0 Images:2 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:false BridgeNfIP6Tables:false Debug:false NFd:36 OomKillDisable:true NGoroutines:52 SystemTime:2025-05-10 17:26:51.474992233 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1083-aws OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Architecture:aarch64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[::1/128 127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:2 MemTotal:8214831104 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ip-172-31-31-251 Labels:[] ExperimentalBuild:false ServerVersion:28.1.1 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:05044ec0a9a75232cad458027ca83437aae3f4da Expected:} RuncCommit:{ID:v1.2.5-0-g59923ef Expected:} InitCommit:{ID:de40ad0 Expected:} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.23.0] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.35.1]] Warnings:<nil>}}
I0510 17:26:51.484104 601963 start_flags.go:311] no existing cluster config was found, will generate one from the flags
I0510 17:26:51.484387 601963 start_flags.go:394] Using suggested 2200MB memory alloc based on sys=7834MB, container=7834MB
I0510 17:26:51.484533 601963 start_flags.go:957] Wait components to verify : map[apiserver:true system_pods:true]
I0510 17:26:51.487643 601963 out.go:177] * Using Docker driver with root privileges
I0510 17:26:51.490625 601963 cni.go:84] Creating CNI manager for ""
I0510 17:26:51.490692 601963 cni.go:143] "docker" driver + "containerd" runtime found, recommending kindnet
I0510 17:26:51.490700 601963 start_flags.go:320] Found "CNI" CNI - setting NetworkPlugin=cni
I0510 17:26:51.490791 601963 start.go:347] cluster config:
{Name:dockerenv-940737 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.46-1746731792-20718@sha256:074d9afa1e8827ea0e101248fc55098d304814b5d8bf485882a81afc90084155 Memory:2200 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.33.0 ClusterName:dockerenv-940737 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.33.0 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I0510 17:26:51.494124 601963 out.go:177] * Starting "dockerenv-940737" primary control-plane node in "dockerenv-940737" cluster
I0510 17:26:51.497072 601963 cache.go:121] Beginning downloading kic base image for docker with containerd
I0510 17:26:51.500017 601963 out.go:177] * Pulling base image v0.0.46-1746731792-20718 ...
I0510 17:26:51.502931 601963 preload.go:131] Checking if preload exists for k8s version v1.33.0 and runtime containerd
I0510 17:26:51.502992 601963 preload.go:146] Found local preload: /home/jenkins/minikube-integration/20720-576361/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.33.0-containerd-overlay2-arm64.tar.lz4
I0510 17:26:51.503000 601963 cache.go:56] Caching tarball of preloaded images
I0510 17:26:51.503012 601963 image.go:81] Checking for gcr.io/k8s-minikube/kicbase-builds:v0.0.46-1746731792-20718@sha256:074d9afa1e8827ea0e101248fc55098d304814b5d8bf485882a81afc90084155 in local docker daemon
I0510 17:26:51.503100 601963 preload.go:172] Found /home/jenkins/minikube-integration/20720-576361/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.33.0-containerd-overlay2-arm64.tar.lz4 in cache, skipping download
I0510 17:26:51.503109 601963 cache.go:59] Finished verifying existence of preloaded tar for v1.33.0 on containerd
I0510 17:26:51.503496 601963 profile.go:143] Saving config to /home/jenkins/minikube-integration/20720-576361/.minikube/profiles/dockerenv-940737/config.json ...
I0510 17:26:51.503528 601963 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/20720-576361/.minikube/profiles/dockerenv-940737/config.json: {Name:mk9000a1310531dda2247ae176d2146773d64948 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0510 17:26:51.521760 601963 image.go:100] Found gcr.io/k8s-minikube/kicbase-builds:v0.0.46-1746731792-20718@sha256:074d9afa1e8827ea0e101248fc55098d304814b5d8bf485882a81afc90084155 in local docker daemon, skipping pull
I0510 17:26:51.521772 601963 cache.go:145] gcr.io/k8s-minikube/kicbase-builds:v0.0.46-1746731792-20718@sha256:074d9afa1e8827ea0e101248fc55098d304814b5d8bf485882a81afc90084155 exists in daemon, skipping load
I0510 17:26:51.521790 601963 cache.go:230] Successfully downloaded all kic artifacts
I0510 17:26:51.521812 601963 start.go:360] acquireMachinesLock for dockerenv-940737: {Name:mkcc46948b491d0e2b393d7cd8b105d61ca264d1 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I0510 17:26:51.522539 601963 start.go:364] duration metric: took 710.52µs to acquireMachinesLock for "dockerenv-940737"
I0510 17:26:51.522567 601963 start.go:93] Provisioning new machine with config: &{Name:dockerenv-940737 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.46-1746731792-20718@sha256:074d9afa1e8827ea0e101248fc55098d304814b5d8bf485882a81afc90084155 Memory:2200 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.33.0 ClusterName:dockerenv-940737 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.33.0 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} &{Name: IP: Port:8443 KubernetesVersion:v1.33.0 ContainerRuntime:containerd ControlPlane:true Worker:true}
I0510 17:26:51.522631 601963 start.go:125] createHost starting for "" (driver="docker")
I0510 17:26:51.526094 601963 out.go:235] * Creating docker container (CPUs=2, Memory=2200MB) ...
I0510 17:26:51.526324 601963 start.go:159] libmachine.API.Create for "dockerenv-940737" (driver="docker")
I0510 17:26:51.526354 601963 client.go:168] LocalClient.Create starting
I0510 17:26:51.526450 601963 main.go:141] libmachine: Reading certificate data from /home/jenkins/minikube-integration/20720-576361/.minikube/certs/ca.pem
I0510 17:26:51.526482 601963 main.go:141] libmachine: Decoding PEM data...
I0510 17:26:51.526495 601963 main.go:141] libmachine: Parsing certificate...
I0510 17:26:51.526545 601963 main.go:141] libmachine: Reading certificate data from /home/jenkins/minikube-integration/20720-576361/.minikube/certs/cert.pem
I0510 17:26:51.526566 601963 main.go:141] libmachine: Decoding PEM data...
I0510 17:26:51.526577 601963 main.go:141] libmachine: Parsing certificate...
I0510 17:26:51.526916 601963 cli_runner.go:164] Run: docker network inspect dockerenv-940737 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
W0510 17:26:51.542207 601963 cli_runner.go:211] docker network inspect dockerenv-940737 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}" returned with exit code 1
I0510 17:26:51.542275 601963 network_create.go:284] running [docker network inspect dockerenv-940737] to gather additional debugging logs...
I0510 17:26:51.542288 601963 cli_runner.go:164] Run: docker network inspect dockerenv-940737
W0510 17:26:51.555962 601963 cli_runner.go:211] docker network inspect dockerenv-940737 returned with exit code 1
I0510 17:26:51.555993 601963 network_create.go:287] error running [docker network inspect dockerenv-940737]: docker network inspect dockerenv-940737: exit status 1
stdout:
[]
stderr:
Error response from daemon: network dockerenv-940737 not found
I0510 17:26:51.556014 601963 network_create.go:289] output of [docker network inspect dockerenv-940737]: -- stdout --
[]
-- /stdout --
** stderr **
Error response from daemon: network dockerenv-940737 not found
** /stderr **
I0510 17:26:51.556109 601963 cli_runner.go:164] Run: docker network inspect bridge --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I0510 17:26:51.571880 601963 network.go:206] using free private subnet 192.168.49.0/24: &{IP:192.168.49.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.49.0/24 Gateway:192.168.49.1 ClientMin:192.168.49.2 ClientMax:192.168.49.254 Broadcast:192.168.49.255 IsPrivate:true Interface:{IfaceName: IfaceIPv4: IfaceMTU:0 IfaceMAC:} reservation:0x4001855800}
I0510 17:26:51.571908 601963 network_create.go:124] attempt to create docker network dockerenv-940737 192.168.49.0/24 with gateway 192.168.49.1 and MTU of 1500 ...
I0510 17:26:51.571962 601963 cli_runner.go:164] Run: docker network create --driver=bridge --subnet=192.168.49.0/24 --gateway=192.168.49.1 -o --ip-masq -o --icc -o com.docker.network.driver.mtu=1500 --label=created_by.minikube.sigs.k8s.io=true --label=name.minikube.sigs.k8s.io=dockerenv-940737 dockerenv-940737
I0510 17:26:51.630609 601963 network_create.go:108] docker network dockerenv-940737 192.168.49.0/24 created
I0510 17:26:51.630630 601963 kic.go:121] calculated static IP "192.168.49.2" for the "dockerenv-940737" container
I0510 17:26:51.630714 601963 cli_runner.go:164] Run: docker ps -a --format {{.Names}}
I0510 17:26:51.646098 601963 cli_runner.go:164] Run: docker volume create dockerenv-940737 --label name.minikube.sigs.k8s.io=dockerenv-940737 --label created_by.minikube.sigs.k8s.io=true
I0510 17:26:51.663516 601963 oci.go:103] Successfully created a docker volume dockerenv-940737
I0510 17:26:51.663600 601963 cli_runner.go:164] Run: docker run --rm --name dockerenv-940737-preload-sidecar --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=dockerenv-940737 --entrypoint /usr/bin/test -v dockerenv-940737:/var gcr.io/k8s-minikube/kicbase-builds:v0.0.46-1746731792-20718@sha256:074d9afa1e8827ea0e101248fc55098d304814b5d8bf485882a81afc90084155 -d /var/lib
I0510 17:26:52.217743 601963 oci.go:107] Successfully prepared a docker volume dockerenv-940737
I0510 17:26:52.217778 601963 preload.go:131] Checking if preload exists for k8s version v1.33.0 and runtime containerd
I0510 17:26:52.217796 601963 kic.go:194] Starting extracting preloaded images to volume ...
I0510 17:26:52.217873 601963 cli_runner.go:164] Run: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/20720-576361/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.33.0-containerd-overlay2-arm64.tar.lz4:/preloaded.tar:ro -v dockerenv-940737:/extractDir gcr.io/k8s-minikube/kicbase-builds:v0.0.46-1746731792-20718@sha256:074d9afa1e8827ea0e101248fc55098d304814b5d8bf485882a81afc90084155 -I lz4 -xf /preloaded.tar -C /extractDir
I0510 17:26:56.592321 601963 cli_runner.go:217] Completed: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/20720-576361/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.33.0-containerd-overlay2-arm64.tar.lz4:/preloaded.tar:ro -v dockerenv-940737:/extractDir gcr.io/k8s-minikube/kicbase-builds:v0.0.46-1746731792-20718@sha256:074d9afa1e8827ea0e101248fc55098d304814b5d8bf485882a81afc90084155 -I lz4 -xf /preloaded.tar -C /extractDir: (4.374413838s)
I0510 17:26:56.592342 601963 kic.go:203] duration metric: took 4.374543215s to extract preloaded images to volume ...
W0510 17:26:56.592763 601963 cgroups_linux.go:77] Your kernel does not support swap limit capabilities or the cgroup is not mounted.
I0510 17:26:56.592867 601963 cli_runner.go:164] Run: docker info --format "'{{json .SecurityOptions}}'"
I0510 17:26:56.653146 601963 cli_runner.go:164] Run: docker run -d -t --privileged --security-opt seccomp=unconfined --tmpfs /tmp --tmpfs /run -v /lib/modules:/lib/modules:ro --hostname dockerenv-940737 --name dockerenv-940737 --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=dockerenv-940737 --label role.minikube.sigs.k8s.io= --label mode.minikube.sigs.k8s.io=dockerenv-940737 --network dockerenv-940737 --ip 192.168.49.2 --volume dockerenv-940737:/var --security-opt apparmor=unconfined --memory=2200mb --cpus=2 -e container=docker --expose 8443 --publish=127.0.0.1::8443 --publish=127.0.0.1::22 --publish=127.0.0.1::2376 --publish=127.0.0.1::5000 --publish=127.0.0.1::32443 gcr.io/k8s-minikube/kicbase-builds:v0.0.46-1746731792-20718@sha256:074d9afa1e8827ea0e101248fc55098d304814b5d8bf485882a81afc90084155
I0510 17:26:56.954216 601963 cli_runner.go:164] Run: docker container inspect dockerenv-940737 --format={{.State.Running}}
I0510 17:26:56.983981 601963 cli_runner.go:164] Run: docker container inspect dockerenv-940737 --format={{.State.Status}}
I0510 17:26:57.005926 601963 cli_runner.go:164] Run: docker exec dockerenv-940737 stat /var/lib/dpkg/alternatives/iptables
I0510 17:26:57.055579 601963 oci.go:144] the created container "dockerenv-940737" has a running status.
I0510 17:26:57.055598 601963 kic.go:225] Creating ssh key for kic: /home/jenkins/minikube-integration/20720-576361/.minikube/machines/dockerenv-940737/id_rsa...
I0510 17:26:57.517067 601963 kic_runner.go:191] docker (temp): /home/jenkins/minikube-integration/20720-576361/.minikube/machines/dockerenv-940737/id_rsa.pub --> /home/docker/.ssh/authorized_keys (381 bytes)
I0510 17:26:57.551197 601963 cli_runner.go:164] Run: docker container inspect dockerenv-940737 --format={{.State.Status}}
I0510 17:26:57.577743 601963 kic_runner.go:93] Run: chown docker:docker /home/docker/.ssh/authorized_keys
I0510 17:26:57.577754 601963 kic_runner.go:114] Args: [docker exec --privileged dockerenv-940737 chown docker:docker /home/docker/.ssh/authorized_keys]
I0510 17:26:57.635237 601963 cli_runner.go:164] Run: docker container inspect dockerenv-940737 --format={{.State.Status}}
I0510 17:26:57.662243 601963 machine.go:93] provisionDockerMachine start ...
I0510 17:26:57.662396 601963 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" dockerenv-940737
I0510 17:26:57.688171 601963 main.go:141] libmachine: Using SSH client type: native
I0510 17:26:57.688504 601963 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x3e66e0] 0x3e8ea0 <nil> [] 0s} 127.0.0.1 33509 <nil> <nil>}
I0510 17:26:57.688512 601963 main.go:141] libmachine: About to run SSH command:
hostname
I0510 17:26:57.857296 601963 main.go:141] libmachine: SSH cmd err, output: <nil>: dockerenv-940737
I0510 17:26:57.857312 601963 ubuntu.go:169] provisioning hostname "dockerenv-940737"
I0510 17:26:57.857394 601963 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" dockerenv-940737
I0510 17:26:57.875793 601963 main.go:141] libmachine: Using SSH client type: native
I0510 17:26:57.876099 601963 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x3e66e0] 0x3e8ea0 <nil> [] 0s} 127.0.0.1 33509 <nil> <nil>}
I0510 17:26:57.876108 601963 main.go:141] libmachine: About to run SSH command:
sudo hostname dockerenv-940737 && echo "dockerenv-940737" | sudo tee /etc/hostname
I0510 17:26:58.033089 601963 main.go:141] libmachine: SSH cmd err, output: <nil>: dockerenv-940737
I0510 17:26:58.033170 601963 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" dockerenv-940737
I0510 17:26:58.056801 601963 main.go:141] libmachine: Using SSH client type: native
I0510 17:26:58.057101 601963 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x3e66e0] 0x3e8ea0 <nil> [] 0s} 127.0.0.1 33509 <nil> <nil>}
I0510 17:26:58.057116 601963 main.go:141] libmachine: About to run SSH command:
if ! grep -xq '.*\sdockerenv-940737' /etc/hosts; then
if grep -xq '127.0.1.1\s.*' /etc/hosts; then
sudo sed -i 's/^127.0.1.1\s.*/127.0.1.1 dockerenv-940737/g' /etc/hosts;
else
echo '127.0.1.1 dockerenv-940737' | sudo tee -a /etc/hosts;
fi
fi
I0510 17:26:58.198129 601963 main.go:141] libmachine: SSH cmd err, output: <nil>:
I0510 17:26:58.198143 601963 ubuntu.go:175] set auth options {CertDir:/home/jenkins/minikube-integration/20720-576361/.minikube CaCertPath:/home/jenkins/minikube-integration/20720-576361/.minikube/certs/ca.pem CaPrivateKeyPath:/home/jenkins/minikube-integration/20720-576361/.minikube/certs/ca-key.pem CaCertRemotePath:/etc/docker/ca.pem ServerCertPath:/home/jenkins/minikube-integration/20720-576361/.minikube/machines/server.pem ServerKeyPath:/home/jenkins/minikube-integration/20720-576361/.minikube/machines/server-key.pem ClientKeyPath:/home/jenkins/minikube-integration/20720-576361/.minikube/certs/key.pem ServerCertRemotePath:/etc/docker/server.pem ServerKeyRemotePath:/etc/docker/server-key.pem ClientCertPath:/home/jenkins/minikube-integration/20720-576361/.minikube/certs/cert.pem ServerCertSANs:[] StorePath:/home/jenkins/minikube-integration/20720-576361/.minikube}
I0510 17:26:58.198164 601963 ubuntu.go:177] setting up certificates
I0510 17:26:58.198174 601963 provision.go:84] configureAuth start
I0510 17:26:58.198242 601963 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" dockerenv-940737
I0510 17:26:58.215435 601963 provision.go:143] copyHostCerts
I0510 17:26:58.215485 601963 exec_runner.go:144] found /home/jenkins/minikube-integration/20720-576361/.minikube/ca.pem, removing ...
I0510 17:26:58.215493 601963 exec_runner.go:203] rm: /home/jenkins/minikube-integration/20720-576361/.minikube/ca.pem
I0510 17:26:58.215567 601963 exec_runner.go:151] cp: /home/jenkins/minikube-integration/20720-576361/.minikube/certs/ca.pem --> /home/jenkins/minikube-integration/20720-576361/.minikube/ca.pem (1082 bytes)
I0510 17:26:58.215683 601963 exec_runner.go:144] found /home/jenkins/minikube-integration/20720-576361/.minikube/cert.pem, removing ...
I0510 17:26:58.215687 601963 exec_runner.go:203] rm: /home/jenkins/minikube-integration/20720-576361/.minikube/cert.pem
I0510 17:26:58.215713 601963 exec_runner.go:151] cp: /home/jenkins/minikube-integration/20720-576361/.minikube/certs/cert.pem --> /home/jenkins/minikube-integration/20720-576361/.minikube/cert.pem (1123 bytes)
I0510 17:26:58.215765 601963 exec_runner.go:144] found /home/jenkins/minikube-integration/20720-576361/.minikube/key.pem, removing ...
I0510 17:26:58.215770 601963 exec_runner.go:203] rm: /home/jenkins/minikube-integration/20720-576361/.minikube/key.pem
I0510 17:26:58.215792 601963 exec_runner.go:151] cp: /home/jenkins/minikube-integration/20720-576361/.minikube/certs/key.pem --> /home/jenkins/minikube-integration/20720-576361/.minikube/key.pem (1675 bytes)
I0510 17:26:58.215835 601963 provision.go:117] generating server cert: /home/jenkins/minikube-integration/20720-576361/.minikube/machines/server.pem ca-key=/home/jenkins/minikube-integration/20720-576361/.minikube/certs/ca.pem private-key=/home/jenkins/minikube-integration/20720-576361/.minikube/certs/ca-key.pem org=jenkins.dockerenv-940737 san=[127.0.0.1 192.168.49.2 dockerenv-940737 localhost minikube]
I0510 17:26:58.685933 601963 provision.go:177] copyRemoteCerts
I0510 17:26:58.685987 601963 ssh_runner.go:195] Run: sudo mkdir -p /etc/docker /etc/docker /etc/docker
I0510 17:26:58.686043 601963 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" dockerenv-940737
I0510 17:26:58.702916 601963 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33509 SSHKeyPath:/home/jenkins/minikube-integration/20720-576361/.minikube/machines/dockerenv-940737/id_rsa Username:docker}
I0510 17:26:58.798264 601963 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20720-576361/.minikube/machines/server.pem --> /etc/docker/server.pem (1216 bytes)
I0510 17:26:58.823389 601963 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20720-576361/.minikube/machines/server-key.pem --> /etc/docker/server-key.pem (1675 bytes)
I0510 17:26:58.847418 601963 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20720-576361/.minikube/certs/ca.pem --> /etc/docker/ca.pem (1082 bytes)
I0510 17:26:58.870854 601963 provision.go:87] duration metric: took 672.657311ms to configureAuth
I0510 17:26:58.870871 601963 ubuntu.go:193] setting minikube options for container-runtime
I0510 17:26:58.871045 601963 config.go:182] Loaded profile config "dockerenv-940737": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.33.0
I0510 17:26:58.871051 601963 machine.go:96] duration metric: took 1.208798585s to provisionDockerMachine
I0510 17:26:58.871056 601963 client.go:171] duration metric: took 7.344697446s to LocalClient.Create
I0510 17:26:58.871077 601963 start.go:167] duration metric: took 7.344753988s to libmachine.API.Create "dockerenv-940737"
I0510 17:26:58.871083 601963 start.go:293] postStartSetup for "dockerenv-940737" (driver="docker")
I0510 17:26:58.871091 601963 start.go:322] creating required directories: [/etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs]
I0510 17:26:58.871149 601963 ssh_runner.go:195] Run: sudo mkdir -p /etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs
I0510 17:26:58.871185 601963 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" dockerenv-940737
I0510 17:26:58.887655 601963 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33509 SSHKeyPath:/home/jenkins/minikube-integration/20720-576361/.minikube/machines/dockerenv-940737/id_rsa Username:docker}
I0510 17:26:58.982730 601963 ssh_runner.go:195] Run: cat /etc/os-release
I0510 17:26:58.985676 601963 main.go:141] libmachine: Couldn't set key VERSION_CODENAME, no corresponding struct field found
I0510 17:26:58.985698 601963 main.go:141] libmachine: Couldn't set key PRIVACY_POLICY_URL, no corresponding struct field found
I0510 17:26:58.985706 601963 main.go:141] libmachine: Couldn't set key UBUNTU_CODENAME, no corresponding struct field found
I0510 17:26:58.985712 601963 info.go:137] Remote host: Ubuntu 22.04.5 LTS
I0510 17:26:58.985721 601963 filesync.go:126] Scanning /home/jenkins/minikube-integration/20720-576361/.minikube/addons for local assets ...
I0510 17:26:58.985776 601963 filesync.go:126] Scanning /home/jenkins/minikube-integration/20720-576361/.minikube/files for local assets ...
I0510 17:26:58.985794 601963 start.go:296] duration metric: took 114.706144ms for postStartSetup
I0510 17:26:58.986122 601963 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" dockerenv-940737
I0510 17:26:59.004173 601963 profile.go:143] Saving config to /home/jenkins/minikube-integration/20720-576361/.minikube/profiles/dockerenv-940737/config.json ...
I0510 17:26:59.004516 601963 ssh_runner.go:195] Run: sh -c "df -h /var | awk 'NR==2{print $5}'"
I0510 17:26:59.004574 601963 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" dockerenv-940737
I0510 17:26:59.021652 601963 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33509 SSHKeyPath:/home/jenkins/minikube-integration/20720-576361/.minikube/machines/dockerenv-940737/id_rsa Username:docker}
I0510 17:26:59.115002 601963 ssh_runner.go:195] Run: sh -c "df -BG /var | awk 'NR==2{print $4}'"
I0510 17:26:59.119445 601963 start.go:128] duration metric: took 7.596798885s to createHost
I0510 17:26:59.119459 601963 start.go:83] releasing machines lock for "dockerenv-940737", held for 7.59691237s
I0510 17:26:59.119542 601963 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" dockerenv-940737
I0510 17:26:59.136544 601963 ssh_runner.go:195] Run: cat /version.json
I0510 17:26:59.136587 601963 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" dockerenv-940737
I0510 17:26:59.136833 601963 ssh_runner.go:195] Run: curl -sS -m 2 https://registry.k8s.io/
I0510 17:26:59.136885 601963 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" dockerenv-940737
I0510 17:26:59.161848 601963 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33509 SSHKeyPath:/home/jenkins/minikube-integration/20720-576361/.minikube/machines/dockerenv-940737/id_rsa Username:docker}
I0510 17:26:59.170365 601963 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33509 SSHKeyPath:/home/jenkins/minikube-integration/20720-576361/.minikube/machines/dockerenv-940737/id_rsa Username:docker}
I0510 17:26:59.387490 601963 ssh_runner.go:195] Run: systemctl --version
I0510 17:26:59.391871 601963 ssh_runner.go:195] Run: sh -c "stat /etc/cni/net.d/*loopback.conf*"
I0510 17:26:59.396200 601963 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f -name *loopback.conf* -not -name *.mk_disabled -exec sh -c "grep -q loopback {} && ( grep -q name {} || sudo sed -i '/"type": "loopback"/i \ \ \ \ "name": "loopback",' {} ) && sudo sed -i 's|"cniVersion": ".*"|"cniVersion": "1.0.0"|g' {}" ;
I0510 17:26:59.420040 601963 cni.go:230] loopback cni configuration patched: "/etc/cni/net.d/*loopback.conf*" found
I0510 17:26:59.420105 601963 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f ( ( -name *bridge* -or -name *podman* ) -and -not -name *.mk_disabled ) -printf "%p, " -exec sh -c "sudo mv {} {}.mk_disabled" ;
I0510 17:26:59.450564 601963 cni.go:262] disabled [/etc/cni/net.d/87-podman-bridge.conflist, /etc/cni/net.d/100-crio-bridge.conf] bridge cni config(s)
I0510 17:26:59.450577 601963 start.go:495] detecting cgroup driver to use...
I0510 17:26:59.450607 601963 detect.go:187] detected "cgroupfs" cgroup driver on host os
I0510 17:26:59.450653 601963 ssh_runner.go:195] Run: sudo systemctl stop -f crio
I0510 17:26:59.462976 601963 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service crio
I0510 17:26:59.474218 601963 docker.go:225] disabling cri-docker service (if available) ...
I0510 17:26:59.474272 601963 ssh_runner.go:195] Run: sudo systemctl stop -f cri-docker.socket
I0510 17:26:59.488396 601963 ssh_runner.go:195] Run: sudo systemctl stop -f cri-docker.service
I0510 17:26:59.502996 601963 ssh_runner.go:195] Run: sudo systemctl disable cri-docker.socket
I0510 17:26:59.584907 601963 ssh_runner.go:195] Run: sudo systemctl mask cri-docker.service
I0510 17:26:59.678951 601963 docker.go:241] disabling docker service ...
I0510 17:26:59.679007 601963 ssh_runner.go:195] Run: sudo systemctl stop -f docker.socket
I0510 17:26:59.700566 601963 ssh_runner.go:195] Run: sudo systemctl stop -f docker.service
I0510 17:26:59.713042 601963 ssh_runner.go:195] Run: sudo systemctl disable docker.socket
I0510 17:26:59.796764 601963 ssh_runner.go:195] Run: sudo systemctl mask docker.service
I0510 17:26:59.891869 601963 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service docker
I0510 17:26:59.903719 601963 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///run/containerd/containerd.sock
" | sudo tee /etc/crictl.yaml"
I0510 17:26:59.919736 601963 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)sandbox_image = .*$|\1sandbox_image = "registry.k8s.io/pause:3.10"|' /etc/containerd/config.toml"
I0510 17:26:59.929083 601963 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)restrict_oom_score_adj = .*$|\1restrict_oom_score_adj = false|' /etc/containerd/config.toml"
I0510 17:26:59.939239 601963 containerd.go:146] configuring containerd to use "cgroupfs" as cgroup driver...
I0510 17:26:59.939306 601963 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)SystemdCgroup = .*$|\1SystemdCgroup = false|g' /etc/containerd/config.toml"
I0510 17:26:59.949091 601963 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runtime.v1.linux"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I0510 17:26:59.959514 601963 ssh_runner.go:195] Run: sh -c "sudo sed -i '/systemd_cgroup/d' /etc/containerd/config.toml"
I0510 17:26:59.969012 601963 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runc.v1"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I0510 17:26:59.979076 601963 ssh_runner.go:195] Run: sh -c "sudo rm -rf /etc/cni/net.mk"
I0510 17:26:59.987754 601963 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)conf_dir = .*$|\1conf_dir = "/etc/cni/net.d"|g' /etc/containerd/config.toml"
I0510 17:26:59.997053 601963 ssh_runner.go:195] Run: sh -c "sudo sed -i '/^ *enable_unprivileged_ports = .*/d' /etc/containerd/config.toml"
I0510 17:27:00.018537 601963 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)\[plugins."io.containerd.grpc.v1.cri"\]|&\n\1 enable_unprivileged_ports = true|' /etc/containerd/config.toml"
I0510 17:27:00.060427 601963 ssh_runner.go:195] Run: sudo sysctl net.bridge.bridge-nf-call-iptables
I0510 17:27:00.079871 601963 ssh_runner.go:195] Run: sudo sh -c "echo 1 > /proc/sys/net/ipv4/ip_forward"
I0510 17:27:00.095180 601963 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0510 17:27:00.274290 601963 ssh_runner.go:195] Run: sudo systemctl restart containerd
I0510 17:27:00.436243 601963 start.go:542] Will wait 60s for socket path /run/containerd/containerd.sock
I0510 17:27:00.436311 601963 ssh_runner.go:195] Run: stat /run/containerd/containerd.sock
I0510 17:27:00.440407 601963 start.go:563] Will wait 60s for crictl version
I0510 17:27:00.440465 601963 ssh_runner.go:195] Run: which crictl
I0510 17:27:00.444152 601963 ssh_runner.go:195] Run: sudo /usr/bin/crictl version
I0510 17:27:00.482027 601963 start.go:579] Version: 0.1.0
RuntimeName: containerd
RuntimeVersion: 1.7.27
RuntimeApiVersion: v1
I0510 17:27:00.482096 601963 ssh_runner.go:195] Run: containerd --version
I0510 17:27:00.505221 601963 ssh_runner.go:195] Run: containerd --version
I0510 17:27:00.533305 601963 out.go:177] * Preparing Kubernetes v1.33.0 on containerd 1.7.27 ...
I0510 17:27:00.536238 601963 cli_runner.go:164] Run: docker network inspect dockerenv-940737 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I0510 17:27:00.552056 601963 ssh_runner.go:195] Run: grep 192.168.49.1 host.minikube.internal$ /etc/hosts
I0510 17:27:00.555641 601963 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\thost.minikube.internal$' "/etc/hosts"; echo "192.168.49.1 host.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I0510 17:27:00.566231 601963 kubeadm.go:875] updating cluster {Name:dockerenv-940737 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.46-1746731792-20718@sha256:074d9afa1e8827ea0e101248fc55098d304814b5d8bf485882a81afc90084155 Memory:2200 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.33.0 ClusterName:dockerenv-940737 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.49.2 Port:8443 KubernetesVersion:v1.33.0 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} ...
I0510 17:27:00.566333 601963 preload.go:131] Checking if preload exists for k8s version v1.33.0 and runtime containerd
I0510 17:27:00.566384 601963 ssh_runner.go:195] Run: sudo crictl images --output json
I0510 17:27:00.606768 601963 containerd.go:627] all images are preloaded for containerd runtime.
I0510 17:27:00.606780 601963 containerd.go:534] Images already preloaded, skipping extraction
I0510 17:27:00.606837 601963 ssh_runner.go:195] Run: sudo crictl images --output json
I0510 17:27:00.645051 601963 containerd.go:627] all images are preloaded for containerd runtime.
I0510 17:27:00.645063 601963 cache_images.go:84] Images are preloaded, skipping loading
I0510 17:27:00.645071 601963 kubeadm.go:926] updating node { 192.168.49.2 8443 v1.33.0 containerd true true} ...
I0510 17:27:00.645154 601963 kubeadm.go:938] kubelet [Unit]
Wants=containerd.service
[Service]
ExecStart=
ExecStart=/var/lib/minikube/binaries/v1.33.0/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --config=/var/lib/kubelet/config.yaml --hostname-override=dockerenv-940737 --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.49.2
[Install]
config:
{KubernetesVersion:v1.33.0 ClusterName:dockerenv-940737 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:}
I0510 17:27:00.645214 601963 ssh_runner.go:195] Run: sudo crictl info
I0510 17:27:00.686036 601963 cni.go:84] Creating CNI manager for ""
I0510 17:27:00.686047 601963 cni.go:143] "docker" driver + "containerd" runtime found, recommending kindnet
I0510 17:27:00.686055 601963 kubeadm.go:84] Using pod CIDR: 10.244.0.0/16
I0510 17:27:00.686075 601963 kubeadm.go:189] kubeadm options: {CertDir:/var/lib/minikube/certs ServiceCIDR:10.96.0.0/12 PodSubnet:10.244.0.0/16 AdvertiseAddress:192.168.49.2 APIServerPort:8443 KubernetesVersion:v1.33.0 EtcdDataDir:/var/lib/minikube/etcd EtcdExtraArgs:map[] ClusterName:dockerenv-940737 NodeName:dockerenv-940737 DNSDomain:cluster.local CRISocket:/run/containerd/containerd.sock ImageRepository: ComponentOptions:[{Component:apiServer ExtraArgs:map[enable-admission-plugins:NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota] Pairs:map[certSANs:["127.0.0.1", "localhost", "192.168.49.2"]]} {Component:controllerManager ExtraArgs:map[allocate-node-cidrs:true leader-elect:false] Pairs:map[]} {Component:scheduler ExtraArgs:map[leader-elect:false] Pairs:map[]}] FeatureArgs:map[] NodeIP:192.168.49.2 CgroupDriver:cgroupfs ClientCAFile:/var/lib/minikube/certs/ca.crt StaticPodPat
h:/etc/kubernetes/manifests ControlPlaneAddress:control-plane.minikube.internal KubeProxyOptions:map[] ResolvConfSearchRegression:false KubeletConfigOpts:map[containerRuntimeEndpoint:unix:///run/containerd/containerd.sock hairpinMode:hairpin-veth runtimeRequestTimeout:15m] PrependCriSocketUnix:true}
I0510 17:27:00.686182 601963 kubeadm.go:195] kubeadm config:
apiVersion: kubeadm.k8s.io/v1beta4
kind: InitConfiguration
localAPIEndpoint:
advertiseAddress: 192.168.49.2
bindPort: 8443
bootstrapTokens:
- groups:
- system:bootstrappers:kubeadm:default-node-token
ttl: 24h0m0s
usages:
- signing
- authentication
nodeRegistration:
criSocket: unix:///run/containerd/containerd.sock
name: "dockerenv-940737"
kubeletExtraArgs:
- name: "node-ip"
value: "192.168.49.2"
taints: []
---
apiVersion: kubeadm.k8s.io/v1beta4
kind: ClusterConfiguration
apiServer:
certSANs: ["127.0.0.1", "localhost", "192.168.49.2"]
extraArgs:
- name: "enable-admission-plugins"
value: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
controllerManager:
extraArgs:
- name: "allocate-node-cidrs"
value: "true"
- name: "leader-elect"
value: "false"
scheduler:
extraArgs:
- name: "leader-elect"
value: "false"
certificatesDir: /var/lib/minikube/certs
clusterName: mk
controlPlaneEndpoint: control-plane.minikube.internal:8443
etcd:
local:
dataDir: /var/lib/minikube/etcd
extraArgs:
- name: "proxy-refresh-interval"
value: "70000"
kubernetesVersion: v1.33.0
networking:
dnsDomain: cluster.local
podSubnet: "10.244.0.0/16"
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
authentication:
x509:
clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: cgroupfs
containerRuntimeEndpoint: unix:///run/containerd/containerd.sock
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"
nodefs.inodesFree: "0%"
imagefs.available: "0%"
failSwapOn: false
staticPodPath: /etc/kubernetes/manifests
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
maxPerCore: 0
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_established"
tcpEstablishedTimeout: 0s
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_close"
tcpCloseWaitTimeout: 0s
I0510 17:27:00.686247 601963 ssh_runner.go:195] Run: sudo ls /var/lib/minikube/binaries/v1.33.0
I0510 17:27:00.695872 601963 binaries.go:44] Found k8s binaries, skipping transfer
I0510 17:27:00.695929 601963 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/kubelet.service.d /lib/systemd/system /var/tmp/minikube
I0510 17:27:00.704450 601963 ssh_runner.go:362] scp memory --> /etc/systemd/system/kubelet.service.d/10-kubeadm.conf (320 bytes)
I0510 17:27:00.722112 601963 ssh_runner.go:362] scp memory --> /lib/systemd/system/kubelet.service (352 bytes)
I0510 17:27:00.741040 601963 ssh_runner.go:362] scp memory --> /var/tmp/minikube/kubeadm.yaml.new (2306 bytes)
I0510 17:27:00.758360 601963 ssh_runner.go:195] Run: grep 192.168.49.2 control-plane.minikube.internal$ /etc/hosts
I0510 17:27:00.761637 601963 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\tcontrol-plane.minikube.internal$' "/etc/hosts"; echo "192.168.49.2 control-plane.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I0510 17:27:00.772016 601963 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0510 17:27:00.867233 601963 ssh_runner.go:195] Run: sudo systemctl start kubelet
I0510 17:27:00.882756 601963 certs.go:68] Setting up /home/jenkins/minikube-integration/20720-576361/.minikube/profiles/dockerenv-940737 for IP: 192.168.49.2
I0510 17:27:00.882767 601963 certs.go:194] generating shared ca certs ...
I0510 17:27:00.882792 601963 certs.go:226] acquiring lock for ca certs: {Name:mkfe9ed954168205918ea44d4015edac325c222c Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0510 17:27:00.882958 601963 certs.go:235] skipping valid "minikubeCA" ca cert: /home/jenkins/minikube-integration/20720-576361/.minikube/ca.key
I0510 17:27:00.882999 601963 certs.go:235] skipping valid "proxyClientCA" ca cert: /home/jenkins/minikube-integration/20720-576361/.minikube/proxy-client-ca.key
I0510 17:27:00.883015 601963 certs.go:256] generating profile certs ...
I0510 17:27:00.883068 601963 certs.go:363] generating signed profile cert for "minikube-user": /home/jenkins/minikube-integration/20720-576361/.minikube/profiles/dockerenv-940737/client.key
I0510 17:27:00.883088 601963 crypto.go:68] Generating cert /home/jenkins/minikube-integration/20720-576361/.minikube/profiles/dockerenv-940737/client.crt with IP's: []
I0510 17:27:01.067516 601963 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/20720-576361/.minikube/profiles/dockerenv-940737/client.crt ...
I0510 17:27:01.067530 601963 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/20720-576361/.minikube/profiles/dockerenv-940737/client.crt: {Name:mk0d5b5c40da791d84b689fd22aa4694b91a7ed1 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0510 17:27:01.067739 601963 crypto.go:164] Writing key to /home/jenkins/minikube-integration/20720-576361/.minikube/profiles/dockerenv-940737/client.key ...
I0510 17:27:01.067745 601963 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/20720-576361/.minikube/profiles/dockerenv-940737/client.key: {Name:mk01bb202dcfb49f19d81565de203805a10caf9a Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0510 17:27:01.068449 601963 certs.go:363] generating signed profile cert for "minikube": /home/jenkins/minikube-integration/20720-576361/.minikube/profiles/dockerenv-940737/apiserver.key.6f79c427
I0510 17:27:01.068463 601963 crypto.go:68] Generating cert /home/jenkins/minikube-integration/20720-576361/.minikube/profiles/dockerenv-940737/apiserver.crt.6f79c427 with IP's: [10.96.0.1 127.0.0.1 10.0.0.1 192.168.49.2]
I0510 17:27:01.690120 601963 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/20720-576361/.minikube/profiles/dockerenv-940737/apiserver.crt.6f79c427 ...
I0510 17:27:01.690138 601963 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/20720-576361/.minikube/profiles/dockerenv-940737/apiserver.crt.6f79c427: {Name:mk3dd0b17791cdb192e0ca54b2281088fa5e44ca Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0510 17:27:01.690361 601963 crypto.go:164] Writing key to /home/jenkins/minikube-integration/20720-576361/.minikube/profiles/dockerenv-940737/apiserver.key.6f79c427 ...
I0510 17:27:01.690370 601963 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/20720-576361/.minikube/profiles/dockerenv-940737/apiserver.key.6f79c427: {Name:mk6d8635dedd3c24f572725a28d64746e2e842c1 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0510 17:27:01.690468 601963 certs.go:381] copying /home/jenkins/minikube-integration/20720-576361/.minikube/profiles/dockerenv-940737/apiserver.crt.6f79c427 -> /home/jenkins/minikube-integration/20720-576361/.minikube/profiles/dockerenv-940737/apiserver.crt
I0510 17:27:01.690549 601963 certs.go:385] copying /home/jenkins/minikube-integration/20720-576361/.minikube/profiles/dockerenv-940737/apiserver.key.6f79c427 -> /home/jenkins/minikube-integration/20720-576361/.minikube/profiles/dockerenv-940737/apiserver.key
I0510 17:27:01.690611 601963 certs.go:363] generating signed profile cert for "aggregator": /home/jenkins/minikube-integration/20720-576361/.minikube/profiles/dockerenv-940737/proxy-client.key
I0510 17:27:01.690623 601963 crypto.go:68] Generating cert /home/jenkins/minikube-integration/20720-576361/.minikube/profiles/dockerenv-940737/proxy-client.crt with IP's: []
I0510 17:27:01.988486 601963 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/20720-576361/.minikube/profiles/dockerenv-940737/proxy-client.crt ...
I0510 17:27:01.988500 601963 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/20720-576361/.minikube/profiles/dockerenv-940737/proxy-client.crt: {Name:mka4cada7f77f8838f7453334612a3a378aa7869 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0510 17:27:01.989292 601963 crypto.go:164] Writing key to /home/jenkins/minikube-integration/20720-576361/.minikube/profiles/dockerenv-940737/proxy-client.key ...
I0510 17:27:01.989304 601963 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/20720-576361/.minikube/profiles/dockerenv-940737/proxy-client.key: {Name:mk321def904a367087c671360d506c88a7e07b9c Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0510 17:27:01.990068 601963 certs.go:484] found cert: /home/jenkins/minikube-integration/20720-576361/.minikube/certs/ca-key.pem (1675 bytes)
I0510 17:27:01.990105 601963 certs.go:484] found cert: /home/jenkins/minikube-integration/20720-576361/.minikube/certs/ca.pem (1082 bytes)
I0510 17:27:01.990132 601963 certs.go:484] found cert: /home/jenkins/minikube-integration/20720-576361/.minikube/certs/cert.pem (1123 bytes)
I0510 17:27:01.990152 601963 certs.go:484] found cert: /home/jenkins/minikube-integration/20720-576361/.minikube/certs/key.pem (1675 bytes)
I0510 17:27:01.992116 601963 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20720-576361/.minikube/ca.crt --> /var/lib/minikube/certs/ca.crt (1111 bytes)
I0510 17:27:02.017785 601963 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20720-576361/.minikube/ca.key --> /var/lib/minikube/certs/ca.key (1675 bytes)
I0510 17:27:02.041976 601963 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20720-576361/.minikube/proxy-client-ca.crt --> /var/lib/minikube/certs/proxy-client-ca.crt (1119 bytes)
I0510 17:27:02.066191 601963 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20720-576361/.minikube/proxy-client-ca.key --> /var/lib/minikube/certs/proxy-client-ca.key (1675 bytes)
I0510 17:27:02.090157 601963 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20720-576361/.minikube/profiles/dockerenv-940737/apiserver.crt --> /var/lib/minikube/certs/apiserver.crt (1424 bytes)
I0510 17:27:02.114588 601963 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20720-576361/.minikube/profiles/dockerenv-940737/apiserver.key --> /var/lib/minikube/certs/apiserver.key (1675 bytes)
I0510 17:27:02.138233 601963 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20720-576361/.minikube/profiles/dockerenv-940737/proxy-client.crt --> /var/lib/minikube/certs/proxy-client.crt (1147 bytes)
I0510 17:27:02.162649 601963 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20720-576361/.minikube/profiles/dockerenv-940737/proxy-client.key --> /var/lib/minikube/certs/proxy-client.key (1675 bytes)
I0510 17:27:02.187313 601963 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20720-576361/.minikube/ca.crt --> /usr/share/ca-certificates/minikubeCA.pem (1111 bytes)
I0510 17:27:02.211038 601963 ssh_runner.go:362] scp memory --> /var/lib/minikube/kubeconfig (738 bytes)
I0510 17:27:02.229008 601963 ssh_runner.go:195] Run: openssl version
I0510 17:27:02.237178 601963 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/minikubeCA.pem && ln -fs /usr/share/ca-certificates/minikubeCA.pem /etc/ssl/certs/minikubeCA.pem"
I0510 17:27:02.247792 601963 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/minikubeCA.pem
I0510 17:27:02.251513 601963 certs.go:528] hashing: -rw-r--r-- 1 root root 1111 May 10 17:19 /usr/share/ca-certificates/minikubeCA.pem
I0510 17:27:02.251572 601963 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem
I0510 17:27:02.259238 601963 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/b5213941.0 || ln -fs /etc/ssl/certs/minikubeCA.pem /etc/ssl/certs/b5213941.0"
I0510 17:27:02.268331 601963 ssh_runner.go:195] Run: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt
I0510 17:27:02.271562 601963 certs.go:399] 'apiserver-kubelet-client' cert doesn't exist, likely first start: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/certs/apiserver-kubelet-client.crt': No such file or directory
I0510 17:27:02.271615 601963 kubeadm.go:392] StartCluster: {Name:dockerenv-940737 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.46-1746731792-20718@sha256:074d9afa1e8827ea0e101248fc55098d304814b5d8bf485882a81afc90084155 Memory:2200 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.33.0 ClusterName:dockerenv-940737 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.49.2 Port:8443 KubernetesVersion:v1.33.0 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I0510 17:27:02.271678 601963 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:paused Name: Namespaces:[kube-system]}
I0510 17:27:02.271733 601963 ssh_runner.go:195] Run: sudo -s eval "crictl ps -a --quiet --label io.kubernetes.pod.namespace=kube-system"
I0510 17:27:02.308615 601963 cri.go:89] found id: ""
I0510 17:27:02.308689 601963 ssh_runner.go:195] Run: sudo ls /var/lib/kubelet/kubeadm-flags.env /var/lib/kubelet/config.yaml /var/lib/minikube/etcd
I0510 17:27:02.317917 601963 ssh_runner.go:195] Run: sudo cp /var/tmp/minikube/kubeadm.yaml.new /var/tmp/minikube/kubeadm.yaml
I0510 17:27:02.326648 601963 kubeadm.go:214] ignoring SystemVerification for kubeadm because of docker driver
I0510 17:27:02.326704 601963 ssh_runner.go:195] Run: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf
I0510 17:27:02.335608 601963 kubeadm.go:155] config check failed, skipping stale config cleanup: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
ls: cannot access '/etc/kubernetes/admin.conf': No such file or directory
ls: cannot access '/etc/kubernetes/kubelet.conf': No such file or directory
ls: cannot access '/etc/kubernetes/controller-manager.conf': No such file or directory
ls: cannot access '/etc/kubernetes/scheduler.conf': No such file or directory
I0510 17:27:02.335616 601963 kubeadm.go:157] found existing configuration files:
I0510 17:27:02.335667 601963 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf
I0510 17:27:02.344491 601963 kubeadm.go:163] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/admin.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/admin.conf: No such file or directory
I0510 17:27:02.344547 601963 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/admin.conf
I0510 17:27:02.352833 601963 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf
I0510 17:27:02.361446 601963 kubeadm.go:163] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/kubelet.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/kubelet.conf: No such file or directory
I0510 17:27:02.361500 601963 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/kubelet.conf
I0510 17:27:02.369882 601963 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf
I0510 17:27:02.378840 601963 kubeadm.go:163] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/controller-manager.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/controller-manager.conf: No such file or directory
I0510 17:27:02.378891 601963 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/controller-manager.conf
I0510 17:27:02.392429 601963 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf
I0510 17:27:02.402794 601963 kubeadm.go:163] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/scheduler.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/scheduler.conf: No such file or directory
I0510 17:27:02.402849 601963 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/scheduler.conf
I0510 17:27:02.411848 601963 ssh_runner.go:286] Start: /bin/bash -c "sudo env PATH="/var/lib/minikube/binaries/v1.33.0:$PATH" kubeadm init --config /var/tmp/minikube/kubeadm.yaml --ignore-preflight-errors=DirAvailable--etc-kubernetes-manifests,DirAvailable--var-lib-minikube,DirAvailable--var-lib-minikube-etcd,FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml,FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml,FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml,FileAvailable--etc-kubernetes-manifests-etcd.yaml,Port-10250,Swap,NumCPU,Mem,SystemVerification,FileContent--proc-sys-net-bridge-bridge-nf-call-iptables"
I0510 17:27:02.461160 601963 kubeadm.go:310] [init] Using Kubernetes version: v1.33.0
I0510 17:27:02.461208 601963 kubeadm.go:310] [preflight] Running pre-flight checks
I0510 17:27:02.479765 601963 kubeadm.go:310] [preflight] The system verification failed. Printing the output from the verification:
I0510 17:27:02.479830 601963 kubeadm.go:310] KERNEL_VERSION: 5.15.0-1083-aws
I0510 17:27:02.479864 601963 kubeadm.go:310] OS: Linux
I0510 17:27:02.479922 601963 kubeadm.go:310] CGROUPS_CPU: enabled
I0510 17:27:02.479969 601963 kubeadm.go:310] CGROUPS_CPUACCT: enabled
I0510 17:27:02.480015 601963 kubeadm.go:310] CGROUPS_CPUSET: enabled
I0510 17:27:02.480062 601963 kubeadm.go:310] CGROUPS_DEVICES: enabled
I0510 17:27:02.480109 601963 kubeadm.go:310] CGROUPS_FREEZER: enabled
I0510 17:27:02.480155 601963 kubeadm.go:310] CGROUPS_MEMORY: enabled
I0510 17:27:02.480202 601963 kubeadm.go:310] CGROUPS_PIDS: enabled
I0510 17:27:02.480249 601963 kubeadm.go:310] CGROUPS_HUGETLB: enabled
I0510 17:27:02.480294 601963 kubeadm.go:310] CGROUPS_BLKIO: enabled
I0510 17:27:02.545339 601963 kubeadm.go:310] [preflight] Pulling images required for setting up a Kubernetes cluster
I0510 17:27:02.545440 601963 kubeadm.go:310] [preflight] This might take a minute or two, depending on the speed of your internet connection
I0510 17:27:02.545535 601963 kubeadm.go:310] [preflight] You can also perform this action beforehand using 'kubeadm config images pull'
I0510 17:27:02.551073 601963 kubeadm.go:310] [certs] Using certificateDir folder "/var/lib/minikube/certs"
I0510 17:27:02.557157 601963 out.go:235] - Generating certificates and keys ...
I0510 17:27:02.557293 601963 kubeadm.go:310] [certs] Using existing ca certificate authority
I0510 17:27:02.557377 601963 kubeadm.go:310] [certs] Using existing apiserver certificate and key on disk
I0510 17:27:02.902949 601963 kubeadm.go:310] [certs] Generating "apiserver-kubelet-client" certificate and key
I0510 17:27:03.243781 601963 kubeadm.go:310] [certs] Generating "front-proxy-ca" certificate and key
I0510 17:27:03.787274 601963 kubeadm.go:310] [certs] Generating "front-proxy-client" certificate and key
I0510 17:27:04.079021 601963 kubeadm.go:310] [certs] Generating "etcd/ca" certificate and key
I0510 17:27:04.583699 601963 kubeadm.go:310] [certs] Generating "etcd/server" certificate and key
I0510 17:27:04.583914 601963 kubeadm.go:310] [certs] etcd/server serving cert is signed for DNS names [dockerenv-940737 localhost] and IPs [192.168.49.2 127.0.0.1 ::1]
I0510 17:27:05.080288 601963 kubeadm.go:310] [certs] Generating "etcd/peer" certificate and key
I0510 17:27:05.080584 601963 kubeadm.go:310] [certs] etcd/peer serving cert is signed for DNS names [dockerenv-940737 localhost] and IPs [192.168.49.2 127.0.0.1 ::1]
I0510 17:27:05.533887 601963 kubeadm.go:310] [certs] Generating "etcd/healthcheck-client" certificate and key
I0510 17:27:05.805082 601963 kubeadm.go:310] [certs] Generating "apiserver-etcd-client" certificate and key
I0510 17:27:06.220615 601963 kubeadm.go:310] [certs] Generating "sa" key and public key
I0510 17:27:06.220822 601963 kubeadm.go:310] [kubeconfig] Using kubeconfig folder "/etc/kubernetes"
I0510 17:27:06.458274 601963 kubeadm.go:310] [kubeconfig] Writing "admin.conf" kubeconfig file
I0510 17:27:06.690537 601963 kubeadm.go:310] [kubeconfig] Writing "super-admin.conf" kubeconfig file
I0510 17:27:07.673205 601963 kubeadm.go:310] [kubeconfig] Writing "kubelet.conf" kubeconfig file
I0510 17:27:08.136862 601963 kubeadm.go:310] [kubeconfig] Writing "controller-manager.conf" kubeconfig file
I0510 17:27:09.007780 601963 kubeadm.go:310] [kubeconfig] Writing "scheduler.conf" kubeconfig file
I0510 17:27:09.007869 601963 kubeadm.go:310] [etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
I0510 17:27:09.008217 601963 kubeadm.go:310] [control-plane] Using manifest folder "/etc/kubernetes/manifests"
I0510 17:27:09.011811 601963 out.go:235] - Booting up control plane ...
I0510 17:27:09.011920 601963 kubeadm.go:310] [control-plane] Creating static Pod manifest for "kube-apiserver"
I0510 17:27:09.011995 601963 kubeadm.go:310] [control-plane] Creating static Pod manifest for "kube-controller-manager"
I0510 17:27:09.012059 601963 kubeadm.go:310] [control-plane] Creating static Pod manifest for "kube-scheduler"
I0510 17:27:09.022783 601963 kubeadm.go:310] [kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
I0510 17:27:09.029907 601963 kubeadm.go:310] [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
I0510 17:27:09.030174 601963 kubeadm.go:310] [kubelet-start] Starting the kubelet
I0510 17:27:09.139563 601963 kubeadm.go:310] [wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests"
I0510 17:27:09.139674 601963 kubeadm.go:310] [kubelet-check] Waiting for a healthy kubelet at http://127.0.0.1:10248/healthz. This can take up to 4m0s
I0510 17:27:10.157736 601963 kubeadm.go:310] [kubelet-check] The kubelet is healthy after 1.018328938s
I0510 17:27:10.160901 601963 kubeadm.go:310] [control-plane-check] Waiting for healthy control plane components. This can take up to 4m0s
I0510 17:27:10.160990 601963 kubeadm.go:310] [control-plane-check] Checking kube-apiserver at https://192.168.49.2:8443/livez
I0510 17:27:10.161238 601963 kubeadm.go:310] [control-plane-check] Checking kube-controller-manager at https://127.0.0.1:10257/healthz
I0510 17:27:10.161320 601963 kubeadm.go:310] [control-plane-check] Checking kube-scheduler at https://127.0.0.1:10259/livez
I0510 17:27:14.524597 601963 kubeadm.go:310] [control-plane-check] kube-controller-manager is healthy after 4.363328599s
I0510 17:27:16.314606 601963 kubeadm.go:310] [control-plane-check] kube-scheduler is healthy after 6.153643853s
I0510 17:27:17.162589 601963 kubeadm.go:310] [control-plane-check] kube-apiserver is healthy after 7.001618588s
I0510 17:27:17.181933 601963 kubeadm.go:310] [upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
I0510 17:27:17.196529 601963 kubeadm.go:310] [kubelet] Creating a ConfigMap "kubelet-config" in namespace kube-system with the configuration for the kubelets in the cluster
I0510 17:27:17.224209 601963 kubeadm.go:310] [upload-certs] Skipping phase. Please see --upload-certs
I0510 17:27:17.224399 601963 kubeadm.go:310] [mark-control-plane] Marking the node dockerenv-940737 as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]
I0510 17:27:17.238325 601963 kubeadm.go:310] [bootstrap-token] Using token: pnqclp.ytodvh9o3r9cu7s6
I0510 17:27:17.241273 601963 out.go:235] - Configuring RBAC rules ...
I0510 17:27:17.241419 601963 kubeadm.go:310] [bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
I0510 17:27:17.247484 601963 kubeadm.go:310] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes
I0510 17:27:17.255861 601963 kubeadm.go:310] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
I0510 17:27:17.260054 601963 kubeadm.go:310] [bootstrap-token] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
I0510 17:27:17.267604 601963 kubeadm.go:310] [bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
I0510 17:27:17.272083 601963 kubeadm.go:310] [bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
I0510 17:27:17.569173 601963 kubeadm.go:310] [kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
I0510 17:27:17.996627 601963 kubeadm.go:310] [addons] Applied essential addon: CoreDNS
I0510 17:27:18.569676 601963 kubeadm.go:310] [addons] Applied essential addon: kube-proxy
I0510 17:27:18.570653 601963 kubeadm.go:310]
I0510 17:27:18.570717 601963 kubeadm.go:310] Your Kubernetes control-plane has initialized successfully!
I0510 17:27:18.570722 601963 kubeadm.go:310]
I0510 17:27:18.570797 601963 kubeadm.go:310] To start using your cluster, you need to run the following as a regular user:
I0510 17:27:18.570807 601963 kubeadm.go:310]
I0510 17:27:18.570831 601963 kubeadm.go:310] mkdir -p $HOME/.kube
I0510 17:27:18.570888 601963 kubeadm.go:310] sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
I0510 17:27:18.570937 601963 kubeadm.go:310] sudo chown $(id -u):$(id -g) $HOME/.kube/config
I0510 17:27:18.570940 601963 kubeadm.go:310]
I0510 17:27:18.570992 601963 kubeadm.go:310] Alternatively, if you are the root user, you can run:
I0510 17:27:18.570996 601963 kubeadm.go:310]
I0510 17:27:18.571041 601963 kubeadm.go:310] export KUBECONFIG=/etc/kubernetes/admin.conf
I0510 17:27:18.571045 601963 kubeadm.go:310]
I0510 17:27:18.571095 601963 kubeadm.go:310] You should now deploy a pod network to the cluster.
I0510 17:27:18.571168 601963 kubeadm.go:310] Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
I0510 17:27:18.571234 601963 kubeadm.go:310] https://kubernetes.io/docs/concepts/cluster-administration/addons/
I0510 17:27:18.571237 601963 kubeadm.go:310]
I0510 17:27:18.571320 601963 kubeadm.go:310] You can now join any number of control-plane nodes by copying certificate authorities
I0510 17:27:18.571394 601963 kubeadm.go:310] and service account keys on each node and then running the following as root:
I0510 17:27:18.571399 601963 kubeadm.go:310]
I0510 17:27:18.571480 601963 kubeadm.go:310] kubeadm join control-plane.minikube.internal:8443 --token pnqclp.ytodvh9o3r9cu7s6 \
I0510 17:27:18.571581 601963 kubeadm.go:310] --discovery-token-ca-cert-hash sha256:19666f02605c966963ab80dcabc63399d088c4a1d2d8af636d5e78eab16f47e4 \
I0510 17:27:18.571601 601963 kubeadm.go:310] --control-plane
I0510 17:27:18.571604 601963 kubeadm.go:310]
I0510 17:27:18.571687 601963 kubeadm.go:310] Then you can join any number of worker nodes by running the following on each as root:
I0510 17:27:18.571690 601963 kubeadm.go:310]
I0510 17:27:18.572033 601963 kubeadm.go:310] kubeadm join control-plane.minikube.internal:8443 --token pnqclp.ytodvh9o3r9cu7s6 \
I0510 17:27:18.572138 601963 kubeadm.go:310] --discovery-token-ca-cert-hash sha256:19666f02605c966963ab80dcabc63399d088c4a1d2d8af636d5e78eab16f47e4
I0510 17:27:18.576580 601963 kubeadm.go:310] [WARNING SystemVerification]: cgroups v1 support is in maintenance mode, please migrate to cgroups v2
I0510 17:27:18.576794 601963 kubeadm.go:310] [WARNING SystemVerification]: failed to parse kernel config: unable to load kernel module: "configs", output: "modprobe: FATAL: Module configs not found in directory /lib/modules/5.15.0-1083-aws\n", err: exit status 1
I0510 17:27:18.576896 601963 kubeadm.go:310] [WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'
I0510 17:27:18.576914 601963 cni.go:84] Creating CNI manager for ""
I0510 17:27:18.576920 601963 cni.go:143] "docker" driver + "containerd" runtime found, recommending kindnet
I0510 17:27:18.579913 601963 out.go:177] * Configuring CNI (Container Networking Interface) ...
I0510 17:27:18.582820 601963 ssh_runner.go:195] Run: stat /opt/cni/bin/portmap
I0510 17:27:18.586622 601963 cni.go:182] applying CNI manifest using /var/lib/minikube/binaries/v1.33.0/kubectl ...
I0510 17:27:18.586635 601963 ssh_runner.go:362] scp memory --> /var/tmp/minikube/cni.yaml (2601 bytes)
I0510 17:27:18.607332 601963 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.33.0/kubectl apply --kubeconfig=/var/lib/minikube/kubeconfig -f /var/tmp/minikube/cni.yaml
I0510 17:27:18.888902 601963 ssh_runner.go:195] Run: /bin/bash -c "cat /proc/$(pgrep kube-apiserver)/oom_adj"
I0510 17:27:18.889033 601963 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.33.0/kubectl create clusterrolebinding minikube-rbac --clusterrole=cluster-admin --serviceaccount=kube-system:default --kubeconfig=/var/lib/minikube/kubeconfig
I0510 17:27:18.889121 601963 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.33.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig label --overwrite nodes dockerenv-940737 minikube.k8s.io/updated_at=2025_05_10T17_27_18_0700 minikube.k8s.io/version=v1.35.0 minikube.k8s.io/commit=e96c83983357cd8557f3cdfe077a25cc73d485a4 minikube.k8s.io/name=dockerenv-940737 minikube.k8s.io/primary=true
I0510 17:27:19.089589 601963 ops.go:34] apiserver oom_adj: -16
I0510 17:27:19.089607 601963 kubeadm.go:1105] duration metric: took 200.624606ms to wait for elevateKubeSystemPrivileges
I0510 17:27:19.089620 601963 kubeadm.go:394] duration metric: took 16.818009932s to StartCluster
I0510 17:27:19.089635 601963 settings.go:142] acquiring lock: {Name:mk0ff2866fac8ab6b2ec95611c2f65673d1e5476 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0510 17:27:19.089695 601963 settings.go:150] Updating kubeconfig: /home/jenkins/minikube-integration/20720-576361/kubeconfig
I0510 17:27:19.090319 601963 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/20720-576361/kubeconfig: {Name:mkf0c5e390f4706f2451d414f3fd57ceca53a75b Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0510 17:27:19.090540 601963 start.go:235] Will wait 6m0s for node &{Name: IP:192.168.49.2 Port:8443 KubernetesVersion:v1.33.0 ContainerRuntime:containerd ControlPlane:true Worker:true}
I0510 17:27:19.090648 601963 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.33.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml"
I0510 17:27:19.090933 601963 config.go:182] Loaded profile config "dockerenv-940737": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.33.0
I0510 17:27:19.090934 601963 addons.go:511] enable addons start: toEnable=map[ambassador:false amd-gpu-device-plugin:false auto-pause:false cloud-spanner:false csi-hostpath-driver:false dashboard:false default-storageclass:true efk:false freshpod:false gcp-auth:false gvisor:false headlamp:false inaccel:false ingress:false ingress-dns:false inspektor-gadget:false istio:false istio-provisioner:false kong:false kubeflow:false kubevirt:false logviewer:false metallb:false metrics-server:false nvidia-device-plugin:false nvidia-driver-installer:false nvidia-gpu-device-plugin:false olm:false pod-security-policy:false portainer:false registry:false registry-aliases:false registry-creds:false storage-provisioner:true storage-provisioner-gluster:false storage-provisioner-rancher:false volcano:false volumesnapshots:false yakd:false]
I0510 17:27:19.091056 601963 addons.go:69] Setting storage-provisioner=true in profile "dockerenv-940737"
I0510 17:27:19.091070 601963 addons.go:238] Setting addon storage-provisioner=true in "dockerenv-940737"
I0510 17:27:19.091095 601963 host.go:66] Checking if "dockerenv-940737" exists ...
I0510 17:27:19.091562 601963 cli_runner.go:164] Run: docker container inspect dockerenv-940737 --format={{.State.Status}}
I0510 17:27:19.091691 601963 addons.go:69] Setting default-storageclass=true in profile "dockerenv-940737"
I0510 17:27:19.091702 601963 addons_storage_classes.go:33] enableOrDisableStorageClasses default-storageclass=true on "dockerenv-940737"
I0510 17:27:19.092038 601963 cli_runner.go:164] Run: docker container inspect dockerenv-940737 --format={{.State.Status}}
I0510 17:27:19.093703 601963 out.go:177] * Verifying Kubernetes components...
I0510 17:27:19.098039 601963 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0510 17:27:19.124179 601963 out.go:177] - Using image gcr.io/k8s-minikube/storage-provisioner:v5
I0510 17:27:19.124183 601963 addons.go:238] Setting addon default-storageclass=true in "dockerenv-940737"
I0510 17:27:19.124211 601963 host.go:66] Checking if "dockerenv-940737" exists ...
I0510 17:27:19.124627 601963 cli_runner.go:164] Run: docker container inspect dockerenv-940737 --format={{.State.Status}}
I0510 17:27:19.127314 601963 addons.go:435] installing /etc/kubernetes/addons/storage-provisioner.yaml
I0510 17:27:19.127325 601963 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/storage-provisioner.yaml (2676 bytes)
I0510 17:27:19.127381 601963 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" dockerenv-940737
I0510 17:27:19.161653 601963 addons.go:435] installing /etc/kubernetes/addons/storageclass.yaml
I0510 17:27:19.161666 601963 ssh_runner.go:362] scp storageclass/storageclass.yaml --> /etc/kubernetes/addons/storageclass.yaml (271 bytes)
I0510 17:27:19.161738 601963 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" dockerenv-940737
I0510 17:27:19.169282 601963 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33509 SSHKeyPath:/home/jenkins/minikube-integration/20720-576361/.minikube/machines/dockerenv-940737/id_rsa Username:docker}
I0510 17:27:19.195529 601963 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33509 SSHKeyPath:/home/jenkins/minikube-integration/20720-576361/.minikube/machines/dockerenv-940737/id_rsa Username:docker}
I0510 17:27:19.332904 601963 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.33.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml | sed -e '/^ forward . \/etc\/resolv.conf.*/i \ hosts {\n 192.168.49.1 host.minikube.internal\n fallthrough\n }' -e '/^ errors *$/i \ log' | sudo /var/lib/minikube/binaries/v1.33.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig replace -f -"
I0510 17:27:19.332995 601963 ssh_runner.go:195] Run: sudo systemctl start kubelet
I0510 17:27:19.338720 601963 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.33.0/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml
I0510 17:27:19.341031 601963 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.33.0/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml
I0510 17:27:19.754426 601963 start.go:971] {"host.minikube.internal": 192.168.49.1} host record injected into CoreDNS's ConfigMap
I0510 17:27:19.755891 601963 api_server.go:52] waiting for apiserver process to appear ...
I0510 17:27:19.755941 601963 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I0510 17:27:19.897863 601963 api_server.go:72] duration metric: took 807.298882ms to wait for apiserver process to appear ...
I0510 17:27:19.897871 601963 api_server.go:88] waiting for apiserver healthz status ...
I0510 17:27:19.897884 601963 api_server.go:253] Checking apiserver healthz at https://192.168.49.2:8443/healthz ...
I0510 17:27:19.908451 601963 api_server.go:279] https://192.168.49.2:8443/healthz returned 200:
ok
I0510 17:27:19.909986 601963 api_server.go:141] control plane version: v1.33.0
I0510 17:27:19.910000 601963 api_server.go:131] duration metric: took 12.124466ms to wait for apiserver health ...
I0510 17:27:19.910007 601963 system_pods.go:43] waiting for kube-system pods to appear ...
I0510 17:27:19.912781 601963 out.go:177] * Enabled addons: storage-provisioner, default-storageclass
I0510 17:27:19.912897 601963 system_pods.go:59] 5 kube-system pods found
I0510 17:27:19.912918 601963 system_pods.go:61] "etcd-dockerenv-940737" [b2e381b6-451e-4e18-aea0-22973ac978f8] Running / Ready:ContainersNotReady (containers with unready status: [etcd]) / ContainersReady:ContainersNotReady (containers with unready status: [etcd])
I0510 17:27:19.912928 601963 system_pods.go:61] "kube-apiserver-dockerenv-940737" [5bcfa44f-ac33-46a6-9055-7af9e6809d71] Running / Ready:ContainersNotReady (containers with unready status: [kube-apiserver]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-apiserver])
I0510 17:27:19.912937 601963 system_pods.go:61] "kube-controller-manager-dockerenv-940737" [5fbee51f-1b5c-428b-a669-8452ce8e9af7] Running / Ready:ContainersNotReady (containers with unready status: [kube-controller-manager]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-controller-manager])
I0510 17:27:19.912947 601963 system_pods.go:61] "kube-scheduler-dockerenv-940737" [0e79575b-e268-4f4f-8640-324beca3a21c] Running / Ready:ContainersNotReady (containers with unready status: [kube-scheduler]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-scheduler])
I0510 17:27:19.912951 601963 system_pods.go:61] "storage-provisioner" [fdf65ddb-385b-449c-8a6e-e559e1095b47] Pending: PodScheduled:Unschedulable (0/1 nodes are available: 1 node(s) had untolerated taint {node.kubernetes.io/not-ready: }. preemption: 0/1 nodes are available: 1 Preemption is not helpful for scheduling.)
I0510 17:27:19.912954 601963 system_pods.go:74] duration metric: took 2.944342ms to wait for pod list to return data ...
I0510 17:27:19.912962 601963 kubeadm.go:578] duration metric: took 822.404178ms to wait for: map[apiserver:true system_pods:true]
I0510 17:27:19.912974 601963 node_conditions.go:102] verifying NodePressure condition ...
I0510 17:27:19.915705 601963 addons.go:514] duration metric: took 824.769979ms for enable addons: enabled=[storage-provisioner default-storageclass]
I0510 17:27:19.916315 601963 node_conditions.go:122] node storage ephemeral capacity is 203034800Ki
I0510 17:27:19.916332 601963 node_conditions.go:123] node cpu capacity is 2
I0510 17:27:19.916343 601963 node_conditions.go:105] duration metric: took 3.365058ms to run NodePressure ...
I0510 17:27:19.916353 601963 start.go:241] waiting for startup goroutines ...
I0510 17:27:20.258198 601963 kapi.go:214] "coredns" deployment in "kube-system" namespace and "dockerenv-940737" context rescaled to 1 replicas
I0510 17:27:20.258229 601963 start.go:246] waiting for cluster config update ...
I0510 17:27:20.258240 601963 start.go:255] writing updated cluster config ...
I0510 17:27:20.258542 601963 ssh_runner.go:195] Run: rm -f paused
I0510 17:27:20.318318 601963 start.go:607] kubectl: 1.33.0, cluster: 1.33.0 (minor skew: 0)
I0510 17:27:20.321689 601963 out.go:177] * Done! kubectl is now configured to use "dockerenv-940737" cluster and "default" namespace by default
==> container status <==
CONTAINER IMAGE CREATED STATE NAME ATTEMPT POD ID POD
22afd90d952e6 ee75e27fff91c 11 seconds ago Running kindnet-cni 0 6f747cadf2cd0 kindnet-6zjb8
5c34073b26db0 8d27a60846a20 11 seconds ago Running kube-proxy 0 7167d2994f522 kube-proxy-nttll
d4d5e664d34b5 ba04bb24b9575 11 seconds ago Running storage-provisioner 0 2f8a3a12d5010 storage-provisioner
285cca053d09c 31747a36ce712 24 seconds ago Running etcd 0 34134968c1a26 etcd-dockerenv-940737
f5196eec3f1ef 61f3acc54bb62 24 seconds ago Running kube-apiserver 0 1ef92c730922b kube-apiserver-dockerenv-940737
eab0b6c425999 5372350fd0a1e 24 seconds ago Running kube-controller-manager 0 197320ed6362a kube-controller-manager-dockerenv-940737
038c6e33c836d f8984990ac26b 24 seconds ago Running kube-scheduler 0 c4de7a4df95a0 kube-scheduler-dockerenv-940737
==> containerd <==
May 10 17:27:10 dockerenv-940737 containerd[840]: time="2025-05-10T17:27:10.944296743Z" level=info msg="StartContainer for \"eab0b6c425999797eed3c3d2485aaeeefa2a9ec05d1e657471af1010ff366a1d\" returns successfully"
May 10 17:27:10 dockerenv-940737 containerd[840]: time="2025-05-10T17:27:10.944635277Z" level=info msg="StartContainer for \"038c6e33c836d6a270c0d6fa7348371e004ba8140affd97c4802ec6e4cf91290\" returns successfully"
May 10 17:27:11 dockerenv-940737 containerd[840]: time="2025-05-10T17:27:11.058729815Z" level=info msg="StartContainer for \"f5196eec3f1ef88043643dbf65e8b3e93cf9f06d9cc4be2855e6933cc4bec917\" returns successfully"
May 10 17:27:11 dockerenv-940737 containerd[840]: time="2025-05-10T17:27:11.116684326Z" level=info msg="StartContainer for \"285cca053d09c38c3fe0ef753666a9f2414326880f251ea0bd8343d6fae952f2\" returns successfully"
May 10 17:27:22 dockerenv-940737 containerd[840]: time="2025-05-10T17:27:22.878892867Z" level=info msg="RunPodSandbox for &PodSandboxMetadata{Name:storage-provisioner,Uid:fdf65ddb-385b-449c-8a6e-e559e1095b47,Namespace:kube-system,Attempt:0,}"
May 10 17:27:22 dockerenv-940737 containerd[840]: time="2025-05-10T17:27:22.981996967Z" level=info msg="RunPodSandbox for &PodSandboxMetadata{Name:storage-provisioner,Uid:fdf65ddb-385b-449c-8a6e-e559e1095b47,Namespace:kube-system,Attempt:0,} returns sandbox id \"2f8a3a12d50109111b7c7d5192366de785efade817fddec494743ea69144e2d7\""
May 10 17:27:22 dockerenv-940737 containerd[840]: time="2025-05-10T17:27:22.994172626Z" level=info msg="RunPodSandbox for &PodSandboxMetadata{Name:kube-proxy-nttll,Uid:d921cac3-cd47-4aa8-a134-daf41e1f22d1,Namespace:kube-system,Attempt:0,}"
May 10 17:27:23 dockerenv-940737 containerd[840]: time="2025-05-10T17:27:23.004521206Z" level=info msg="RunPodSandbox for &PodSandboxMetadata{Name:kindnet-6zjb8,Uid:e47adb02-aece-490e-b915-083b095b90ac,Namespace:kube-system,Attempt:0,}"
May 10 17:27:23 dockerenv-940737 containerd[840]: time="2025-05-10T17:27:23.006733603Z" level=info msg="CreateContainer within sandbox \"2f8a3a12d50109111b7c7d5192366de785efade817fddec494743ea69144e2d7\" for container &ContainerMetadata{Name:storage-provisioner,Attempt:0,}"
May 10 17:27:23 dockerenv-940737 containerd[840]: time="2025-05-10T17:27:23.073543458Z" level=info msg="CreateContainer within sandbox \"2f8a3a12d50109111b7c7d5192366de785efade817fddec494743ea69144e2d7\" for &ContainerMetadata{Name:storage-provisioner,Attempt:0,} returns container id \"d4d5e664d34b53e2e5df2adf4edbb9eb107ae756e9309f951599ef3aabcb3360\""
May 10 17:27:23 dockerenv-940737 containerd[840]: time="2025-05-10T17:27:23.076468190Z" level=info msg="StartContainer for \"d4d5e664d34b53e2e5df2adf4edbb9eb107ae756e9309f951599ef3aabcb3360\""
May 10 17:27:23 dockerenv-940737 containerd[840]: time="2025-05-10T17:27:23.130903658Z" level=info msg="RunPodSandbox for &PodSandboxMetadata{Name:kube-proxy-nttll,Uid:d921cac3-cd47-4aa8-a134-daf41e1f22d1,Namespace:kube-system,Attempt:0,} returns sandbox id \"7167d2994f522c3863e06d17f93cdd2d62e2f375312c4bdafa9911b762378bd0\""
May 10 17:27:23 dockerenv-940737 containerd[840]: time="2025-05-10T17:27:23.142156407Z" level=info msg="CreateContainer within sandbox \"7167d2994f522c3863e06d17f93cdd2d62e2f375312c4bdafa9911b762378bd0\" for container &ContainerMetadata{Name:kube-proxy,Attempt:0,}"
May 10 17:27:23 dockerenv-940737 containerd[840]: time="2025-05-10T17:27:23.179456417Z" level=info msg="CreateContainer within sandbox \"7167d2994f522c3863e06d17f93cdd2d62e2f375312c4bdafa9911b762378bd0\" for &ContainerMetadata{Name:kube-proxy,Attempt:0,} returns container id \"5c34073b26db0586f64776214ed398d39af663f33963da20f908d0f54b442f03\""
May 10 17:27:23 dockerenv-940737 containerd[840]: time="2025-05-10T17:27:23.193088070Z" level=info msg="StartContainer for \"5c34073b26db0586f64776214ed398d39af663f33963da20f908d0f54b442f03\""
May 10 17:27:23 dockerenv-940737 containerd[840]: time="2025-05-10T17:27:23.227287325Z" level=info msg="StartContainer for \"d4d5e664d34b53e2e5df2adf4edbb9eb107ae756e9309f951599ef3aabcb3360\" returns successfully"
May 10 17:27:23 dockerenv-940737 containerd[840]: time="2025-05-10T17:27:23.288932604Z" level=info msg="StartContainer for \"5c34073b26db0586f64776214ed398d39af663f33963da20f908d0f54b442f03\" returns successfully"
May 10 17:27:23 dockerenv-940737 containerd[840]: time="2025-05-10T17:27:23.351912123Z" level=info msg="RunPodSandbox for &PodSandboxMetadata{Name:kindnet-6zjb8,Uid:e47adb02-aece-490e-b915-083b095b90ac,Namespace:kube-system,Attempt:0,} returns sandbox id \"6f747cadf2cd0c42f55a6d847f49f17204507e020ebc888c35f391e378f90363\""
May 10 17:27:23 dockerenv-940737 containerd[840]: time="2025-05-10T17:27:23.359743305Z" level=info msg="CreateContainer within sandbox \"6f747cadf2cd0c42f55a6d847f49f17204507e020ebc888c35f391e378f90363\" for container &ContainerMetadata{Name:kindnet-cni,Attempt:0,}"
May 10 17:27:23 dockerenv-940737 containerd[840]: time="2025-05-10T17:27:23.384403708Z" level=info msg="CreateContainer within sandbox \"6f747cadf2cd0c42f55a6d847f49f17204507e020ebc888c35f391e378f90363\" for &ContainerMetadata{Name:kindnet-cni,Attempt:0,} returns container id \"22afd90d952e64f7c12ff80ca5358ef3d9c540a3c0199d362401b5e8b69eda1c\""
May 10 17:27:23 dockerenv-940737 containerd[840]: time="2025-05-10T17:27:23.385182741Z" level=info msg="StartContainer for \"22afd90d952e64f7c12ff80ca5358ef3d9c540a3c0199d362401b5e8b69eda1c\""
May 10 17:27:23 dockerenv-940737 containerd[840]: time="2025-05-10T17:27:23.467318141Z" level=info msg="StartContainer for \"22afd90d952e64f7c12ff80ca5358ef3d9c540a3c0199d362401b5e8b69eda1c\" returns successfully"
May 10 17:27:23 dockerenv-940737 containerd[840]: time="2025-05-10T17:27:23.481543318Z" level=info msg="RunPodSandbox for &PodSandboxMetadata{Name:coredns-674b8bbfcf-gnw5f,Uid:f7b2ff82-68d5-4407-913c-dd86dd606133,Namespace:kube-system,Attempt:0,}"
May 10 17:27:23 dockerenv-940737 containerd[840]: time="2025-05-10T17:27:23.517801248Z" level=error msg="RunPodSandbox for &PodSandboxMetadata{Name:coredns-674b8bbfcf-gnw5f,Uid:f7b2ff82-68d5-4407-913c-dd86dd606133,Namespace:kube-system,Attempt:0,} failed, error" error="failed to setup network for sandbox \"890f39a058ded369808bb8d4d718e8adc2d0bdafc83f4b926584ce6db45b3d82\": failed to find network info for sandbox \"890f39a058ded369808bb8d4d718e8adc2d0bdafc83f4b926584ce6db45b3d82\""
May 10 17:27:28 dockerenv-940737 containerd[840]: time="2025-05-10T17:27:28.369508629Z" level=info msg="No cni config template is specified, wait for other system components to drop the config."
==> describe nodes <==
Name: dockerenv-940737
Roles: control-plane
Labels: beta.kubernetes.io/arch=arm64
beta.kubernetes.io/os=linux
kubernetes.io/arch=arm64
kubernetes.io/hostname=dockerenv-940737
kubernetes.io/os=linux
minikube.k8s.io/commit=e96c83983357cd8557f3cdfe077a25cc73d485a4
minikube.k8s.io/name=dockerenv-940737
minikube.k8s.io/primary=true
minikube.k8s.io/updated_at=2025_05_10T17_27_18_0700
minikube.k8s.io/version=v1.35.0
node-role.kubernetes.io/control-plane=
node.kubernetes.io/exclude-from-external-load-balancers=
Annotations: kubeadm.alpha.kubernetes.io/cri-socket: unix:///run/containerd/containerd.sock
node.alpha.kubernetes.io/ttl: 0
volumes.kubernetes.io/controller-managed-attach-detach: true
CreationTimestamp: Sat, 10 May 2025 17:27:15 +0000
Taints: <none>
Unschedulable: false
Lease:
HolderIdentity: dockerenv-940737
AcquireTime: <unset>
RenewTime: Sat, 10 May 2025 17:27:28 +0000
Conditions:
Type Status LastHeartbeatTime LastTransitionTime Reason Message
---- ------ ----------------- ------------------ ------ -------
MemoryPressure False Sat, 10 May 2025 17:27:28 +0000 Sat, 10 May 2025 17:27:11 +0000 KubeletHasSufficientMemory kubelet has sufficient memory available
DiskPressure False Sat, 10 May 2025 17:27:28 +0000 Sat, 10 May 2025 17:27:11 +0000 KubeletHasNoDiskPressure kubelet has no disk pressure
PIDPressure False Sat, 10 May 2025 17:27:28 +0000 Sat, 10 May 2025 17:27:11 +0000 KubeletHasSufficientPID kubelet has sufficient PID available
Ready True Sat, 10 May 2025 17:27:28 +0000 Sat, 10 May 2025 17:27:15 +0000 KubeletReady kubelet is posting ready status
Addresses:
InternalIP: 192.168.49.2
Hostname: dockerenv-940737
Capacity:
cpu: 2
ephemeral-storage: 203034800Ki
hugepages-1Gi: 0
hugepages-2Mi: 0
hugepages-32Mi: 0
hugepages-64Ki: 0
memory: 8022296Ki
pods: 110
Allocatable:
cpu: 2
ephemeral-storage: 203034800Ki
hugepages-1Gi: 0
hugepages-2Mi: 0
hugepages-32Mi: 0
hugepages-64Ki: 0
memory: 8022296Ki
pods: 110
System Info:
Machine ID: ff3cb26aee244f3eb252db38642b121c
System UUID: e9f76bf3-8dda-4224-b65f-99e65eb528cf
Boot ID: 031bd395-9ef9-4833-9c39-69ef8b2481fc
Kernel Version: 5.15.0-1083-aws
OS Image: Ubuntu 22.04.5 LTS
Operating System: linux
Architecture: arm64
Container Runtime Version: containerd://1.7.27
Kubelet Version: v1.33.0
Kube-Proxy Version:
PodCIDR: 10.244.0.0/24
PodCIDRs: 10.244.0.0/24
Non-terminated Pods: (8 in total)
Namespace Name CPU Requests CPU Limits Memory Requests Memory Limits Age
--------- ---- ------------ ---------- --------------- ------------- ---
kube-system coredns-674b8bbfcf-gnw5f 100m (5%) 0 (0%) 70Mi (0%) 170Mi (2%) 12s
kube-system etcd-dockerenv-940737 100m (5%) 0 (0%) 100Mi (1%) 0 (0%) 17s
kube-system kindnet-6zjb8 100m (5%) 100m (5%) 50Mi (0%) 50Mi (0%) 13s
kube-system kube-apiserver-dockerenv-940737 250m (12%) 0 (0%) 0 (0%) 0 (0%) 17s
kube-system kube-controller-manager-dockerenv-940737 200m (10%) 0 (0%) 0 (0%) 0 (0%) 17s
kube-system kube-proxy-nttll 0 (0%) 0 (0%) 0 (0%) 0 (0%) 13s
kube-system kube-scheduler-dockerenv-940737 100m (5%) 0 (0%) 0 (0%) 0 (0%) 17s
kube-system storage-provisioner 0 (0%) 0 (0%) 0 (0%) 0 (0%) 16s
Allocated resources:
(Total limits may be over 100 percent, i.e., overcommitted.)
Resource Requests Limits
-------- -------- ------
cpu 850m (42%) 100m (5%)
memory 220Mi (2%) 220Mi (2%)
ephemeral-storage 0 (0%) 0 (0%)
hugepages-1Gi 0 (0%) 0 (0%)
hugepages-2Mi 0 (0%) 0 (0%)
hugepages-32Mi 0 (0%) 0 (0%)
hugepages-64Ki 0 (0%) 0 (0%)
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal Starting 11s kube-proxy
Normal Starting 18s kubelet Starting kubelet.
Warning CgroupV1 18s kubelet cgroup v1 support is in maintenance mode, please migrate to cgroup v2
Normal NodeAllocatableEnforced 17s kubelet Updated Node Allocatable limit across pods
Normal NodeHasSufficientMemory 17s kubelet Node dockerenv-940737 status is now: NodeHasSufficientMemory
Normal NodeHasNoDiskPressure 17s kubelet Node dockerenv-940737 status is now: NodeHasNoDiskPressure
Normal NodeHasSufficientPID 17s kubelet Node dockerenv-940737 status is now: NodeHasSufficientPID
Normal RegisteredNode 14s node-controller Node dockerenv-940737 event: Registered Node dockerenv-940737 in Controller
==> dmesg <==
[May10 16:56] overlayfs: '/var/lib/containers/storage/overlay/l/Q2QJNMTVZL6GMULS36RA5ZJGSA' not a directory
[ +0.389542] overlayfs: '/var/lib/containers/storage/overlay/l/ZLTOCNGE2IGM6DT7VP2QP7OV3M' not a directory
==> etcd [285cca053d09c38c3fe0ef753666a9f2414326880f251ea0bd8343d6fae952f2] <==
{"level":"info","ts":"2025-05-10T17:27:11.264697Z","caller":"embed/etcd.go:633","msg":"serving peer traffic","address":"192.168.49.2:2380"}
{"level":"info","ts":"2025-05-10T17:27:11.264736Z","caller":"embed/etcd.go:603","msg":"cmux::serve","address":"192.168.49.2:2380"}
{"level":"info","ts":"2025-05-10T17:27:11.264668Z","caller":"embed/etcd.go:762","msg":"starting with client TLS","tls-info":"cert = /var/lib/minikube/certs/etcd/server.crt, key = /var/lib/minikube/certs/etcd/server.key, client-cert=, client-key=, trusted-ca = /var/lib/minikube/certs/etcd/ca.crt, client-cert-auth = true, crl-file = ","cipher-suites":[]}
{"level":"info","ts":"2025-05-10T17:27:11.265722Z","caller":"embed/etcd.go:908","msg":"serving metrics","address":"http://127.0.0.1:2381"}
{"level":"info","ts":"2025-05-10T17:27:11.265857Z","caller":"embed/etcd.go:292","msg":"now serving peer/client/metrics","local-member-id":"aec36adc501070cc","initial-advertise-peer-urls":["https://192.168.49.2:2380"],"listen-peer-urls":["https://192.168.49.2:2380"],"advertise-client-urls":["https://192.168.49.2:2379"],"listen-client-urls":["https://127.0.0.1:2379","https://192.168.49.2:2379"],"listen-metrics-urls":["http://127.0.0.1:2381"]}
{"level":"info","ts":"2025-05-10T17:27:11.917674Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"aec36adc501070cc is starting a new election at term 1"}
{"level":"info","ts":"2025-05-10T17:27:11.917787Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"aec36adc501070cc became pre-candidate at term 1"}
{"level":"info","ts":"2025-05-10T17:27:11.917905Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"aec36adc501070cc received MsgPreVoteResp from aec36adc501070cc at term 1"}
{"level":"info","ts":"2025-05-10T17:27:11.919760Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"aec36adc501070cc became candidate at term 2"}
{"level":"info","ts":"2025-05-10T17:27:11.919814Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"aec36adc501070cc received MsgVoteResp from aec36adc501070cc at term 2"}
{"level":"info","ts":"2025-05-10T17:27:11.919857Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"aec36adc501070cc became leader at term 2"}
{"level":"info","ts":"2025-05-10T17:27:11.919891Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"raft.node: aec36adc501070cc elected leader aec36adc501070cc at term 2"}
{"level":"info","ts":"2025-05-10T17:27:11.923412Z","caller":"etcdserver/server.go:2697","msg":"setting up initial cluster version using v2 API","cluster-version":"3.5"}
{"level":"info","ts":"2025-05-10T17:27:11.923796Z","caller":"etcdserver/server.go:2144","msg":"published local member to cluster through raft","local-member-id":"aec36adc501070cc","local-member-attributes":"{Name:dockerenv-940737 ClientURLs:[https://192.168.49.2:2379]}","request-path":"/0/members/aec36adc501070cc/attributes","cluster-id":"fa54960ea34d58be","publish-timeout":"7s"}
{"level":"info","ts":"2025-05-10T17:27:11.923919Z","caller":"embed/serve.go:124","msg":"ready to serve client requests"}
{"level":"info","ts":"2025-05-10T17:27:11.924283Z","caller":"embed/serve.go:124","msg":"ready to serve client requests"}
{"level":"info","ts":"2025-05-10T17:27:11.924979Z","caller":"v3rpc/health.go:61","msg":"grpc service status changed","service":"","status":"SERVING"}
{"level":"info","ts":"2025-05-10T17:27:11.926804Z","caller":"embed/serve.go:275","msg":"serving client traffic securely","traffic":"grpc+http","address":"127.0.0.1:2379"}
{"level":"info","ts":"2025-05-10T17:27:11.934063Z","caller":"v3rpc/health.go:61","msg":"grpc service status changed","service":"","status":"SERVING"}
{"level":"info","ts":"2025-05-10T17:27:11.955040Z","caller":"etcdmain/main.go:44","msg":"notifying init daemon"}
{"level":"info","ts":"2025-05-10T17:27:11.955145Z","caller":"etcdmain/main.go:50","msg":"successfully notified init daemon"}
{"level":"info","ts":"2025-05-10T17:27:11.955719Z","caller":"embed/serve.go:275","msg":"serving client traffic securely","traffic":"grpc+http","address":"192.168.49.2:2379"}
{"level":"info","ts":"2025-05-10T17:27:11.934403Z","caller":"membership/cluster.go:587","msg":"set initial cluster version","cluster-id":"fa54960ea34d58be","local-member-id":"aec36adc501070cc","cluster-version":"3.5"}
{"level":"info","ts":"2025-05-10T17:27:11.973045Z","caller":"api/capability.go:75","msg":"enabled capabilities for version","cluster-version":"3.5"}
{"level":"info","ts":"2025-05-10T17:27:11.973090Z","caller":"etcdserver/server.go:2721","msg":"cluster version is updated","cluster-version":"3.5"}
==> kernel <==
17:27:35 up 3:09, 0 users, load average: 1.13, 1.69, 2.46
Linux dockerenv-940737 5.15.0-1083-aws #90~20.04.1-Ubuntu SMP Tue Apr 22 09:59:55 UTC 2025 aarch64 aarch64 aarch64 GNU/Linux
PRETTY_NAME="Ubuntu 22.04.5 LTS"
==> kindnet [22afd90d952e64f7c12ff80ca5358ef3d9c540a3c0199d362401b5e8b69eda1c] <==
I0510 17:27:23.540080 1 main.go:109] connected to apiserver: https://10.96.0.1:443
I0510 17:27:23.630912 1 main.go:139] hostIP = 192.168.49.2
podIP = 192.168.49.2
I0510 17:27:23.631086 1 main.go:148] setting mtu 1500 for CNI
I0510 17:27:23.631120 1 main.go:178] kindnetd IP family: "ipv4"
I0510 17:27:23.631143 1 main.go:182] noMask IPv4 subnets: [10.244.0.0/16]
I0510 17:27:24.031783 1 controller.go:361] Starting controller kube-network-policies
I0510 17:27:24.031881 1 controller.go:365] Waiting for informer caches to sync
I0510 17:27:24.031908 1 shared_informer.go:313] Waiting for caches to sync for kube-network-policies
I0510 17:27:24.133047 1 shared_informer.go:320] Caches are synced for kube-network-policies
I0510 17:27:24.133287 1 metrics.go:61] Registering metrics
I0510 17:27:24.133487 1 controller.go:401] Syncing nftables rules
I0510 17:27:34.033669 1 main.go:297] Handling node with IPs: map[192.168.49.2:{}]
I0510 17:27:34.033733 1 main.go:301] handling current node
==> kube-apiserver [f5196eec3f1ef88043643dbf65e8b3e93cf9f06d9cc4be2855e6933cc4bec917] <==
I0510 17:27:15.406563 1 policy_source.go:240] refreshing policies
E0510 17:27:15.414306 1 controller.go:148] "Unhandled Error" err="while syncing ConfigMap \"kube-system/kube-apiserver-legacy-service-account-token-tracking\", err: namespaces \"kube-system\" not found" logger="UnhandledError"
I0510 17:27:15.470464 1 controller.go:667] quota admission added evaluator for: namespaces
I0510 17:27:15.472776 1 cidrallocator.go:301] created ClusterIP allocator for Service CIDR 10.96.0.0/12
I0510 17:27:15.476838 1 default_servicecidr_controller.go:214] Setting default ServiceCIDR condition Ready to True
I0510 17:27:15.517146 1 cidrallocator.go:277] updated ClusterIP allocator for Service CIDR 10.96.0.0/12
I0510 17:27:15.520056 1 default_servicecidr_controller.go:136] Shutting down kubernetes-service-cidr-controller
I0510 17:27:15.593318 1 controller.go:667] quota admission added evaluator for: leases.coordination.k8s.io
I0510 17:27:16.164187 1 storage_scheduling.go:95] created PriorityClass system-node-critical with value 2000001000
I0510 17:27:16.173683 1 storage_scheduling.go:95] created PriorityClass system-cluster-critical with value 2000000000
I0510 17:27:16.173880 1 storage_scheduling.go:111] all system priority classes are created successfully or already exist.
I0510 17:27:16.886410 1 controller.go:667] quota admission added evaluator for: roles.rbac.authorization.k8s.io
I0510 17:27:16.934143 1 controller.go:667] quota admission added evaluator for: rolebindings.rbac.authorization.k8s.io
I0510 17:27:17.078354 1 alloc.go:328] "allocated clusterIPs" service="default/kubernetes" clusterIPs={"IPv4":"10.96.0.1"}
W0510 17:27:17.085860 1 lease.go:265] Resetting endpoints for master service "kubernetes" to [192.168.49.2]
I0510 17:27:17.087150 1 controller.go:667] quota admission added evaluator for: endpoints
I0510 17:27:17.092528 1 controller.go:667] quota admission added evaluator for: endpointslices.discovery.k8s.io
I0510 17:27:17.255242 1 controller.go:667] quota admission added evaluator for: serviceaccounts
I0510 17:27:17.976918 1 controller.go:667] quota admission added evaluator for: deployments.apps
I0510 17:27:17.994422 1 alloc.go:328] "allocated clusterIPs" service="kube-system/kube-dns" clusterIPs={"IPv4":"10.96.0.10"}
I0510 17:27:18.007903 1 controller.go:667] quota admission added evaluator for: daemonsets.apps
I0510 17:27:22.652809 1 controller.go:667] quota admission added evaluator for: controllerrevisions.apps
I0510 17:27:22.752238 1 controller.go:667] quota admission added evaluator for: replicasets.apps
I0510 17:27:22.957941 1 cidrallocator.go:277] updated ClusterIP allocator for Service CIDR 10.96.0.0/12
I0510 17:27:22.963008 1 cidrallocator.go:277] updated ClusterIP allocator for Service CIDR 10.96.0.0/12
==> kube-controller-manager [eab0b6c425999797eed3c3d2485aaeeefa2a9ec05d1e657471af1010ff366a1d] <==
I0510 17:27:21.950823 1 shared_informer.go:357] "Caches are synced" controller="certificate-csrsigning-kube-apiserver-client"
I0510 17:27:21.953150 1 shared_informer.go:357] "Caches are synced" controller="certificate-csrsigning-legacy-unknown"
I0510 17:27:21.953294 1 shared_informer.go:357] "Caches are synced" controller="taint"
I0510 17:27:21.953425 1 node_lifecycle_controller.go:1221] "Initializing eviction metric for zone" logger="node-lifecycle-controller" zone=""
I0510 17:27:21.953521 1 node_lifecycle_controller.go:873] "Missing timestamp for Node. Assuming now as a timestamp" logger="node-lifecycle-controller" node="dockerenv-940737"
I0510 17:27:21.953688 1 node_lifecycle_controller.go:1067] "Controller detected that zone is now in new state" logger="node-lifecycle-controller" zone="" newState="Normal"
I0510 17:27:21.959829 1 shared_informer.go:357] "Caches are synced" controller="node"
I0510 17:27:21.959951 1 range_allocator.go:177] "Sending events to api server" logger="node-ipam-controller"
I0510 17:27:21.960011 1 range_allocator.go:183] "Starting range CIDR allocator" logger="node-ipam-controller"
I0510 17:27:21.960097 1 shared_informer.go:350] "Waiting for caches to sync" controller="cidrallocator"
I0510 17:27:21.960173 1 shared_informer.go:357] "Caches are synced" controller="cidrallocator"
I0510 17:27:21.986608 1 range_allocator.go:428] "Set node PodCIDR" logger="node-ipam-controller" node="dockerenv-940737" podCIDRs=["10.244.0.0/24"]
I0510 17:27:22.048595 1 shared_informer.go:357] "Caches are synced" controller="PV protection"
I0510 17:27:22.062769 1 shared_informer.go:357] "Caches are synced" controller="attach detach"
I0510 17:27:22.099121 1 shared_informer.go:357] "Caches are synced" controller="persistent volume"
I0510 17:27:22.197659 1 shared_informer.go:357] "Caches are synced" controller="stateful set"
I0510 17:27:22.249641 1 shared_informer.go:357] "Caches are synced" controller="endpoint_slice"
I0510 17:27:22.249650 1 shared_informer.go:357] "Caches are synced" controller="endpoint_slice_mirroring"
I0510 17:27:22.249671 1 shared_informer.go:357] "Caches are synced" controller="disruption"
I0510 17:27:22.258171 1 shared_informer.go:357] "Caches are synced" controller="resource quota"
I0510 17:27:22.290126 1 shared_informer.go:357] "Caches are synced" controller="resource quota"
I0510 17:27:22.693264 1 shared_informer.go:357] "Caches are synced" controller="garbage collector"
I0510 17:27:22.706039 1 shared_informer.go:357] "Caches are synced" controller="garbage collector"
I0510 17:27:22.706128 1 garbagecollector.go:154] "Garbage collector: all resource monitors have synced" logger="garbage-collector-controller"
I0510 17:27:22.706157 1 garbagecollector.go:157] "Proceeding to collect garbage" logger="garbage-collector-controller"
==> kube-proxy [5c34073b26db0586f64776214ed398d39af663f33963da20f908d0f54b442f03] <==
I0510 17:27:23.340325 1 server_linux.go:63] "Using iptables proxy"
I0510 17:27:23.441620 1 server.go:715] "Successfully retrieved node IP(s)" IPs=["192.168.49.2"]
E0510 17:27:23.441692 1 server.go:245] "Kube-proxy configuration may be incomplete or incorrect" err="nodePortAddresses is unset; NodePort connections will be accepted on all local IPs. Consider using `--nodeport-addresses primary`"
I0510 17:27:23.478335 1 server.go:254] "kube-proxy running in dual-stack mode" primary ipFamily="IPv4"
I0510 17:27:23.478399 1 server_linux.go:145] "Using iptables Proxier"
I0510 17:27:23.483991 1 proxier.go:243] "Setting route_localnet=1 to allow node-ports on localhost; to change this either disable iptables.localhostNodePorts (--iptables-localhost-nodeports) or set nodePortAddresses (--nodeport-addresses) to filter loopback addresses" ipFamily="IPv4"
I0510 17:27:23.485145 1 server.go:516] "Version info" version="v1.33.0"
I0510 17:27:23.485950 1 server.go:518] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
I0510 17:27:23.488335 1 config.go:199] "Starting service config controller"
I0510 17:27:23.488453 1 shared_informer.go:350] "Waiting for caches to sync" controller="service config"
I0510 17:27:23.488525 1 config.go:105] "Starting endpoint slice config controller"
I0510 17:27:23.488568 1 shared_informer.go:350] "Waiting for caches to sync" controller="endpoint slice config"
I0510 17:27:23.488611 1 config.go:440] "Starting serviceCIDR config controller"
I0510 17:27:23.488652 1 shared_informer.go:350] "Waiting for caches to sync" controller="serviceCIDR config"
I0510 17:27:23.489989 1 config.go:329] "Starting node config controller"
I0510 17:27:23.490085 1 shared_informer.go:350] "Waiting for caches to sync" controller="node config"
I0510 17:27:23.589105 1 shared_informer.go:357] "Caches are synced" controller="serviceCIDR config"
I0510 17:27:23.589118 1 shared_informer.go:357] "Caches are synced" controller="service config"
I0510 17:27:23.589153 1 shared_informer.go:357] "Caches are synced" controller="endpoint slice config"
I0510 17:27:23.590442 1 shared_informer.go:357] "Caches are synced" controller="node config"
==> kube-scheduler [038c6e33c836d6a270c0d6fa7348371e004ba8140affd97c4802ec6e4cf91290] <==
W0510 17:27:16.272180 1 authentication.go:398] Continuing without authentication configuration. This may treat all requests as anonymous.
W0510 17:27:16.272186 1 authentication.go:399] To require authentication configuration lookup to succeed, set --authentication-tolerate-lookup-failure=false
I0510 17:27:16.299019 1 server.go:171] "Starting Kubernetes Scheduler" version="v1.33.0"
I0510 17:27:16.299073 1 server.go:173] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
I0510 17:27:16.301438 1 configmap_cafile_content.go:205] "Starting controller" name="client-ca::kube-system::extension-apiserver-authentication::client-ca-file"
I0510 17:27:16.301480 1 shared_informer.go:350] "Waiting for caches to sync" controller="client-ca::kube-system::extension-apiserver-authentication::client-ca-file"
I0510 17:27:16.302412 1 secure_serving.go:211] Serving securely on 127.0.0.1:10259
I0510 17:27:16.302701 1 tlsconfig.go:243] "Starting DynamicServingCertificateController"
E0510 17:27:16.307650 1 reflector.go:200] "Failed to watch" err="failed to list *v1.CSIStorageCapacity: csistoragecapacities.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"csistoragecapacities\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.CSIStorageCapacity"
E0510 17:27:16.308609 1 reflector.go:200] "Failed to watch" err="failed to list *v1.ConfigMap: configmaps \"extension-apiserver-authentication\" is forbidden: User \"system:kube-scheduler\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"kube-system\"" logger="UnhandledError" reflector="runtime/asm_arm64.s:1223" type="*v1.ConfigMap"
E0510 17:27:16.314930 1 reflector.go:200] "Failed to watch" err="failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User \"system:kube-scheduler\" cannot list resource \"persistentvolumes\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.PersistentVolume"
E0510 17:27:16.315043 1 reflector.go:200] "Failed to watch" err="failed to list *v1.VolumeAttachment: volumeattachments.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"volumeattachments\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.VolumeAttachment"
E0510 17:27:16.315128 1 reflector.go:200] "Failed to watch" err="failed to list *v1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User \"system:kube-scheduler\" cannot list resource \"poddisruptionbudgets\" in API group \"policy\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.PodDisruptionBudget"
E0510 17:27:16.315243 1 reflector.go:200] "Failed to watch" err="failed to list *v1.Pod: pods is forbidden: User \"system:kube-scheduler\" cannot list resource \"pods\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.Pod"
E0510 17:27:16.315370 1 reflector.go:200] "Failed to watch" err="failed to list *v1.Node: nodes is forbidden: User \"system:kube-scheduler\" cannot list resource \"nodes\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.Node"
E0510 17:27:16.315445 1 reflector.go:200] "Failed to watch" err="failed to list *v1.ReplicationController: replicationcontrollers is forbidden: User \"system:kube-scheduler\" cannot list resource \"replicationcontrollers\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.ReplicationController"
E0510 17:27:16.315501 1 reflector.go:200] "Failed to watch" err="failed to list *v1.ReplicaSet: replicasets.apps is forbidden: User \"system:kube-scheduler\" cannot list resource \"replicasets\" in API group \"apps\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.ReplicaSet"
E0510 17:27:16.315588 1 reflector.go:200] "Failed to watch" err="failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"storageclasses\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.StorageClass"
E0510 17:27:16.316783 1 reflector.go:200] "Failed to watch" err="failed to list *v1.StatefulSet: statefulsets.apps is forbidden: User \"system:kube-scheduler\" cannot list resource \"statefulsets\" in API group \"apps\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.StatefulSet"
E0510 17:27:16.316890 1 reflector.go:200] "Failed to watch" err="failed to list *v1.CSINode: csinodes.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"csinodes\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.CSINode"
E0510 17:27:16.316980 1 reflector.go:200] "Failed to watch" err="failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User \"system:kube-scheduler\" cannot list resource \"persistentvolumeclaims\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.PersistentVolumeClaim"
E0510 17:27:16.317071 1 reflector.go:200] "Failed to watch" err="failed to list *v1.CSIDriver: csidrivers.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"csidrivers\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.CSIDriver"
E0510 17:27:16.317177 1 reflector.go:200] "Failed to watch" err="failed to list *v1.Service: services is forbidden: User \"system:kube-scheduler\" cannot list resource \"services\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.Service"
E0510 17:27:16.317790 1 reflector.go:200] "Failed to watch" err="failed to list *v1.Namespace: namespaces is forbidden: User \"system:kube-scheduler\" cannot list resource \"namespaces\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.Namespace"
I0510 17:27:17.501824 1 shared_informer.go:357] "Caches are synced" controller="client-ca::kube-system::extension-apiserver-authentication::client-ca-file"
==> kubelet <==
May 10 17:27:22 dockerenv-940737 kubelet[1530]: I0510 17:27:22.010244 1530 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/host-path/fdf65ddb-385b-449c-8a6e-e559e1095b47-tmp\") pod \"storage-provisioner\" (UID: \"fdf65ddb-385b-449c-8a6e-e559e1095b47\") " pod="kube-system/storage-provisioner"
May 10 17:27:22 dockerenv-940737 kubelet[1530]: I0510 17:27:22.010306 1530 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lhj7h\" (UniqueName: \"kubernetes.io/projected/fdf65ddb-385b-449c-8a6e-e559e1095b47-kube-api-access-lhj7h\") pod \"storage-provisioner\" (UID: \"fdf65ddb-385b-449c-8a6e-e559e1095b47\") " pod="kube-system/storage-provisioner"
May 10 17:27:22 dockerenv-940737 kubelet[1530]: E0510 17:27:22.120774 1530 projected.go:289] Couldn't get configMap kube-system/kube-root-ca.crt: configmap "kube-root-ca.crt" not found
May 10 17:27:22 dockerenv-940737 kubelet[1530]: E0510 17:27:22.120811 1530 projected.go:194] Error preparing data for projected volume kube-api-access-lhj7h for pod kube-system/storage-provisioner: configmap "kube-root-ca.crt" not found
May 10 17:27:22 dockerenv-940737 kubelet[1530]: E0510 17:27:22.120905 1530 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/fdf65ddb-385b-449c-8a6e-e559e1095b47-kube-api-access-lhj7h podName:fdf65ddb-385b-449c-8a6e-e559e1095b47 nodeName:}" failed. No retries permitted until 2025-05-10 17:27:22.620879586 +0000 UTC m=+4.873791263 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-lhj7h" (UniqueName: "kubernetes.io/projected/fdf65ddb-385b-449c-8a6e-e559e1095b47-kube-api-access-lhj7h") pod "storage-provisioner" (UID: "fdf65ddb-385b-449c-8a6e-e559e1095b47") : configmap "kube-root-ca.crt" not found
May 10 17:27:22 dockerenv-940737 kubelet[1530]: I0510 17:27:22.714941 1530 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"xtables-lock\" (UniqueName: \"kubernetes.io/host-path/d921cac3-cd47-4aa8-a134-daf41e1f22d1-xtables-lock\") pod \"kube-proxy-nttll\" (UID: \"d921cac3-cd47-4aa8-a134-daf41e1f22d1\") " pod="kube-system/kube-proxy-nttll"
May 10 17:27:22 dockerenv-940737 kubelet[1530]: I0510 17:27:22.715002 1530 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/d921cac3-cd47-4aa8-a134-daf41e1f22d1-lib-modules\") pod \"kube-proxy-nttll\" (UID: \"d921cac3-cd47-4aa8-a134-daf41e1f22d1\") " pod="kube-system/kube-proxy-nttll"
May 10 17:27:22 dockerenv-940737 kubelet[1530]: I0510 17:27:22.715028 1530 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gtqc5\" (UniqueName: \"kubernetes.io/projected/d921cac3-cd47-4aa8-a134-daf41e1f22d1-kube-api-access-gtqc5\") pod \"kube-proxy-nttll\" (UID: \"d921cac3-cd47-4aa8-a134-daf41e1f22d1\") " pod="kube-system/kube-proxy-nttll"
May 10 17:27:22 dockerenv-940737 kubelet[1530]: I0510 17:27:22.715088 1530 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-proxy\" (UniqueName: \"kubernetes.io/configmap/d921cac3-cd47-4aa8-a134-daf41e1f22d1-kube-proxy\") pod \"kube-proxy-nttll\" (UID: \"d921cac3-cd47-4aa8-a134-daf41e1f22d1\") " pod="kube-system/kube-proxy-nttll"
May 10 17:27:22 dockerenv-940737 kubelet[1530]: I0510 17:27:22.715576 1530 swap_util.go:74] "error creating dir to test if tmpfs noswap is enabled. Assuming not supported" mount path="" error="stat /var/lib/kubelet/plugins/kubernetes.io/empty-dir: no such file or directory"
May 10 17:27:22 dockerenv-940737 kubelet[1530]: I0510 17:27:22.815537 1530 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xzxbv\" (UniqueName: \"kubernetes.io/projected/e47adb02-aece-490e-b915-083b095b90ac-kube-api-access-xzxbv\") pod \"kindnet-6zjb8\" (UID: \"e47adb02-aece-490e-b915-083b095b90ac\") " pod="kube-system/kindnet-6zjb8"
May 10 17:27:22 dockerenv-940737 kubelet[1530]: I0510 17:27:22.815635 1530 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-cfg\" (UniqueName: \"kubernetes.io/host-path/e47adb02-aece-490e-b915-083b095b90ac-cni-cfg\") pod \"kindnet-6zjb8\" (UID: \"e47adb02-aece-490e-b915-083b095b90ac\") " pod="kube-system/kindnet-6zjb8"
May 10 17:27:22 dockerenv-940737 kubelet[1530]: I0510 17:27:22.815670 1530 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"xtables-lock\" (UniqueName: \"kubernetes.io/host-path/e47adb02-aece-490e-b915-083b095b90ac-xtables-lock\") pod \"kindnet-6zjb8\" (UID: \"e47adb02-aece-490e-b915-083b095b90ac\") " pod="kube-system/kindnet-6zjb8"
May 10 17:27:22 dockerenv-940737 kubelet[1530]: I0510 17:27:22.815695 1530 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/e47adb02-aece-490e-b915-083b095b90ac-lib-modules\") pod \"kindnet-6zjb8\" (UID: \"e47adb02-aece-490e-b915-083b095b90ac\") " pod="kube-system/kindnet-6zjb8"
May 10 17:27:23 dockerenv-940737 kubelet[1530]: I0510 17:27:23.219149 1530 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cntrv\" (UniqueName: \"kubernetes.io/projected/f7b2ff82-68d5-4407-913c-dd86dd606133-kube-api-access-cntrv\") pod \"coredns-674b8bbfcf-gnw5f\" (UID: \"f7b2ff82-68d5-4407-913c-dd86dd606133\") " pod="kube-system/coredns-674b8bbfcf-gnw5f"
May 10 17:27:23 dockerenv-940737 kubelet[1530]: I0510 17:27:23.219207 1530 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/f7b2ff82-68d5-4407-913c-dd86dd606133-config-volume\") pod \"coredns-674b8bbfcf-gnw5f\" (UID: \"f7b2ff82-68d5-4407-913c-dd86dd606133\") " pod="kube-system/coredns-674b8bbfcf-gnw5f"
May 10 17:27:23 dockerenv-940737 kubelet[1530]: E0510 17:27:23.518118 1530 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to setup network for sandbox \"890f39a058ded369808bb8d4d718e8adc2d0bdafc83f4b926584ce6db45b3d82\": failed to find network info for sandbox \"890f39a058ded369808bb8d4d718e8adc2d0bdafc83f4b926584ce6db45b3d82\""
May 10 17:27:23 dockerenv-940737 kubelet[1530]: E0510 17:27:23.518196 1530 kuberuntime_sandbox.go:70] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to setup network for sandbox \"890f39a058ded369808bb8d4d718e8adc2d0bdafc83f4b926584ce6db45b3d82\": failed to find network info for sandbox \"890f39a058ded369808bb8d4d718e8adc2d0bdafc83f4b926584ce6db45b3d82\"" pod="kube-system/coredns-674b8bbfcf-gnw5f"
May 10 17:27:23 dockerenv-940737 kubelet[1530]: E0510 17:27:23.518232 1530 kuberuntime_manager.go:1252] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to setup network for sandbox \"890f39a058ded369808bb8d4d718e8adc2d0bdafc83f4b926584ce6db45b3d82\": failed to find network info for sandbox \"890f39a058ded369808bb8d4d718e8adc2d0bdafc83f4b926584ce6db45b3d82\"" pod="kube-system/coredns-674b8bbfcf-gnw5f"
May 10 17:27:23 dockerenv-940737 kubelet[1530]: E0510 17:27:23.518297 1530 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"coredns-674b8bbfcf-gnw5f_kube-system(f7b2ff82-68d5-4407-913c-dd86dd606133)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"coredns-674b8bbfcf-gnw5f_kube-system(f7b2ff82-68d5-4407-913c-dd86dd606133)\\\": rpc error: code = Unknown desc = failed to setup network for sandbox \\\"890f39a058ded369808bb8d4d718e8adc2d0bdafc83f4b926584ce6db45b3d82\\\": failed to find network info for sandbox \\\"890f39a058ded369808bb8d4d718e8adc2d0bdafc83f4b926584ce6db45b3d82\\\"\"" pod="kube-system/coredns-674b8bbfcf-gnw5f" podUID="f7b2ff82-68d5-4407-913c-dd86dd606133"
May 10 17:27:23 dockerenv-940737 kubelet[1530]: I0510 17:27:23.976033 1530 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="kube-system/kindnet-6zjb8" podStartSLOduration=1.9760018110000002 podStartE2EDuration="1.976001811s" podCreationTimestamp="2025-05-10 17:27:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-05-10 17:27:23.975879145 +0000 UTC m=+6.228790830" watchObservedRunningTime="2025-05-10 17:27:23.976001811 +0000 UTC m=+6.228913488"
May 10 17:27:24 dockerenv-940737 kubelet[1530]: I0510 17:27:24.009605 1530 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="kube-system/kube-proxy-nttll" podStartSLOduration=2.009550927 podStartE2EDuration="2.009550927s" podCreationTimestamp="2025-05-10 17:27:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-05-10 17:27:23.991921521 +0000 UTC m=+6.244833206" watchObservedRunningTime="2025-05-10 17:27:24.009550927 +0000 UTC m=+6.262462604"
May 10 17:27:26 dockerenv-940737 kubelet[1530]: I0510 17:27:26.749527 1530 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="kube-system/storage-provisioner" podStartSLOduration=7.749499237 podStartE2EDuration="7.749499237s" podCreationTimestamp="2025-05-10 17:27:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-05-10 17:27:24.025942668 +0000 UTC m=+6.278854353" watchObservedRunningTime="2025-05-10 17:27:26.749499237 +0000 UTC m=+9.002410914"
May 10 17:27:28 dockerenv-940737 kubelet[1530]: I0510 17:27:28.368929 1530 kuberuntime_manager.go:1746] "Updating runtime config through cri with podcidr" CIDR="10.244.0.0/24"
May 10 17:27:28 dockerenv-940737 kubelet[1530]: I0510 17:27:28.369803 1530 kubelet_network.go:61] "Updating Pod CIDR" originalPodCIDR="" newPodCIDR="10.244.0.0/24"
==> storage-provisioner [d4d5e664d34b53e2e5df2adf4edbb9eb107ae756e9309f951599ef3aabcb3360] <==
I0510 17:27:23.239497 1 storage_provisioner.go:116] Initializing the minikube storage provisioner...
-- /stdout --
helpers_test.go:254: (dbg) Run: out/minikube-linux-arm64 status --format={{.APIServer}} -p dockerenv-940737 -n dockerenv-940737
helpers_test.go:261: (dbg) Run: kubectl --context dockerenv-940737 get po -o=jsonpath={.items[*].metadata.name} -A --field-selector=status.phase!=Running
helpers_test.go:272: non-running pods: coredns-674b8bbfcf-gnw5f
helpers_test.go:274: ======> post-mortem[TestDockerEnvContainerd]: describe non-running pods <======
helpers_test.go:277: (dbg) Run: kubectl --context dockerenv-940737 describe pod coredns-674b8bbfcf-gnw5f
helpers_test.go:277: (dbg) Non-zero exit: kubectl --context dockerenv-940737 describe pod coredns-674b8bbfcf-gnw5f: exit status 1 (107.595991ms)
** stderr **
Error from server (NotFound): pods "coredns-674b8bbfcf-gnw5f" not found
** /stderr **
helpers_test.go:279: kubectl --context dockerenv-940737 describe pod coredns-674b8bbfcf-gnw5f: exit status 1
helpers_test.go:175: Cleaning up "dockerenv-940737" profile ...
helpers_test.go:178: (dbg) Run: out/minikube-linux-arm64 delete -p dockerenv-940737
helpers_test.go:178: (dbg) Done: out/minikube-linux-arm64 delete -p dockerenv-940737: (1.928311194s)
--- FAIL: TestDockerEnvContainerd (46.86s)
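
In this run, the legacy (DOCKER_BUILDKIT=0) build over the SSH docker-env transport failed with "Error response from daemon: exit status 1", so the test image never appeared in docker image ls. A minimal sketch for reproducing that step by hand, outside the test harness: it assumes the minikube and docker CLIs are on PATH, reuses the profile name from this run (any profile started with --driver=docker --container-runtime=containerd should exercise the same path), and uses the testdata/docker-env build context from the minikube repository.

  # hypothetical manual reproduction of the sequence from docker_test.go:181-250
  minikube start -p dockerenv-940737 --driver=docker --container-runtime=containerd
  # docker-env prints export statements; eval them to point docker at the cluster over SSH
  eval "$(minikube docker-env --ssh-host --ssh-add -p dockerenv-940737)"
  # the failing step: force the legacy builder, as the test does
  DOCKER_BUILDKIT=0 docker build -t local/minikube-dockerenv-containerd-test:latest testdata/docker-env
  # the test's success check: the built tag should be listed
  docker image ls | grep minikube-dockerenv-containerd-test
  # clean up, mirroring the post-mortem teardown
  minikube delete -p dockerenv-940737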