=== RUN TestDockerEnvContainerd
docker_test.go:170: running with containerd true linux arm64
docker_test.go:181: (dbg) Run: out/minikube-linux-arm64 start -p dockerenv-668100 --driver=docker --container-runtime=containerd
docker_test.go:181: (dbg) Done: out/minikube-linux-arm64 start -p dockerenv-668100 --driver=docker --container-runtime=containerd: (35.002166027s)
docker_test.go:189: (dbg) Run: /bin/bash -c "out/minikube-linux-arm64 docker-env --ssh-host --ssh-add -p dockerenv-668100"
docker_test.go:220: (dbg) Run: /bin/bash -c "SSH_AUTH_SOCK="/tmp/ssh-KW3V3hsx1aeO/agent.900266" SSH_AGENT_PID="900267" DOCKER_HOST=ssh://docker@127.0.0.1:33884 docker version"
docker_test.go:243: (dbg) Run: /bin/bash -c "SSH_AUTH_SOCK="/tmp/ssh-KW3V3hsx1aeO/agent.900266" SSH_AGENT_PID="900267" DOCKER_HOST=ssh://docker@127.0.0.1:33884 DOCKER_BUILDKIT=0 docker build -t local/minikube-dockerenv-containerd-test:latest testdata/docker-env"
docker_test.go:243: (dbg) Done: /bin/bash -c "SSH_AUTH_SOCK="/tmp/ssh-KW3V3hsx1aeO/agent.900266" SSH_AGENT_PID="900267" DOCKER_HOST=ssh://docker@127.0.0.1:33884 DOCKER_BUILDKIT=0 docker build -t local/minikube-dockerenv-containerd-test:latest testdata/docker-env": (1.235158213s)
docker_test.go:250: (dbg) Run: /bin/bash -c "SSH_AUTH_SOCK="/tmp/ssh-KW3V3hsx1aeO/agent.900266" SSH_AGENT_PID="900267" DOCKER_HOST=ssh://docker@127.0.0.1:33884 docker image ls"
docker_test.go:250: (dbg) Non-zero exit: /bin/bash -c "SSH_AUTH_SOCK="/tmp/ssh-KW3V3hsx1aeO/agent.900266" SSH_AGENT_PID="900267" DOCKER_HOST=ssh://docker@127.0.0.1:33884 docker image ls": exit status 1 (687.892702ms)
** stderr **
error during connect: Get "http://docker.example.com/v1.43/images/json": EOF
** /stderr **
docker_test.go:252: failed to execute 'docker image ls', error: exit status 1, output:
** stderr **
error during connect: Get "http://docker.example.com/v1.43/images/json": EOF
** /stderr **
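The failing flow above can be replayed by hand outside the test harness. A minimal sketch, assuming the same profile name and binary path as this run (minikube docker-env emits shell exports, so eval-ing its output is the usual way to apply them):

# Point the host docker CLI at the cluster's engine over SSH, as the test does.
eval "$(out/minikube-linux-arm64 docker-env --ssh-host --ssh-add -p dockerenv-668100)"
docker version     # exercises the SSH tunnel end to end
docker image ls    # the step that returned "error during connect ... EOF" above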
panic.go:636: *** TestDockerEnvContainerd FAILED at 2025-09-04 06:27:19.575725989 +0000 UTC m=+456.708784175
helpers_test.go:222: -----------------------post-mortem--------------------------------
helpers_test.go:223: ======> post-mortem[TestDockerEnvContainerd]: network settings <======
helpers_test.go:230: HOST ENV snapshots: PROXY env: HTTP_PROXY="<empty>" HTTPS_PROXY="<empty>" NO_PROXY="<empty>"
helpers_test.go:238: ======> post-mortem[TestDockerEnvContainerd]: docker inspect <======
helpers_test.go:239: (dbg) Run: docker inspect dockerenv-668100
helpers_test.go:243: (dbg) docker inspect dockerenv-668100:
-- stdout --
[
{
"Id": "cb2217a70bc44049e2e79dd29e3abd49baeb31607570e3e082ed38bde4b94476",
"Created": "2025-09-04T06:26:36.49152314Z",
"Path": "/usr/local/bin/entrypoint",
"Args": [
"/sbin/init"
],
"State": {
"Status": "running",
"Running": true,
"Paused": false,
"Restarting": false,
"OOMKilled": false,
"Dead": false,
"Pid": 897751,
"ExitCode": 0,
"Error": "",
"StartedAt": "2025-09-04T06:26:36.55667274Z",
"FinishedAt": "0001-01-01T00:00:00Z"
},
"Image": "sha256:05a67d9d64bd61dbd33e828ddc4dedd9a0cf93c553e7627e8e0a3cfe0b4eba90",
"ResolvConfPath": "/var/lib/docker/containers/cb2217a70bc44049e2e79dd29e3abd49baeb31607570e3e082ed38bde4b94476/resolv.conf",
"HostnamePath": "/var/lib/docker/containers/cb2217a70bc44049e2e79dd29e3abd49baeb31607570e3e082ed38bde4b94476/hostname",
"HostsPath": "/var/lib/docker/containers/cb2217a70bc44049e2e79dd29e3abd49baeb31607570e3e082ed38bde4b94476/hosts",
"LogPath": "/var/lib/docker/containers/cb2217a70bc44049e2e79dd29e3abd49baeb31607570e3e082ed38bde4b94476/cb2217a70bc44049e2e79dd29e3abd49baeb31607570e3e082ed38bde4b94476-json.log",
"Name": "/dockerenv-668100",
"RestartCount": 0,
"Driver": "overlay2",
"Platform": "linux",
"MountLabel": "",
"ProcessLabel": "",
"AppArmorProfile": "unconfined",
"ExecIDs": null,
"HostConfig": {
"Binds": [
"dockerenv-668100:/var",
"/lib/modules:/lib/modules:ro"
],
"ContainerIDFile": "",
"LogConfig": {
"Type": "json-file",
"Config": {}
},
"NetworkMode": "dockerenv-668100",
"PortBindings": {
"22/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"2376/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"32443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"5000/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"8443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
]
},
"RestartPolicy": {
"Name": "no",
"MaximumRetryCount": 0
},
"AutoRemove": false,
"VolumeDriver": "",
"VolumesFrom": null,
"ConsoleSize": [
0,
0
],
"CapAdd": null,
"CapDrop": null,
"CgroupnsMode": "host",
"Dns": [],
"DnsOptions": [],
"DnsSearch": [],
"ExtraHosts": null,
"GroupAdd": null,
"IpcMode": "private",
"Cgroup": "",
"Links": null,
"OomScoreAdj": 0,
"PidMode": "",
"Privileged": true,
"PublishAllPorts": false,
"ReadonlyRootfs": false,
"SecurityOpt": [
"seccomp=unconfined",
"apparmor=unconfined",
"label=disable"
],
"Tmpfs": {
"/run": "",
"/tmp": ""
},
"UTSMode": "",
"UsernsMode": "",
"ShmSize": 67108864,
"Runtime": "runc",
"Isolation": "",
"CpuShares": 0,
"Memory": 3221225472,
"NanoCpus": 2000000000,
"CgroupParent": "",
"BlkioWeight": 0,
"BlkioWeightDevice": [],
"BlkioDeviceReadBps": [],
"BlkioDeviceWriteBps": [],
"BlkioDeviceReadIOps": [],
"BlkioDeviceWriteIOps": [],
"CpuPeriod": 0,
"CpuQuota": 0,
"CpuRealtimePeriod": 0,
"CpuRealtimeRuntime": 0,
"CpusetCpus": "",
"CpusetMems": "",
"Devices": [],
"DeviceCgroupRules": null,
"DeviceRequests": null,
"MemoryReservation": 0,
"MemorySwap": 6442450944,
"MemorySwappiness": null,
"OomKillDisable": false,
"PidsLimit": null,
"Ulimits": [],
"CpuCount": 0,
"CpuPercent": 0,
"IOMaximumIOps": 0,
"IOMaximumBandwidth": 0,
"MaskedPaths": null,
"ReadonlyPaths": null
},
"GraphDriver": {
"Data": {
"ID": "cb2217a70bc44049e2e79dd29e3abd49baeb31607570e3e082ed38bde4b94476",
"LowerDir": "/var/lib/docker/overlay2/349b08f6c88dbd2af236739a3f937cdd3313bda9b12a3962648d87de8d11a13c-init/diff:/var/lib/docker/overlay2/fe768064e77edef7ab034159629a7675e982c755adb79a9cc21b6b108aaa3716/diff",
"MergedDir": "/var/lib/docker/overlay2/349b08f6c88dbd2af236739a3f937cdd3313bda9b12a3962648d87de8d11a13c/merged",
"UpperDir": "/var/lib/docker/overlay2/349b08f6c88dbd2af236739a3f937cdd3313bda9b12a3962648d87de8d11a13c/diff",
"WorkDir": "/var/lib/docker/overlay2/349b08f6c88dbd2af236739a3f937cdd3313bda9b12a3962648d87de8d11a13c/work"
},
"Name": "overlay2"
},
"Mounts": [
{
"Type": "volume",
"Name": "dockerenv-668100",
"Source": "/var/lib/docker/volumes/dockerenv-668100/_data",
"Destination": "/var",
"Driver": "local",
"Mode": "z",
"RW": true,
"Propagation": ""
},
{
"Type": "bind",
"Source": "/lib/modules",
"Destination": "/lib/modules",
"Mode": "ro",
"RW": false,
"Propagation": "rprivate"
}
],
"Config": {
"Hostname": "dockerenv-668100",
"Domainname": "",
"User": "",
"AttachStdin": false,
"AttachStdout": false,
"AttachStderr": false,
"ExposedPorts": {
"22/tcp": {},
"2376/tcp": {},
"32443/tcp": {},
"5000/tcp": {},
"8443/tcp": {}
},
"Tty": true,
"OpenStdin": false,
"StdinOnce": false,
"Env": [
"container=docker",
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
],
"Cmd": null,
"Image": "gcr.io/k8s-minikube/kicbase-builds:v0.0.47-1756936034-21409@sha256:06a2e6835062e5beff0e5288aa7d453ae87f4ed9d9f593dbbe436c8e34741bfc",
"Volumes": null,
"WorkingDir": "/",
"Entrypoint": [
"/usr/local/bin/entrypoint",
"/sbin/init"
],
"OnBuild": null,
"Labels": {
"created_by.minikube.sigs.k8s.io": "true",
"mode.minikube.sigs.k8s.io": "dockerenv-668100",
"name.minikube.sigs.k8s.io": "dockerenv-668100",
"role.minikube.sigs.k8s.io": ""
},
"StopSignal": "SIGRTMIN+3"
},
"NetworkSettings": {
"Bridge": "",
"SandboxID": "5e8a6213b869eb0be2c4efa1607d6fbc4c75486f7f239e50a5471e6dfef93040",
"SandboxKey": "/var/run/docker/netns/5e8a6213b869",
"Ports": {
"22/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33884"
}
],
"2376/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33885"
}
],
"32443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33888"
}
],
"5000/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33886"
}
],
"8443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33887"
}
]
},
"HairpinMode": false,
"LinkLocalIPv6Address": "",
"LinkLocalIPv6PrefixLen": 0,
"SecondaryIPAddresses": null,
"SecondaryIPv6Addresses": null,
"EndpointID": "",
"Gateway": "",
"GlobalIPv6Address": "",
"GlobalIPv6PrefixLen": 0,
"IPAddress": "",
"IPPrefixLen": 0,
"IPv6Gateway": "",
"MacAddress": "",
"Networks": {
"dockerenv-668100": {
"IPAMConfig": {
"IPv4Address": "192.168.49.2"
},
"Links": null,
"Aliases": null,
"MacAddress": "f2:fd:78:b4:54:9c",
"DriverOpts": null,
"GwPriority": 0,
"NetworkID": "d906c13f66df78a703595b0dfeefcfe4ed591c611fcc42fba2db6dcf65f8b9d0",
"EndpointID": "fea7e4ef1b5f79425a86d7be14bcc71c960b1de1539b3c50417cdab984e5dc82",
"Gateway": "192.168.49.1",
"IPAddress": "192.168.49.2",
"IPPrefixLen": 24,
"IPv6Gateway": "",
"GlobalIPv6Address": "",
"GlobalIPv6PrefixLen": 0,
"DNSNames": [
"dockerenv-668100",
"cb2217a70bc4"
]
}
}
}
}
]
-- /stdout --
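The published host ports in the inspect output above are what docker-env encodes into DOCKER_HOST. As a sketch, the same Go-template lookup that minikube itself runs later in this log (see the cli_runner lines) can be issued directly:

# Resolve the loopback port published for the container's sshd;
# for this run it yields 33884, matching ssh://docker@127.0.0.1:33884.
docker container inspect -f '{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}' dockerenv-668100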
helpers_test.go:247: (dbg) Run: out/minikube-linux-arm64 status --format={{.Host}} -p dockerenv-668100 -n dockerenv-668100
helpers_test.go:252: <<< TestDockerEnvContainerd FAILED: start of post-mortem logs <<<
helpers_test.go:253: ======> post-mortem[TestDockerEnvContainerd]: minikube logs <======
helpers_test.go:255: (dbg) Run: out/minikube-linux-arm64 -p dockerenv-668100 logs -n 25
helpers_test.go:255: (dbg) Done: out/minikube-linux-arm64 -p dockerenv-668100 logs -n 25: (1.258359851s)
helpers_test.go:260: TestDockerEnvContainerd logs:
-- stdout --
==> Audit <==
┌────────────┬───────────────────────────────────────────────────────────────────────────────────────────────────────────────────┬──────────────────┬─────────┬─────────┬─────────────────────┬─────────────────────┐
│ COMMAND │ ARGS │ PROFILE │ USER │ VERSION │ START TIME │ END TIME │
├────────────┼───────────────────────────────────────────────────────────────────────────────────────────────────────────────────┼──────────────────┼─────────┼─────────┼─────────────────────┼─────────────────────┤
│ ip │ addons-903438 ip │ addons-903438 │ jenkins │ v1.36.0 │ 04 Sep 25 06:24 UTC │ 04 Sep 25 06:24 UTC │
│ addons │ addons-903438 addons disable registry --alsologtostderr -v=1 │ addons-903438 │ jenkins │ v1.36.0 │ 04 Sep 25 06:24 UTC │ 04 Sep 25 06:24 UTC │
│ addons │ addons-903438 addons disable nvidia-device-plugin --alsologtostderr -v=1 │ addons-903438 │ jenkins │ v1.36.0 │ 04 Sep 25 06:24 UTC │ 04 Sep 25 06:24 UTC │
│ addons │ addons-903438 addons disable cloud-spanner --alsologtostderr -v=1 │ addons-903438 │ jenkins │ v1.36.0 │ 04 Sep 25 06:24 UTC │ 04 Sep 25 06:24 UTC │
│ addons │ enable headlamp -p addons-903438 --alsologtostderr -v=1 │ addons-903438 │ jenkins │ v1.36.0 │ 04 Sep 25 06:24 UTC │ 04 Sep 25 06:24 UTC │
│ ssh │ addons-903438 ssh cat /opt/local-path-provisioner/pvc-6aa63eb0-ba24-46af-ab92-52e9a2ec4d21_default_test-pvc/file1 │ addons-903438 │ jenkins │ v1.36.0 │ 04 Sep 25 06:24 UTC │ 04 Sep 25 06:24 UTC │
│ addons │ addons-903438 addons disable storage-provisioner-rancher --alsologtostderr -v=1 │ addons-903438 │ jenkins │ v1.36.0 │ 04 Sep 25 06:24 UTC │ 04 Sep 25 06:25 UTC │
│ addons │ addons-903438 addons disable headlamp --alsologtostderr -v=1 │ addons-903438 │ jenkins │ v1.36.0 │ 04 Sep 25 06:25 UTC │ 04 Sep 25 06:25 UTC │
│ addons │ addons-903438 addons disable metrics-server --alsologtostderr -v=1 │ addons-903438 │ jenkins │ v1.36.0 │ 04 Sep 25 06:25 UTC │ 04 Sep 25 06:25 UTC │
│ addons │ addons-903438 addons disable inspektor-gadget --alsologtostderr -v=1 │ addons-903438 │ jenkins │ v1.36.0 │ 04 Sep 25 06:25 UTC │ 04 Sep 25 06:25 UTC │
│ addons │ addons-903438 addons disable volumesnapshots --alsologtostderr -v=1 │ addons-903438 │ jenkins │ v1.36.0 │ 04 Sep 25 06:25 UTC │ 04 Sep 25 06:25 UTC │
│ addons │ addons-903438 addons disable csi-hostpath-driver --alsologtostderr -v=1 │ addons-903438 │ jenkins │ v1.36.0 │ 04 Sep 25 06:25 UTC │ 04 Sep 25 06:26 UTC │
│ addons │ configure registry-creds -f ./testdata/addons_testconfig.json -p addons-903438 │ addons-903438 │ jenkins │ v1.36.0 │ 04 Sep 25 06:26 UTC │ 04 Sep 25 06:26 UTC │
│ addons │ addons-903438 addons disable registry-creds --alsologtostderr -v=1 │ addons-903438 │ jenkins │ v1.36.0 │ 04 Sep 25 06:26 UTC │ 04 Sep 25 06:26 UTC │
│ ssh │ addons-903438 ssh curl -s http://127.0.0.1/ -H 'Host: nginx.example.com' │ addons-903438 │ jenkins │ v1.36.0 │ 04 Sep 25 06:26 UTC │ 04 Sep 25 06:26 UTC │
│ ip │ addons-903438 ip │ addons-903438 │ jenkins │ v1.36.0 │ 04 Sep 25 06:26 UTC │ 04 Sep 25 06:26 UTC │
│ addons │ addons-903438 addons disable ingress-dns --alsologtostderr -v=1 │ addons-903438 │ jenkins │ v1.36.0 │ 04 Sep 25 06:26 UTC │ 04 Sep 25 06:26 UTC │
│ addons │ addons-903438 addons disable ingress --alsologtostderr -v=1 │ addons-903438 │ jenkins │ v1.36.0 │ 04 Sep 25 06:26 UTC │ 04 Sep 25 06:26 UTC │
│ stop │ -p addons-903438 │ addons-903438 │ jenkins │ v1.36.0 │ 04 Sep 25 06:26 UTC │ 04 Sep 25 06:26 UTC │
│ addons │ enable dashboard -p addons-903438 │ addons-903438 │ jenkins │ v1.36.0 │ 04 Sep 25 06:26 UTC │ 04 Sep 25 06:26 UTC │
│ addons │ disable dashboard -p addons-903438 │ addons-903438 │ jenkins │ v1.36.0 │ 04 Sep 25 06:26 UTC │ 04 Sep 25 06:26 UTC │
│ addons │ disable gvisor -p addons-903438 │ addons-903438 │ jenkins │ v1.36.0 │ 04 Sep 25 06:26 UTC │ 04 Sep 25 06:26 UTC │
│ delete │ -p addons-903438 │ addons-903438 │ jenkins │ v1.36.0 │ 04 Sep 25 06:26 UTC │ 04 Sep 25 06:26 UTC │
│ start │ -p dockerenv-668100 --driver=docker --container-runtime=containerd │ dockerenv-668100 │ jenkins │ v1.36.0 │ 04 Sep 25 06:26 UTC │ 04 Sep 25 06:27 UTC │
│ docker-env │ --ssh-host --ssh-add -p dockerenv-668100 │ dockerenv-668100 │ jenkins │ v1.36.0 │ 04 Sep 25 06:27 UTC │ 04 Sep 25 06:27 UTC │
└────────────┴───────────────────────────────────────────────────────────────────────────────────────────────────────────────────┴──────────────────┴─────────┴─────────┴─────────────────────┴─────────────────────┘
==> Last Start <==
Log file created at: 2025/09/04 06:26:31
Running on machine: ip-172-31-31-251
Binary: Built with gc go1.24.6 for linux/arm64
Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg
I0904 06:26:31.230757 897361 out.go:360] Setting OutFile to fd 1 ...
I0904 06:26:31.230878 897361 out.go:408] TERM=,COLORTERM=, which probably does not support color
I0904 06:26:31.230882 897361 out.go:374] Setting ErrFile to fd 2...
I0904 06:26:31.230885 897361 out.go:408] TERM=,COLORTERM=, which probably does not support color
I0904 06:26:31.231148 897361 root.go:338] Updating PATH: /home/jenkins/minikube-integration/21409-875589/.minikube/bin
I0904 06:26:31.231548 897361 out.go:368] Setting JSON to false
I0904 06:26:31.232328 897361 start.go:130] hostinfo: {"hostname":"ip-172-31-31-251","uptime":14941,"bootTime":1756952251,"procs":148,"os":"linux","platform":"ubuntu","platformFamily":"debian","platformVersion":"20.04","kernelVersion":"5.15.0-1084-aws","kernelArch":"aarch64","virtualizationSystem":"","virtualizationRole":"","hostId":"982e3628-3742-4b3e-bb63-ac1b07660ec7"}
I0904 06:26:31.232390 897361 start.go:140] virtualization:
I0904 06:26:31.236890 897361 out.go:179] * [dockerenv-668100] minikube v1.36.0 on Ubuntu 20.04 (arm64)
I0904 06:26:31.241721 897361 notify.go:220] Checking for updates...
I0904 06:26:31.245835 897361 out.go:179] - MINIKUBE_LOCATION=21409
I0904 06:26:31.249329 897361 out.go:179] - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
I0904 06:26:31.252540 897361 out.go:179] - KUBECONFIG=/home/jenkins/minikube-integration/21409-875589/kubeconfig
I0904 06:26:31.255670 897361 out.go:179] - MINIKUBE_HOME=/home/jenkins/minikube-integration/21409-875589/.minikube
I0904 06:26:31.258796 897361 out.go:179] - MINIKUBE_BIN=out/minikube-linux-arm64
I0904 06:26:31.261927 897361 out.go:179] - MINIKUBE_FORCE_SYSTEMD=
I0904 06:26:31.265245 897361 driver.go:421] Setting default libvirt URI to qemu:///system
I0904 06:26:31.290462 897361 docker.go:123] docker version: linux-28.1.1:Docker Engine - Community
I0904 06:26:31.290565 897361 cli_runner.go:164] Run: docker system info --format "{{json .}}"
I0904 06:26:31.354096 897361 info.go:266] docker info: {ID:EOU5:DNGX:XN6V:L2FZ:UXRM:5TWK:EVUR:KC2F:GT7Z:Y4O4:GB77:5PD3 Containers:0 ContainersRunning:0 ContainersPaused:0 ContainersStopped:0 Images:2 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:false BridgeNfIP6Tables:false Debug:false NFd:23 OomKillDisable:true NGoroutines:42 SystemTime:2025-09-04 06:26:31.345035869 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1084-aws OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Architecture:aarch64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[::1/128 127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:2 MemTotal:8214835200 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ip-172-31-31-251 Labels:[] ExperimentalBuild:false ServerVersion:28.1.1 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:05044ec0a9a75232cad458027ca83437aae3f4da Expected:} RuncCommit:{ID:v1.2.5-0-g59923ef Expected:} InitCommit:{ID:de40ad0 Expected:} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.23.0] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.35.1]] Warnings:<nil>}}
I0904 06:26:31.354191 897361 docker.go:318] overlay module found
I0904 06:26:31.357459 897361 out.go:179] * Using the docker driver based on user configuration
I0904 06:26:31.360266 897361 start.go:304] selected driver: docker
I0904 06:26:31.360272 897361 start.go:918] validating driver "docker" against <nil>
I0904 06:26:31.360284 897361 start.go:929] status for docker: {Installed:true Healthy:true Running:false NeedsImprovement:false Error:<nil> Reason: Fix: Doc: Version:}
I0904 06:26:31.360399 897361 cli_runner.go:164] Run: docker system info --format "{{json .}}"
I0904 06:26:31.413690 897361 info.go:266] docker info: {ID:EOU5:DNGX:XN6V:L2FZ:UXRM:5TWK:EVUR:KC2F:GT7Z:Y4O4:GB77:5PD3 Containers:0 ContainersRunning:0 ContainersPaused:0 ContainersStopped:0 Images:2 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:false BridgeNfIP6Tables:false Debug:false NFd:23 OomKillDisable:true NGoroutines:42 SystemTime:2025-09-04 06:26:31.404143301 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1084-aws OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Architecture:aarch64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[::1/128 127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:2 MemTotal:8214835200 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ip-172-31-31-251 Labels:[] ExperimentalBuild:false ServerVersion:28.1.1 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:05044ec0a9a75232cad458027ca83437aae3f4da Expected:} RuncCommit:{ID:v1.2.5-0-g59923ef Expected:} InitCommit:{ID:de40ad0 Expected:} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.23.0] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.35.1]] Warnings:<nil>}}
I0904 06:26:31.413832 897361 start_flags.go:327] no existing cluster config was found, will generate one from the flags
I0904 06:26:31.414107 897361 start_flags.go:410] Using suggested 3072MB memory alloc based on sys=7834MB, container=7834MB
I0904 06:26:31.414258 897361 start_flags.go:974] Wait components to verify : map[apiserver:true system_pods:true]
I0904 06:26:31.417349 897361 out.go:179] * Using Docker driver with root privileges
I0904 06:26:31.420240 897361 cni.go:84] Creating CNI manager for ""
I0904 06:26:31.420304 897361 cni.go:143] "docker" driver + "containerd" runtime found, recommending kindnet
I0904 06:26:31.420311 897361 start_flags.go:336] Found "CNI" CNI - setting NetworkPlugin=cni
I0904 06:26:31.420385 897361 start.go:348] cluster config:
{Name:dockerenv-668100 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.47-1756936034-21409@sha256:06a2e6835062e5beff0e5288aa7d453ae87f4ed9d9f593dbbe436c8e34741bfc Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.0 ClusterName:dockerenv-668100 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.34.0 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I0904 06:26:31.423562 897361 out.go:179] * Starting "dockerenv-668100" primary control-plane node in "dockerenv-668100" cluster
I0904 06:26:31.426343 897361 cache.go:123] Beginning downloading kic base image for docker with containerd
I0904 06:26:31.429262 897361 out.go:179] * Pulling base image v0.0.47-1756936034-21409 ...
I0904 06:26:31.432082 897361 preload.go:131] Checking if preload exists for k8s version v1.34.0 and runtime containerd
I0904 06:26:31.432137 897361 preload.go:146] Found local preload: /home/jenkins/minikube-integration/21409-875589/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.34.0-containerd-overlay2-arm64.tar.lz4
I0904 06:26:31.432176 897361 cache.go:58] Caching tarball of preloaded images
I0904 06:26:31.432180 897361 image.go:81] Checking for gcr.io/k8s-minikube/kicbase-builds:v0.0.47-1756936034-21409@sha256:06a2e6835062e5beff0e5288aa7d453ae87f4ed9d9f593dbbe436c8e34741bfc in local docker daemon
I0904 06:26:31.432309 897361 preload.go:172] Found /home/jenkins/minikube-integration/21409-875589/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.34.0-containerd-overlay2-arm64.tar.lz4 in cache, skipping download
I0904 06:26:31.432320 897361 cache.go:61] Finished verifying existence of preloaded tar for v1.34.0 on containerd
I0904 06:26:31.432654 897361 profile.go:143] Saving config to /home/jenkins/minikube-integration/21409-875589/.minikube/profiles/dockerenv-668100/config.json ...
I0904 06:26:31.432674 897361 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21409-875589/.minikube/profiles/dockerenv-668100/config.json: {Name:mk48e1434515eebf26f473c4887134b4f912e69f Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0904 06:26:31.455767 897361 image.go:100] Found gcr.io/k8s-minikube/kicbase-builds:v0.0.47-1756936034-21409@sha256:06a2e6835062e5beff0e5288aa7d453ae87f4ed9d9f593dbbe436c8e34741bfc in local docker daemon, skipping pull
I0904 06:26:31.455779 897361 cache.go:147] gcr.io/k8s-minikube/kicbase-builds:v0.0.47-1756936034-21409@sha256:06a2e6835062e5beff0e5288aa7d453ae87f4ed9d9f593dbbe436c8e34741bfc exists in daemon, skipping load
I0904 06:26:31.455799 897361 cache.go:232] Successfully downloaded all kic artifacts
I0904 06:26:31.455831 897361 start.go:360] acquireMachinesLock for dockerenv-668100: {Name:mkdf493a7becb458700087979914aeee71b2a19f Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I0904 06:26:31.456580 897361 start.go:364] duration metric: took 733.213µs to acquireMachinesLock for "dockerenv-668100"
I0904 06:26:31.456611 897361 start.go:93] Provisioning new machine with config: &{Name:dockerenv-668100 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.47-1756936034-21409@sha256:06a2e6835062e5beff0e5288aa7d453ae87f4ed9d9f593dbbe436c8e34741bfc Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.0 ClusterName:dockerenv-668100 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.34.0 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} &{Name: IP: Port:8443 KubernetesVersion:v1.34.0 ContainerRuntime:containerd ControlPlane:true Worker:true}
I0904 06:26:31.456679 897361 start.go:125] createHost starting for "" (driver="docker")
I0904 06:26:31.460032 897361 out.go:252] * Creating docker container (CPUs=2, Memory=3072MB) ...
I0904 06:26:31.460289 897361 start.go:159] libmachine.API.Create for "dockerenv-668100" (driver="docker")
I0904 06:26:31.460318 897361 client.go:168] LocalClient.Create starting
I0904 06:26:31.460383 897361 main.go:141] libmachine: Reading certificate data from /home/jenkins/minikube-integration/21409-875589/.minikube/certs/ca.pem
I0904 06:26:31.460422 897361 main.go:141] libmachine: Decoding PEM data...
I0904 06:26:31.460433 897361 main.go:141] libmachine: Parsing certificate...
I0904 06:26:31.460488 897361 main.go:141] libmachine: Reading certificate data from /home/jenkins/minikube-integration/21409-875589/.minikube/certs/cert.pem
I0904 06:26:31.460507 897361 main.go:141] libmachine: Decoding PEM data...
I0904 06:26:31.460515 897361 main.go:141] libmachine: Parsing certificate...
I0904 06:26:31.460876 897361 cli_runner.go:164] Run: docker network inspect dockerenv-668100 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
W0904 06:26:31.476481 897361 cli_runner.go:211] docker network inspect dockerenv-668100 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}" returned with exit code 1
I0904 06:26:31.476550 897361 network_create.go:284] running [docker network inspect dockerenv-668100] to gather additional debugging logs...
I0904 06:26:31.476565 897361 cli_runner.go:164] Run: docker network inspect dockerenv-668100
W0904 06:26:31.492369 897361 cli_runner.go:211] docker network inspect dockerenv-668100 returned with exit code 1
I0904 06:26:31.492388 897361 network_create.go:287] error running [docker network inspect dockerenv-668100]: docker network inspect dockerenv-668100: exit status 1
stdout:
[]
stderr:
Error response from daemon: network dockerenv-668100 not found
I0904 06:26:31.492399 897361 network_create.go:289] output of [docker network inspect dockerenv-668100]: -- stdout --
[]
-- /stdout --
** stderr **
Error response from daemon: network dockerenv-668100 not found
** /stderr **
I0904 06:26:31.492521 897361 cli_runner.go:164] Run: docker network inspect bridge --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I0904 06:26:31.513524 897361 network.go:206] using free private subnet 192.168.49.0/24: &{IP:192.168.49.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.49.0/24 Gateway:192.168.49.1 ClientMin:192.168.49.2 ClientMax:192.168.49.254 Broadcast:192.168.49.255 IsPrivate:true Interface:{IfaceName: IfaceIPv4: IfaceMTU:0 IfaceMAC:} reservation:0x40018c2a80}
I0904 06:26:31.513554 897361 network_create.go:124] attempt to create docker network dockerenv-668100 192.168.49.0/24 with gateway 192.168.49.1 and MTU of 1500 ...
I0904 06:26:31.513604 897361 cli_runner.go:164] Run: docker network create --driver=bridge --subnet=192.168.49.0/24 --gateway=192.168.49.1 -o --ip-masq -o --icc -o com.docker.network.driver.mtu=1500 --label=created_by.minikube.sigs.k8s.io=true --label=name.minikube.sigs.k8s.io=dockerenv-668100 dockerenv-668100
I0904 06:26:31.570278 897361 network_create.go:108] docker network dockerenv-668100 192.168.49.0/24 created
I0904 06:26:31.570299 897361 kic.go:121] calculated static IP "192.168.49.2" for the "dockerenv-668100" container
I0904 06:26:31.570386 897361 cli_runner.go:164] Run: docker ps -a --format {{.Names}}
I0904 06:26:31.586217 897361 cli_runner.go:164] Run: docker volume create dockerenv-668100 --label name.minikube.sigs.k8s.io=dockerenv-668100 --label created_by.minikube.sigs.k8s.io=true
I0904 06:26:31.605345 897361 oci.go:103] Successfully created a docker volume dockerenv-668100
I0904 06:26:31.605433 897361 cli_runner.go:164] Run: docker run --rm --name dockerenv-668100-preload-sidecar --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=dockerenv-668100 --entrypoint /usr/bin/test -v dockerenv-668100:/var gcr.io/k8s-minikube/kicbase-builds:v0.0.47-1756936034-21409@sha256:06a2e6835062e5beff0e5288aa7d453ae87f4ed9d9f593dbbe436c8e34741bfc -d /var/lib
I0904 06:26:32.163599 897361 oci.go:107] Successfully prepared a docker volume dockerenv-668100
I0904 06:26:32.163642 897361 preload.go:131] Checking if preload exists for k8s version v1.34.0 and runtime containerd
I0904 06:26:32.163660 897361 kic.go:194] Starting extracting preloaded images to volume ...
I0904 06:26:32.163737 897361 cli_runner.go:164] Run: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/21409-875589/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.34.0-containerd-overlay2-arm64.tar.lz4:/preloaded.tar:ro -v dockerenv-668100:/extractDir gcr.io/k8s-minikube/kicbase-builds:v0.0.47-1756936034-21409@sha256:06a2e6835062e5beff0e5288aa7d453ae87f4ed9d9f593dbbe436c8e34741bfc -I lz4 -xf /preloaded.tar -C /extractDir
I0904 06:26:36.421304 897361 cli_runner.go:217] Completed: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/21409-875589/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.34.0-containerd-overlay2-arm64.tar.lz4:/preloaded.tar:ro -v dockerenv-668100:/extractDir gcr.io/k8s-minikube/kicbase-builds:v0.0.47-1756936034-21409@sha256:06a2e6835062e5beff0e5288aa7d453ae87f4ed9d9f593dbbe436c8e34741bfc -I lz4 -xf /preloaded.tar -C /extractDir: (4.257532147s)
I0904 06:26:36.421338 897361 kic.go:203] duration metric: took 4.257660412s to extract preloaded images to volume ...
W0904 06:26:36.421741 897361 cgroups_linux.go:77] Your kernel does not support swap limit capabilities or the cgroup is not mounted.
I0904 06:26:36.421841 897361 cli_runner.go:164] Run: docker info --format "'{{json .SecurityOptions}}'"
I0904 06:26:36.476385 897361 cli_runner.go:164] Run: docker run -d -t --privileged --security-opt seccomp=unconfined --tmpfs /tmp --tmpfs /run -v /lib/modules:/lib/modules:ro --hostname dockerenv-668100 --name dockerenv-668100 --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=dockerenv-668100 --label role.minikube.sigs.k8s.io= --label mode.minikube.sigs.k8s.io=dockerenv-668100 --network dockerenv-668100 --ip 192.168.49.2 --volume dockerenv-668100:/var --security-opt apparmor=unconfined --memory=3072mb --cpus=2 -e container=docker --expose 8443 --publish=127.0.0.1::8443 --publish=127.0.0.1::22 --publish=127.0.0.1::2376 --publish=127.0.0.1::5000 --publish=127.0.0.1::32443 gcr.io/k8s-minikube/kicbase-builds:v0.0.47-1756936034-21409@sha256:06a2e6835062e5beff0e5288aa7d453ae87f4ed9d9f593dbbe436c8e34741bfc
I0904 06:26:36.760431 897361 cli_runner.go:164] Run: docker container inspect dockerenv-668100 --format={{.State.Running}}
I0904 06:26:36.780129 897361 cli_runner.go:164] Run: docker container inspect dockerenv-668100 --format={{.State.Status}}
I0904 06:26:36.803587 897361 cli_runner.go:164] Run: docker exec dockerenv-668100 stat /var/lib/dpkg/alternatives/iptables
I0904 06:26:36.861437 897361 oci.go:144] the created container "dockerenv-668100" has a running status.
I0904 06:26:36.861463 897361 kic.go:225] Creating ssh key for kic: /home/jenkins/minikube-integration/21409-875589/.minikube/machines/dockerenv-668100/id_rsa...
I0904 06:26:36.985120 897361 kic_runner.go:191] docker (temp): /home/jenkins/minikube-integration/21409-875589/.minikube/machines/dockerenv-668100/id_rsa.pub --> /home/docker/.ssh/authorized_keys (381 bytes)
I0904 06:26:37.014108 897361 cli_runner.go:164] Run: docker container inspect dockerenv-668100 --format={{.State.Status}}
I0904 06:26:37.035668 897361 kic_runner.go:93] Run: chown docker:docker /home/docker/.ssh/authorized_keys
I0904 06:26:37.035679 897361 kic_runner.go:114] Args: [docker exec --privileged dockerenv-668100 chown docker:docker /home/docker/.ssh/authorized_keys]
I0904 06:26:37.094797 897361 cli_runner.go:164] Run: docker container inspect dockerenv-668100 --format={{.State.Status}}
I0904 06:26:37.125369 897361 machine.go:93] provisionDockerMachine start ...
I0904 06:26:37.125469 897361 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" dockerenv-668100
I0904 06:26:37.154766 897361 main.go:141] libmachine: Using SSH client type: native
I0904 06:26:37.155102 897361 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x3ef840] 0x3f2000 <nil> [] 0s} 127.0.0.1 33884 <nil> <nil>}
I0904 06:26:37.155108 897361 main.go:141] libmachine: About to run SSH command:
hostname
I0904 06:26:37.155733 897361 main.go:141] libmachine: Error dialing TCP: ssh: handshake failed: read tcp 127.0.0.1:49616->127.0.0.1:33884: read: connection reset by peer
I0904 06:26:40.284347 897361 main.go:141] libmachine: SSH cmd err, output: <nil>: dockerenv-668100
I0904 06:26:40.284361 897361 ubuntu.go:182] provisioning hostname "dockerenv-668100"
I0904 06:26:40.284423 897361 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" dockerenv-668100
I0904 06:26:40.301621 897361 main.go:141] libmachine: Using SSH client type: native
I0904 06:26:40.301930 897361 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x3ef840] 0x3f2000 <nil> [] 0s} 127.0.0.1 33884 <nil> <nil>}
I0904 06:26:40.301939 897361 main.go:141] libmachine: About to run SSH command:
sudo hostname dockerenv-668100 && echo "dockerenv-668100" | sudo tee /etc/hostname
I0904 06:26:40.437210 897361 main.go:141] libmachine: SSH cmd err, output: <nil>: dockerenv-668100
I0904 06:26:40.437291 897361 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" dockerenv-668100
I0904 06:26:40.455069 897361 main.go:141] libmachine: Using SSH client type: native
I0904 06:26:40.455365 897361 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x3ef840] 0x3f2000 <nil> [] 0s} 127.0.0.1 33884 <nil> <nil>}
I0904 06:26:40.455379 897361 main.go:141] libmachine: About to run SSH command:
if ! grep -xq '.*\sdockerenv-668100' /etc/hosts; then
if grep -xq '127.0.1.1\s.*' /etc/hosts; then
sudo sed -i 's/^127.0.1.1\s.*/127.0.1.1 dockerenv-668100/g' /etc/hosts;
else
echo '127.0.1.1 dockerenv-668100' | sudo tee -a /etc/hosts;
fi
fi
I0904 06:26:40.581244 897361 main.go:141] libmachine: SSH cmd err, output: <nil>:
I0904 06:26:40.581262 897361 ubuntu.go:188] set auth options {CertDir:/home/jenkins/minikube-integration/21409-875589/.minikube CaCertPath:/home/jenkins/minikube-integration/21409-875589/.minikube/certs/ca.pem CaPrivateKeyPath:/home/jenkins/minikube-integration/21409-875589/.minikube/certs/ca-key.pem CaCertRemotePath:/etc/docker/ca.pem ServerCertPath:/home/jenkins/minikube-integration/21409-875589/.minikube/machines/server.pem ServerKeyPath:/home/jenkins/minikube-integration/21409-875589/.minikube/machines/server-key.pem ClientKeyPath:/home/jenkins/minikube-integration/21409-875589/.minikube/certs/key.pem ServerCertRemotePath:/etc/docker/server.pem ServerKeyRemotePath:/etc/docker/server-key.pem ClientCertPath:/home/jenkins/minikube-integration/21409-875589/.minikube/certs/cert.pem ServerCertSANs:[] StorePath:/home/jenkins/minikube-integration/21409-875589/.minikube}
I0904 06:26:40.581278 897361 ubuntu.go:190] setting up certificates
I0904 06:26:40.581286 897361 provision.go:84] configureAuth start
I0904 06:26:40.581358 897361 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" dockerenv-668100
I0904 06:26:40.598354 897361 provision.go:143] copyHostCerts
I0904 06:26:40.598416 897361 exec_runner.go:144] found /home/jenkins/minikube-integration/21409-875589/.minikube/cert.pem, removing ...
I0904 06:26:40.598424 897361 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21409-875589/.minikube/cert.pem
I0904 06:26:40.598510 897361 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21409-875589/.minikube/certs/cert.pem --> /home/jenkins/minikube-integration/21409-875589/.minikube/cert.pem (1123 bytes)
I0904 06:26:40.598609 897361 exec_runner.go:144] found /home/jenkins/minikube-integration/21409-875589/.minikube/key.pem, removing ...
I0904 06:26:40.598614 897361 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21409-875589/.minikube/key.pem
I0904 06:26:40.598639 897361 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21409-875589/.minikube/certs/key.pem --> /home/jenkins/minikube-integration/21409-875589/.minikube/key.pem (1675 bytes)
I0904 06:26:40.598697 897361 exec_runner.go:144] found /home/jenkins/minikube-integration/21409-875589/.minikube/ca.pem, removing ...
I0904 06:26:40.598705 897361 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21409-875589/.minikube/ca.pem
I0904 06:26:40.598727 897361 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21409-875589/.minikube/certs/ca.pem --> /home/jenkins/minikube-integration/21409-875589/.minikube/ca.pem (1082 bytes)
I0904 06:26:40.598778 897361 provision.go:117] generating server cert: /home/jenkins/minikube-integration/21409-875589/.minikube/machines/server.pem ca-key=/home/jenkins/minikube-integration/21409-875589/.minikube/certs/ca.pem private-key=/home/jenkins/minikube-integration/21409-875589/.minikube/certs/ca-key.pem org=jenkins.dockerenv-668100 san=[127.0.0.1 192.168.49.2 dockerenv-668100 localhost minikube]
I0904 06:26:41.829759 897361 provision.go:177] copyRemoteCerts
I0904 06:26:41.829811 897361 ssh_runner.go:195] Run: sudo mkdir -p /etc/docker /etc/docker /etc/docker
I0904 06:26:41.829855 897361 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" dockerenv-668100
I0904 06:26:41.847108 897361 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33884 SSHKeyPath:/home/jenkins/minikube-integration/21409-875589/.minikube/machines/dockerenv-668100/id_rsa Username:docker}
I0904 06:26:41.938363 897361 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21409-875589/.minikube/machines/server-key.pem --> /etc/docker/server-key.pem (1675 bytes)
I0904 06:26:41.964272 897361 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21409-875589/.minikube/certs/ca.pem --> /etc/docker/ca.pem (1082 bytes)
I0904 06:26:41.989918 897361 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21409-875589/.minikube/machines/server.pem --> /etc/docker/server.pem (1216 bytes)
I0904 06:26:42.017156 897361 provision.go:87] duration metric: took 1.435854653s to configureAuth
I0904 06:26:42.017176 897361 ubuntu.go:206] setting minikube options for container-runtime
I0904 06:26:42.017406 897361 config.go:182] Loaded profile config "dockerenv-668100": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.34.0
I0904 06:26:42.017412 897361 machine.go:96] duration metric: took 4.8920323s to provisionDockerMachine
I0904 06:26:42.017418 897361 client.go:171] duration metric: took 10.557096934s to LocalClient.Create
I0904 06:26:42.017445 897361 start.go:167] duration metric: took 10.557153961s to libmachine.API.Create "dockerenv-668100"
I0904 06:26:42.017453 897361 start.go:293] postStartSetup for "dockerenv-668100" (driver="docker")
I0904 06:26:42.017463 897361 start.go:322] creating required directories: [/etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs]
I0904 06:26:42.017517 897361 ssh_runner.go:195] Run: sudo mkdir -p /etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs
I0904 06:26:42.017556 897361 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" dockerenv-668100
I0904 06:26:42.037008 897361 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33884 SSHKeyPath:/home/jenkins/minikube-integration/21409-875589/.minikube/machines/dockerenv-668100/id_rsa Username:docker}
I0904 06:26:42.137368 897361 ssh_runner.go:195] Run: cat /etc/os-release
I0904 06:26:42.142876 897361 main.go:141] libmachine: Couldn't set key VERSION_CODENAME, no corresponding struct field found
I0904 06:26:42.142903 897361 main.go:141] libmachine: Couldn't set key PRIVACY_POLICY_URL, no corresponding struct field found
I0904 06:26:42.142912 897361 main.go:141] libmachine: Couldn't set key UBUNTU_CODENAME, no corresponding struct field found
I0904 06:26:42.142919 897361 info.go:137] Remote host: Ubuntu 22.04.5 LTS
I0904 06:26:42.142930 897361 filesync.go:126] Scanning /home/jenkins/minikube-integration/21409-875589/.minikube/addons for local assets ...
I0904 06:26:42.143007 897361 filesync.go:126] Scanning /home/jenkins/minikube-integration/21409-875589/.minikube/files for local assets ...
I0904 06:26:42.143030 897361 start.go:296] duration metric: took 125.571406ms for postStartSetup
I0904 06:26:42.143418 897361 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" dockerenv-668100
I0904 06:26:42.165089 897361 profile.go:143] Saving config to /home/jenkins/minikube-integration/21409-875589/.minikube/profiles/dockerenv-668100/config.json ...
I0904 06:26:42.165414 897361 ssh_runner.go:195] Run: sh -c "df -h /var | awk 'NR==2{print $5}'"
I0904 06:26:42.165466 897361 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" dockerenv-668100
I0904 06:26:42.186768 897361 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33884 SSHKeyPath:/home/jenkins/minikube-integration/21409-875589/.minikube/machines/dockerenv-668100/id_rsa Username:docker}
I0904 06:26:42.283420 897361 ssh_runner.go:195] Run: sh -c "df -BG /var | awk 'NR==2{print $4}'"
I0904 06:26:42.288625 897361 start.go:128] duration metric: took 10.831925351s to createHost
I0904 06:26:42.288641 897361 start.go:83] releasing machines lock for "dockerenv-668100", held for 10.83205163s
I0904 06:26:42.288723 897361 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" dockerenv-668100
I0904 06:26:42.306964 897361 ssh_runner.go:195] Run: cat /version.json
I0904 06:26:42.307002 897361 ssh_runner.go:195] Run: curl -sS -m 2 https://registry.k8s.io/
I0904 06:26:42.307018 897361 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" dockerenv-668100
I0904 06:26:42.307052 897361 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" dockerenv-668100
I0904 06:26:42.328479 897361 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33884 SSHKeyPath:/home/jenkins/minikube-integration/21409-875589/.minikube/machines/dockerenv-668100/id_rsa Username:docker}
I0904 06:26:42.337216 897361 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33884 SSHKeyPath:/home/jenkins/minikube-integration/21409-875589/.minikube/machines/dockerenv-668100/id_rsa Username:docker}
I0904 06:26:42.547480 897361 ssh_runner.go:195] Run: systemctl --version
I0904 06:26:42.555320 897361 ssh_runner.go:195] Run: sh -c "stat /etc/cni/net.d/*loopback.conf*"
I0904 06:26:42.559983 897361 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f -name *loopback.conf* -not -name *.mk_disabled -exec sh -c "grep -q loopback {} && ( grep -q name {} || sudo sed -i '/"type": "loopback"/i \ \ \ \ "name": "loopback",' {} ) && sudo sed -i 's|"cniVersion": ".*"|"cniVersion": "1.0.0"|g' {}" ;
I0904 06:26:42.586188 897361 cni.go:230] loopback cni configuration patched: "/etc/cni/net.d/*loopback.conf*" found
I0904 06:26:42.586258 897361 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f ( ( -name *bridge* -or -name *podman* ) -and -not -name *.mk_disabled ) -printf "%p, " -exec sh -c "sudo mv {} {}.mk_disabled" ;
I0904 06:26:42.620239 897361 cni.go:262] disabled [/etc/cni/net.d/87-podman-bridge.conflist, /etc/cni/net.d/100-crio-bridge.conf] bridge cni config(s)
I0904 06:26:42.620258 897361 start.go:495] detecting cgroup driver to use...
I0904 06:26:42.620293 897361 detect.go:187] detected "cgroupfs" cgroup driver on host os
I0904 06:26:42.620341 897361 ssh_runner.go:195] Run: sudo systemctl stop -f crio
I0904 06:26:42.633339 897361 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service crio
I0904 06:26:42.645359 897361 docker.go:218] disabling cri-docker service (if available) ...
I0904 06:26:42.645419 897361 ssh_runner.go:195] Run: sudo systemctl stop -f cri-docker.socket
I0904 06:26:42.660212 897361 ssh_runner.go:195] Run: sudo systemctl stop -f cri-docker.service
I0904 06:26:42.675541 897361 ssh_runner.go:195] Run: sudo systemctl disable cri-docker.socket
I0904 06:26:42.767809 897361 ssh_runner.go:195] Run: sudo systemctl mask cri-docker.service
I0904 06:26:42.868714 897361 docker.go:234] disabling docker service ...
I0904 06:26:42.868774 897361 ssh_runner.go:195] Run: sudo systemctl stop -f docker.socket
I0904 06:26:42.891527 897361 ssh_runner.go:195] Run: sudo systemctl stop -f docker.service
I0904 06:26:42.903111 897361 ssh_runner.go:195] Run: sudo systemctl disable docker.socket
I0904 06:26:43.001747 897361 ssh_runner.go:195] Run: sudo systemctl mask docker.service
I0904 06:26:43.095711 897361 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service docker
I0904 06:26:43.107567 897361 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///run/containerd/containerd.sock
" | sudo tee /etc/crictl.yaml"
I0904 06:26:43.124135 897361 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)sandbox_image = .*$|\1sandbox_image = "registry.k8s.io/pause:3.10.1"|' /etc/containerd/config.toml"
I0904 06:26:43.134515 897361 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)restrict_oom_score_adj = .*$|\1restrict_oom_score_adj = false|' /etc/containerd/config.toml"
I0904 06:26:43.144730 897361 containerd.go:146] configuring containerd to use "cgroupfs" as cgroup driver...
I0904 06:26:43.144793 897361 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)SystemdCgroup = .*$|\1SystemdCgroup = false|g' /etc/containerd/config.toml"
I0904 06:26:43.155001 897361 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runtime.v1.linux"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I0904 06:26:43.165063 897361 ssh_runner.go:195] Run: sh -c "sudo sed -i '/systemd_cgroup/d' /etc/containerd/config.toml"
I0904 06:26:43.175267 897361 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runc.v1"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I0904 06:26:43.185646 897361 ssh_runner.go:195] Run: sh -c "sudo rm -rf /etc/cni/net.mk"
I0904 06:26:43.195018 897361 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)conf_dir = .*$|\1conf_dir = "/etc/cni/net.d"|g' /etc/containerd/config.toml"
I0904 06:26:43.204862 897361 ssh_runner.go:195] Run: sh -c "sudo sed -i '/^ *enable_unprivileged_ports = .*/d' /etc/containerd/config.toml"
I0904 06:26:43.214684 897361 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)\[plugins."io.containerd.grpc.v1.cri"\]|&\n\1 enable_unprivileged_ports = true|' /etc/containerd/config.toml"
I0904 06:26:43.224932 897361 ssh_runner.go:195] Run: sudo sysctl net.bridge.bridge-nf-call-iptables
I0904 06:26:43.233933 897361 ssh_runner.go:195] Run: sudo sh -c "echo 1 > /proc/sys/net/ipv4/ip_forward"
I0904 06:26:43.242394 897361 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0904 06:26:43.329493 897361 ssh_runner.go:195] Run: sudo systemctl restart containerd
I0904 06:26:43.450262 897361 start.go:542] Will wait 60s for socket path /run/containerd/containerd.sock
I0904 06:26:43.450322 897361 ssh_runner.go:195] Run: stat /run/containerd/containerd.sock
I0904 06:26:43.454107 897361 start.go:563] Will wait 60s for crictl version
I0904 06:26:43.454161 897361 ssh_runner.go:195] Run: which crictl
I0904 06:26:43.457679 897361 ssh_runner.go:195] Run: sudo /usr/bin/crictl version
I0904 06:26:43.495066 897361 start.go:579] Version: 0.1.0
RuntimeName: containerd
RuntimeVersion: 1.7.27
RuntimeApiVersion: v1
I0904 06:26:43.495122 897361 ssh_runner.go:195] Run: containerd --version
I0904 06:26:43.521088 897361 ssh_runner.go:195] Run: containerd --version
I0904 06:26:43.550487 897361 out.go:179] * Preparing Kubernetes v1.34.0 on containerd 1.7.27 ...
I0904 06:26:43.553382 897361 cli_runner.go:164] Run: docker network inspect dockerenv-668100 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I0904 06:26:43.570634 897361 ssh_runner.go:195] Run: grep 192.168.49.1 host.minikube.internal$ /etc/hosts
I0904 06:26:43.574300 897361 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\thost.minikube.internal$' "/etc/hosts"; echo "192.168.49.1 host.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I0904 06:26:43.589775 897361 kubeadm.go:875] updating cluster {Name:dockerenv-668100 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.47-1756936034-21409@sha256:06a2e6835062e5beff0e5288aa7d453ae87f4ed9d9f593dbbe436c8e34741bfc Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.0 ClusterName:dockerenv-668100 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.49.2 Port:8443 KubernetesVersion:v1.34.0 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} ...
I0904 06:26:43.589899 897361 preload.go:131] Checking if preload exists for k8s version v1.34.0 and runtime containerd
I0904 06:26:43.589958 897361 ssh_runner.go:195] Run: sudo crictl images --output json
I0904 06:26:43.625415 897361 containerd.go:627] all images are preloaded for containerd runtime.
I0904 06:26:43.625427 897361 containerd.go:534] Images already preloaded, skipping extraction
I0904 06:26:43.625486 897361 ssh_runner.go:195] Run: sudo crictl images --output json
I0904 06:26:43.661914 897361 containerd.go:627] all images are preloaded for containerd runtime.
I0904 06:26:43.661927 897361 cache_images.go:85] Images are preloaded, skipping loading
I0904 06:26:43.661935 897361 kubeadm.go:926] updating node { 192.168.49.2 8443 v1.34.0 containerd true true} ...
I0904 06:26:43.662042 897361 kubeadm.go:938] kubelet [Unit]
Wants=containerd.service
[Service]
ExecStart=
ExecStart=/var/lib/minikube/binaries/v1.34.0/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --config=/var/lib/kubelet/config.yaml --hostname-override=dockerenv-668100 --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.49.2
[Install]
config:
{KubernetesVersion:v1.34.0 ClusterName:dockerenv-668100 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:}
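A note on the kubelet unit fragment above: systemd drop-ins append to list-valued settings rather than replace them, and a service may declare only one ExecStart, so the drop-in written to /etc/systemd/system/kubelet.service.d/10-kubeadm.conf (scp'd a few lines below) first clears the inherited command with a bare ExecStart= and only then sets minikube's command line. A minimal sketch of writing such a drop-in by hand, with the flag set trimmed to one for brevity:
sudo tee /etc/systemd/system/kubelet.service.d/10-kubeadm.conf >/dev/null <<'EOF'
[Service]
ExecStart=
ExecStart=/var/lib/minikube/binaries/v1.34.0/kubelet --config=/var/lib/kubelet/config.yaml
EOF
sudo systemctl daemon-reload   # required before systemd sees the new drop-in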
I0904 06:26:43.662113 897361 ssh_runner.go:195] Run: sudo crictl info
I0904 06:26:43.705445 897361 cni.go:84] Creating CNI manager for ""
I0904 06:26:43.705457 897361 cni.go:143] "docker" driver + "containerd" runtime found, recommending kindnet
I0904 06:26:43.705465 897361 kubeadm.go:84] Using pod CIDR: 10.244.0.0/16
I0904 06:26:43.705488  897361 kubeadm.go:189] kubeadm options: {CertDir:/var/lib/minikube/certs ServiceCIDR:10.96.0.0/12 PodSubnet:10.244.0.0/16 AdvertiseAddress:192.168.49.2 APIServerPort:8443 KubernetesVersion:v1.34.0 EtcdDataDir:/var/lib/minikube/etcd EtcdExtraArgs:map[] ClusterName:dockerenv-668100 NodeName:dockerenv-668100 DNSDomain:cluster.local CRISocket:/run/containerd/containerd.sock ImageRepository: ComponentOptions:[{Component:apiServer ExtraArgs:map[enable-admission-plugins:NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota] Pairs:map[certSANs:["127.0.0.1", "localhost", "192.168.49.2"]]} {Component:controllerManager ExtraArgs:map[allocate-node-cidrs:true leader-elect:false] Pairs:map[]} {Component:scheduler ExtraArgs:map[leader-elect:false] Pairs:map[]}] FeatureArgs:map[] NodeIP:192.168.49.2 CgroupDriver:cgroupfs ClientCAFile:/var/lib/minikube/certs/ca.crt StaticPodPath:/etc/kubernetes/manifests ControlPlaneAddress:control-plane.minikube.internal KubeProxyOptions:map[] ResolvConfSearchRegression:false KubeletConfigOpts:map[containerRuntimeEndpoint:unix:///run/containerd/containerd.sock hairpinMode:hairpin-veth runtimeRequestTimeout:15m] PrependCriSocketUnix:true}
I0904 06:26:43.705598 897361 kubeadm.go:195] kubeadm config:
apiVersion: kubeadm.k8s.io/v1beta4
kind: InitConfiguration
localAPIEndpoint:
advertiseAddress: 192.168.49.2
bindPort: 8443
bootstrapTokens:
- groups:
- system:bootstrappers:kubeadm:default-node-token
ttl: 24h0m0s
usages:
- signing
- authentication
nodeRegistration:
criSocket: unix:///run/containerd/containerd.sock
name: "dockerenv-668100"
kubeletExtraArgs:
- name: "node-ip"
value: "192.168.49.2"
taints: []
---
apiVersion: kubeadm.k8s.io/v1beta4
kind: ClusterConfiguration
apiServer:
certSANs: ["127.0.0.1", "localhost", "192.168.49.2"]
extraArgs:
- name: "enable-admission-plugins"
value: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
controllerManager:
extraArgs:
- name: "allocate-node-cidrs"
value: "true"
- name: "leader-elect"
value: "false"
scheduler:
extraArgs:
- name: "leader-elect"
value: "false"
certificatesDir: /var/lib/minikube/certs
clusterName: mk
controlPlaneEndpoint: control-plane.minikube.internal:8443
etcd:
local:
dataDir: /var/lib/minikube/etcd
kubernetesVersion: v1.34.0
networking:
dnsDomain: cluster.local
podSubnet: "10.244.0.0/16"
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
authentication:
x509:
clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: cgroupfs
containerRuntimeEndpoint: unix:///run/containerd/containerd.sock
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"
nodefs.inodesFree: "0%"
imagefs.available: "0%"
failSwapOn: false
staticPodPath: /etc/kubernetes/manifests
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
maxPerCore: 0
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_established"
tcpEstablishedTimeout: 0s
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_close"
tcpCloseWaitTimeout: 0s
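Everything between the "kubeadm config:" marker above and this point is a single multi-document YAML file (InitConfiguration, ClusterConfiguration, KubeletConfiguration, and KubeProxyConfiguration joined by ---); it lands on the node as /var/tmp/minikube/kubeadm.yaml a few lines below. One way to exercise such a file without changing the node, assuming the same kubeadm binary, is a dry run:
# Parse the config and print what kubeadm would do, creating nothing permanent
sudo /var/lib/minikube/binaries/v1.34.0/kubeadm init --config /var/tmp/minikube/kubeadm.yaml --dry-run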
I0904 06:26:43.705669 897361 ssh_runner.go:195] Run: sudo ls /var/lib/minikube/binaries/v1.34.0
I0904 06:26:43.715192 897361 binaries.go:44] Found k8s binaries, skipping transfer
I0904 06:26:43.715256 897361 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/kubelet.service.d /lib/systemd/system /var/tmp/minikube
I0904 06:26:43.724103 897361 ssh_runner.go:362] scp memory --> /etc/systemd/system/kubelet.service.d/10-kubeadm.conf (320 bytes)
I0904 06:26:43.742347 897361 ssh_runner.go:362] scp memory --> /lib/systemd/system/kubelet.service (352 bytes)
I0904 06:26:43.761142 897361 ssh_runner.go:362] scp memory --> /var/tmp/minikube/kubeadm.yaml.new (2229 bytes)
I0904 06:26:43.779574 897361 ssh_runner.go:195] Run: grep 192.168.49.2 control-plane.minikube.internal$ /etc/hosts
I0904 06:26:43.782934 897361 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\tcontrol-plane.minikube.internal$' "/etc/hosts"; echo "192.168.49.2 control-plane.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I0904 06:26:43.793900 897361 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0904 06:26:43.874301 897361 ssh_runner.go:195] Run: sudo systemctl start kubelet
I0904 06:26:43.889806 897361 certs.go:68] Setting up /home/jenkins/minikube-integration/21409-875589/.minikube/profiles/dockerenv-668100 for IP: 192.168.49.2
I0904 06:26:43.889817 897361 certs.go:194] generating shared ca certs ...
I0904 06:26:43.889832 897361 certs.go:226] acquiring lock for ca certs: {Name:mk68a829d29b2e2571b1ce9f16db9b9845de8f29 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0904 06:26:43.890000 897361 certs.go:235] skipping valid "minikubeCA" ca cert: /home/jenkins/minikube-integration/21409-875589/.minikube/ca.key
I0904 06:26:43.890040 897361 certs.go:235] skipping valid "proxyClientCA" ca cert: /home/jenkins/minikube-integration/21409-875589/.minikube/proxy-client-ca.key
I0904 06:26:43.890046 897361 certs.go:256] generating profile certs ...
I0904 06:26:43.890107 897361 certs.go:363] generating signed profile cert for "minikube-user": /home/jenkins/minikube-integration/21409-875589/.minikube/profiles/dockerenv-668100/client.key
I0904 06:26:43.890116 897361 crypto.go:68] Generating cert /home/jenkins/minikube-integration/21409-875589/.minikube/profiles/dockerenv-668100/client.crt with IP's: []
I0904 06:26:44.250887 897361 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/21409-875589/.minikube/profiles/dockerenv-668100/client.crt ...
I0904 06:26:44.250904 897361 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21409-875589/.minikube/profiles/dockerenv-668100/client.crt: {Name:mkb2455c9be18712fac8627ef07a6ef9d239fcda Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0904 06:26:44.251697 897361 crypto.go:164] Writing key to /home/jenkins/minikube-integration/21409-875589/.minikube/profiles/dockerenv-668100/client.key ...
I0904 06:26:44.251707 897361 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21409-875589/.minikube/profiles/dockerenv-668100/client.key: {Name:mk929132be16f1cb47c792cef4f456c3eda9634b Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0904 06:26:44.252331 897361 certs.go:363] generating signed profile cert for "minikube": /home/jenkins/minikube-integration/21409-875589/.minikube/profiles/dockerenv-668100/apiserver.key.cfa3fe7d
I0904 06:26:44.252344 897361 crypto.go:68] Generating cert /home/jenkins/minikube-integration/21409-875589/.minikube/profiles/dockerenv-668100/apiserver.crt.cfa3fe7d with IP's: [10.96.0.1 127.0.0.1 10.0.0.1 192.168.49.2]
I0904 06:26:44.526309 897361 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/21409-875589/.minikube/profiles/dockerenv-668100/apiserver.crt.cfa3fe7d ...
I0904 06:26:44.526325 897361 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21409-875589/.minikube/profiles/dockerenv-668100/apiserver.crt.cfa3fe7d: {Name:mk38d93f669355f5ab7dc5b0e5061eb71d233206 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0904 06:26:44.527009 897361 crypto.go:164] Writing key to /home/jenkins/minikube-integration/21409-875589/.minikube/profiles/dockerenv-668100/apiserver.key.cfa3fe7d ...
I0904 06:26:44.527019 897361 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21409-875589/.minikube/profiles/dockerenv-668100/apiserver.key.cfa3fe7d: {Name:mk64945c2c916f96625609fb69cf02a40980c3f6 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0904 06:26:44.527639 897361 certs.go:381] copying /home/jenkins/minikube-integration/21409-875589/.minikube/profiles/dockerenv-668100/apiserver.crt.cfa3fe7d -> /home/jenkins/minikube-integration/21409-875589/.minikube/profiles/dockerenv-668100/apiserver.crt
I0904 06:26:44.527726 897361 certs.go:385] copying /home/jenkins/minikube-integration/21409-875589/.minikube/profiles/dockerenv-668100/apiserver.key.cfa3fe7d -> /home/jenkins/minikube-integration/21409-875589/.minikube/profiles/dockerenv-668100/apiserver.key
I0904 06:26:44.527783 897361 certs.go:363] generating signed profile cert for "aggregator": /home/jenkins/minikube-integration/21409-875589/.minikube/profiles/dockerenv-668100/proxy-client.key
I0904 06:26:44.527795 897361 crypto.go:68] Generating cert /home/jenkins/minikube-integration/21409-875589/.minikube/profiles/dockerenv-668100/proxy-client.crt with IP's: []
I0904 06:26:45.486956 897361 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/21409-875589/.minikube/profiles/dockerenv-668100/proxy-client.crt ...
I0904 06:26:45.486985 897361 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21409-875589/.minikube/profiles/dockerenv-668100/proxy-client.crt: {Name:mk01c576cd517d50d1b844c99b7312ec0c289593 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0904 06:26:45.487196 897361 crypto.go:164] Writing key to /home/jenkins/minikube-integration/21409-875589/.minikube/profiles/dockerenv-668100/proxy-client.key ...
I0904 06:26:45.487205 897361 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21409-875589/.minikube/profiles/dockerenv-668100/proxy-client.key: {Name:mk7e871a26697ff498b395cf069fe229de4029b1 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0904 06:26:45.487946 897361 certs.go:484] found cert: /home/jenkins/minikube-integration/21409-875589/.minikube/certs/ca-key.pem (1675 bytes)
I0904 06:26:45.487985 897361 certs.go:484] found cert: /home/jenkins/minikube-integration/21409-875589/.minikube/certs/ca.pem (1082 bytes)
I0904 06:26:45.488011 897361 certs.go:484] found cert: /home/jenkins/minikube-integration/21409-875589/.minikube/certs/cert.pem (1123 bytes)
I0904 06:26:45.488034 897361 certs.go:484] found cert: /home/jenkins/minikube-integration/21409-875589/.minikube/certs/key.pem (1675 bytes)
I0904 06:26:45.488588 897361 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21409-875589/.minikube/ca.crt --> /var/lib/minikube/certs/ca.crt (1111 bytes)
I0904 06:26:45.515211 897361 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21409-875589/.minikube/ca.key --> /var/lib/minikube/certs/ca.key (1675 bytes)
I0904 06:26:45.541671 897361 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21409-875589/.minikube/proxy-client-ca.crt --> /var/lib/minikube/certs/proxy-client-ca.crt (1119 bytes)
I0904 06:26:45.567478 897361 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21409-875589/.minikube/proxy-client-ca.key --> /var/lib/minikube/certs/proxy-client-ca.key (1679 bytes)
I0904 06:26:45.592627 897361 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21409-875589/.minikube/profiles/dockerenv-668100/apiserver.crt --> /var/lib/minikube/certs/apiserver.crt (1424 bytes)
I0904 06:26:45.618497 897361 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21409-875589/.minikube/profiles/dockerenv-668100/apiserver.key --> /var/lib/minikube/certs/apiserver.key (1675 bytes)
I0904 06:26:45.644780 897361 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21409-875589/.minikube/profiles/dockerenv-668100/proxy-client.crt --> /var/lib/minikube/certs/proxy-client.crt (1147 bytes)
I0904 06:26:45.669792 897361 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21409-875589/.minikube/profiles/dockerenv-668100/proxy-client.key --> /var/lib/minikube/certs/proxy-client.key (1679 bytes)
I0904 06:26:45.694876 897361 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21409-875589/.minikube/ca.crt --> /usr/share/ca-certificates/minikubeCA.pem (1111 bytes)
I0904 06:26:45.720065 897361 ssh_runner.go:362] scp memory --> /var/lib/minikube/kubeconfig (738 bytes)
I0904 06:26:45.738353 897361 ssh_runner.go:195] Run: openssl version
I0904 06:26:45.747135 897361 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/minikubeCA.pem && ln -fs /usr/share/ca-certificates/minikubeCA.pem /etc/ssl/certs/minikubeCA.pem"
I0904 06:26:45.757760 897361 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/minikubeCA.pem
I0904 06:26:45.761279 897361 certs.go:528] hashing: -rw-r--r-- 1 root root 1111 Sep 4 06:20 /usr/share/ca-certificates/minikubeCA.pem
I0904 06:26:45.761337 897361 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem
I0904 06:26:45.769178 897361 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/b5213941.0 || ln -fs /etc/ssl/certs/minikubeCA.pem /etc/ssl/certs/b5213941.0"
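The openssl/ln pair above wires the minikube CA into OpenSSL's hashed certificate directory: verifiers scanning /etc/ssl/certs look up a CA by its subject-name hash with a .0 suffix (b5213941.0 here), so the PEM only needs a symlink under that name. The same wiring by hand, assuming the CA already sits at /usr/share/ca-certificates/minikubeCA.pem:
hash=$(openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem)   # subject-name hash
sudo ln -fs /usr/share/ca-certificates/minikubeCA.pem "/etc/ssl/certs/${hash}.0"  # the name OpenSSL probes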
I0904 06:26:45.779026 897361 ssh_runner.go:195] Run: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt
I0904 06:26:45.782583 897361 certs.go:399] 'apiserver-kubelet-client' cert doesn't exist, likely first start: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/certs/apiserver-kubelet-client.crt': No such file or directory
I0904 06:26:45.782620  897361 kubeadm.go:392] StartCluster: {Name:dockerenv-668100 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.47-1756936034-21409@sha256:06a2e6835062e5beff0e5288aa7d453ae87f4ed9d9f593dbbe436c8e34741bfc Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.0 ClusterName:dockerenv-668100 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.49.2 Port:8443 KubernetesVersion:v1.34.0 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I0904 06:26:45.782677 897361 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:paused Name: Namespaces:[kube-system]}
I0904 06:26:45.782733 897361 ssh_runner.go:195] Run: sudo -s eval "crictl ps -a --quiet --label io.kubernetes.pod.namespace=kube-system"
I0904 06:26:45.827401 897361 cri.go:89] found id: ""
I0904 06:26:45.827464 897361 ssh_runner.go:195] Run: sudo ls /var/lib/kubelet/kubeadm-flags.env /var/lib/kubelet/config.yaml /var/lib/minikube/etcd
I0904 06:26:45.836588 897361 ssh_runner.go:195] Run: sudo cp /var/tmp/minikube/kubeadm.yaml.new /var/tmp/minikube/kubeadm.yaml
I0904 06:26:45.845681 897361 kubeadm.go:214] ignoring SystemVerification for kubeadm because of docker driver
I0904 06:26:45.845751 897361 ssh_runner.go:195] Run: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf
I0904 06:26:45.855018 897361 kubeadm.go:155] config check failed, skipping stale config cleanup: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
ls: cannot access '/etc/kubernetes/admin.conf': No such file or directory
ls: cannot access '/etc/kubernetes/kubelet.conf': No such file or directory
ls: cannot access '/etc/kubernetes/controller-manager.conf': No such file or directory
ls: cannot access '/etc/kubernetes/scheduler.conf': No such file or directory
I0904 06:26:45.855027 897361 kubeadm.go:157] found existing configuration files:
I0904 06:26:45.855081 897361 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf
I0904 06:26:45.864065 897361 kubeadm.go:163] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/admin.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/admin.conf: No such file or directory
I0904 06:26:45.864119 897361 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/admin.conf
I0904 06:26:45.872724 897361 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf
I0904 06:26:45.882122 897361 kubeadm.go:163] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/kubelet.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/kubelet.conf: No such file or directory
I0904 06:26:45.882181 897361 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/kubelet.conf
I0904 06:26:45.890900 897361 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf
I0904 06:26:45.899747 897361 kubeadm.go:163] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/controller-manager.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/controller-manager.conf: No such file or directory
I0904 06:26:45.899812 897361 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/controller-manager.conf
I0904 06:26:45.908746 897361 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf
I0904 06:26:45.917671 897361 kubeadm.go:163] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/scheduler.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/scheduler.conf: No such file or directory
I0904 06:26:45.917726 897361 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/scheduler.conf
I0904 06:26:45.926119 897361 ssh_runner.go:286] Start: /bin/bash -c "sudo env PATH="/var/lib/minikube/binaries/v1.34.0:$PATH" kubeadm init --config /var/tmp/minikube/kubeadm.yaml --ignore-preflight-errors=DirAvailable--etc-kubernetes-manifests,DirAvailable--var-lib-minikube,DirAvailable--var-lib-minikube-etcd,FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml,FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml,FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml,FileAvailable--etc-kubernetes-manifests-etcd.yaml,Port-10250,Swap,NumCPU,Mem,SystemVerification,FileContent--proc-sys-net-bridge-bridge-nf-call-iptables"
I0904 06:26:45.986712 897361 kubeadm.go:310] [WARNING SystemVerification]: cgroups v1 support is in maintenance mode, please migrate to cgroups v2
I0904 06:26:45.986936 897361 kubeadm.go:310] [WARNING SystemVerification]: failed to parse kernel config: unable to load kernel module: "configs", output: "modprobe: FATAL: Module configs not found in directory /lib/modules/5.15.0-1084-aws\n", err: exit status 1
I0904 06:26:46.066404 897361 kubeadm.go:310] [WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'
I0904 06:27:04.307353 897361 kubeadm.go:310] [init] Using Kubernetes version: v1.34.0
I0904 06:27:04.307403 897361 kubeadm.go:310] [preflight] Running pre-flight checks
I0904 06:27:04.307491 897361 kubeadm.go:310] [preflight] The system verification failed. Printing the output from the verification:
I0904 06:27:04.307546  897361 kubeadm.go:310] KERNEL_VERSION: 5.15.0-1084-aws
I0904 06:27:04.307580  897361 kubeadm.go:310] OS: Linux
I0904 06:27:04.307625  897361 kubeadm.go:310] CGROUPS_CPU: enabled
I0904 06:27:04.307673  897361 kubeadm.go:310] CGROUPS_CPUACCT: enabled
I0904 06:27:04.307720  897361 kubeadm.go:310] CGROUPS_CPUSET: enabled
I0904 06:27:04.307767  897361 kubeadm.go:310] CGROUPS_DEVICES: enabled
I0904 06:27:04.307815  897361 kubeadm.go:310] CGROUPS_FREEZER: enabled
I0904 06:27:04.307863  897361 kubeadm.go:310] CGROUPS_MEMORY: enabled
I0904 06:27:04.307908  897361 kubeadm.go:310] CGROUPS_PIDS: enabled
I0904 06:27:04.307956  897361 kubeadm.go:310] CGROUPS_HUGETLB: enabled
I0904 06:27:04.308002  897361 kubeadm.go:310] CGROUPS_BLKIO: enabled
I0904 06:27:04.308075 897361 kubeadm.go:310] [preflight] Pulling images required for setting up a Kubernetes cluster
I0904 06:27:04.308169 897361 kubeadm.go:310] [preflight] This might take a minute or two, depending on the speed of your internet connection
I0904 06:27:04.308267 897361 kubeadm.go:310] [preflight] You can also perform this action beforehand using 'kubeadm config images pull'
I0904 06:27:04.308330 897361 kubeadm.go:310] [certs] Using certificateDir folder "/var/lib/minikube/certs"
I0904 06:27:04.311308 897361 out.go:252] - Generating certificates and keys ...
I0904 06:27:04.311411 897361 kubeadm.go:310] [certs] Using existing ca certificate authority
I0904 06:27:04.311476 897361 kubeadm.go:310] [certs] Using existing apiserver certificate and key on disk
I0904 06:27:04.311548 897361 kubeadm.go:310] [certs] Generating "apiserver-kubelet-client" certificate and key
I0904 06:27:04.311605 897361 kubeadm.go:310] [certs] Generating "front-proxy-ca" certificate and key
I0904 06:27:04.311665 897361 kubeadm.go:310] [certs] Generating "front-proxy-client" certificate and key
I0904 06:27:04.311715 897361 kubeadm.go:310] [certs] Generating "etcd/ca" certificate and key
I0904 06:27:04.311781 897361 kubeadm.go:310] [certs] Generating "etcd/server" certificate and key
I0904 06:27:04.311919 897361 kubeadm.go:310] [certs] etcd/server serving cert is signed for DNS names [dockerenv-668100 localhost] and IPs [192.168.49.2 127.0.0.1 ::1]
I0904 06:27:04.311977 897361 kubeadm.go:310] [certs] Generating "etcd/peer" certificate and key
I0904 06:27:04.312123 897361 kubeadm.go:310] [certs] etcd/peer serving cert is signed for DNS names [dockerenv-668100 localhost] and IPs [192.168.49.2 127.0.0.1 ::1]
I0904 06:27:04.312204 897361 kubeadm.go:310] [certs] Generating "etcd/healthcheck-client" certificate and key
I0904 06:27:04.312272 897361 kubeadm.go:310] [certs] Generating "apiserver-etcd-client" certificate and key
I0904 06:27:04.312348 897361 kubeadm.go:310] [certs] Generating "sa" key and public key
I0904 06:27:04.312404 897361 kubeadm.go:310] [kubeconfig] Using kubeconfig folder "/etc/kubernetes"
I0904 06:27:04.312455 897361 kubeadm.go:310] [kubeconfig] Writing "admin.conf" kubeconfig file
I0904 06:27:04.312512 897361 kubeadm.go:310] [kubeconfig] Writing "super-admin.conf" kubeconfig file
I0904 06:27:04.312565 897361 kubeadm.go:310] [kubeconfig] Writing "kubelet.conf" kubeconfig file
I0904 06:27:04.312637 897361 kubeadm.go:310] [kubeconfig] Writing "controller-manager.conf" kubeconfig file
I0904 06:27:04.312700 897361 kubeadm.go:310] [kubeconfig] Writing "scheduler.conf" kubeconfig file
I0904 06:27:04.312787 897361 kubeadm.go:310] [etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
I0904 06:27:04.312882 897361 kubeadm.go:310] [control-plane] Using manifest folder "/etc/kubernetes/manifests"
I0904 06:27:04.317923 897361 out.go:252] - Booting up control plane ...
I0904 06:27:04.318036 897361 kubeadm.go:310] [control-plane] Creating static Pod manifest for "kube-apiserver"
I0904 06:27:04.318113 897361 kubeadm.go:310] [control-plane] Creating static Pod manifest for "kube-controller-manager"
I0904 06:27:04.318179 897361 kubeadm.go:310] [control-plane] Creating static Pod manifest for "kube-scheduler"
I0904 06:27:04.318288 897361 kubeadm.go:310] [kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
I0904 06:27:04.318382 897361 kubeadm.go:310] [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/instance-config.yaml"
I0904 06:27:04.318486 897361 kubeadm.go:310] [patches] Applied patch of type "application/strategic-merge-patch+json" to target "kubeletconfiguration"
I0904 06:27:04.318570 897361 kubeadm.go:310] [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
I0904 06:27:04.318608 897361 kubeadm.go:310] [kubelet-start] Starting the kubelet
I0904 06:27:04.318739 897361 kubeadm.go:310] [wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests"
I0904 06:27:04.318844 897361 kubeadm.go:310] [kubelet-check] Waiting for a healthy kubelet at http://127.0.0.1:10248/healthz. This can take up to 4m0s
I0904 06:27:04.318901 897361 kubeadm.go:310] [kubelet-check] The kubelet is healthy after 501.78182ms
I0904 06:27:04.318993 897361 kubeadm.go:310] [control-plane-check] Waiting for healthy control plane components. This can take up to 4m0s
I0904 06:27:04.319073 897361 kubeadm.go:310] [control-plane-check] Checking kube-apiserver at https://192.168.49.2:8443/livez
I0904 06:27:04.319167 897361 kubeadm.go:310] [control-plane-check] Checking kube-controller-manager at https://127.0.0.1:10257/healthz
I0904 06:27:04.319245 897361 kubeadm.go:310] [control-plane-check] Checking kube-scheduler at https://127.0.0.1:10259/livez
I0904 06:27:04.319321 897361 kubeadm.go:310] [control-plane-check] kube-controller-manager is healthy after 3.889290454s
I0904 06:27:04.319390 897361 kubeadm.go:310] [control-plane-check] kube-scheduler is healthy after 6.452711964s
I0904 06:27:04.319457 897361 kubeadm.go:310] [control-plane-check] kube-apiserver is healthy after 7.003703728s
I0904 06:27:04.319563 897361 kubeadm.go:310] [upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
I0904 06:27:04.319688 897361 kubeadm.go:310] [kubelet] Creating a ConfigMap "kubelet-config" in namespace kube-system with the configuration for the kubelets in the cluster
I0904 06:27:04.319752 897361 kubeadm.go:310] [upload-certs] Skipping phase. Please see --upload-certs
I0904 06:27:04.319937 897361 kubeadm.go:310] [mark-control-plane] Marking the node dockerenv-668100 as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]
I0904 06:27:04.319993 897361 kubeadm.go:310] [bootstrap-token] Using token: jgzjt9.0m09xs3lmsari5xi
I0904 06:27:04.322898 897361 out.go:252] - Configuring RBAC rules ...
I0904 06:27:04.323020 897361 kubeadm.go:310] [bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
I0904 06:27:04.323125 897361 kubeadm.go:310] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes
I0904 06:27:04.323274 897361 kubeadm.go:310] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
I0904 06:27:04.323403 897361 kubeadm.go:310] [bootstrap-token] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
I0904 06:27:04.323561 897361 kubeadm.go:310] [bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
I0904 06:27:04.323654 897361 kubeadm.go:310] [bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
I0904 06:27:04.323781 897361 kubeadm.go:310] [kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
I0904 06:27:04.323828 897361 kubeadm.go:310] [addons] Applied essential addon: CoreDNS
I0904 06:27:04.323873 897361 kubeadm.go:310] [addons] Applied essential addon: kube-proxy
I0904 06:27:04.323876 897361 kubeadm.go:310]
I0904 06:27:04.323940 897361 kubeadm.go:310] Your Kubernetes control-plane has initialized successfully!
I0904 06:27:04.323944 897361 kubeadm.go:310]
I0904 06:27:04.324048 897361 kubeadm.go:310] To start using your cluster, you need to run the following as a regular user:
I0904 06:27:04.324053 897361 kubeadm.go:310]
I0904 06:27:04.324080 897361 kubeadm.go:310] mkdir -p $HOME/.kube
I0904 06:27:04.324138 897361 kubeadm.go:310] sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
I0904 06:27:04.324188 897361 kubeadm.go:310] sudo chown $(id -u):$(id -g) $HOME/.kube/config
I0904 06:27:04.324192 897361 kubeadm.go:310]
I0904 06:27:04.324245 897361 kubeadm.go:310] Alternatively, if you are the root user, you can run:
I0904 06:27:04.324248 897361 kubeadm.go:310]
I0904 06:27:04.324297 897361 kubeadm.go:310] export KUBECONFIG=/etc/kubernetes/admin.conf
I0904 06:27:04.324301 897361 kubeadm.go:310]
I0904 06:27:04.324356 897361 kubeadm.go:310] You should now deploy a pod network to the cluster.
I0904 06:27:04.324471 897361 kubeadm.go:310] Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
I0904 06:27:04.324567 897361 kubeadm.go:310] https://kubernetes.io/docs/concepts/cluster-administration/addons/
I0904 06:27:04.324575 897361 kubeadm.go:310]
I0904 06:27:04.324681 897361 kubeadm.go:310] You can now join any number of control-plane nodes by copying certificate authorities
I0904 06:27:04.324772 897361 kubeadm.go:310] and service account keys on each node and then running the following as root:
I0904 06:27:04.324775 897361 kubeadm.go:310]
I0904 06:27:04.324878 897361 kubeadm.go:310] kubeadm join control-plane.minikube.internal:8443 --token jgzjt9.0m09xs3lmsari5xi \
I0904 06:27:04.324992 897361 kubeadm.go:310] --discovery-token-ca-cert-hash sha256:b015c4983f19e3224c1e79ee70ccbcf131b704362e85d3e278f8097e427041d1 \
I0904 06:27:04.325024 897361 kubeadm.go:310] --control-plane
I0904 06:27:04.325028 897361 kubeadm.go:310]
I0904 06:27:04.325159 897361 kubeadm.go:310] Then you can join any number of worker nodes by running the following on each as root:
I0904 06:27:04.325166 897361 kubeadm.go:310]
I0904 06:27:04.325250 897361 kubeadm.go:310] kubeadm join control-plane.minikube.internal:8443 --token jgzjt9.0m09xs3lmsari5xi \
I0904 06:27:04.325370 897361 kubeadm.go:310] --discovery-token-ca-cert-hash sha256:b015c4983f19e3224c1e79ee70ccbcf131b704362e85d3e278f8097e427041d1
I0904 06:27:04.325378 897361 cni.go:84] Creating CNI manager for ""
I0904 06:27:04.325384 897361 cni.go:143] "docker" driver + "containerd" runtime found, recommending kindnet
I0904 06:27:04.330302 897361 out.go:179] * Configuring CNI (Container Networking Interface) ...
I0904 06:27:04.333211 897361 ssh_runner.go:195] Run: stat /opt/cni/bin/portmap
I0904 06:27:04.337445 897361 cni.go:182] applying CNI manifest using /var/lib/minikube/binaries/v1.34.0/kubectl ...
I0904 06:27:04.337455 897361 ssh_runner.go:362] scp memory --> /var/tmp/minikube/cni.yaml (2601 bytes)
I0904 06:27:04.357935 897361 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.0/kubectl apply --kubeconfig=/var/lib/minikube/kubeconfig -f /var/tmp/minikube/cni.yaml
I0904 06:27:04.671308 897361 ssh_runner.go:195] Run: /bin/bash -c "cat /proc/$(pgrep kube-apiserver)/oom_adj"
I0904 06:27:04.671390 897361 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.0/kubectl create clusterrolebinding minikube-rbac --clusterrole=cluster-admin --serviceaccount=kube-system:default --kubeconfig=/var/lib/minikube/kubeconfig
I0904 06:27:04.671454 897361 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig label --overwrite nodes dockerenv-668100 minikube.k8s.io/updated_at=2025_09_04T06_27_04_0700 minikube.k8s.io/version=v1.36.0 minikube.k8s.io/commit=c3fa37de45a2901b215fab008201edf72ce5a1ff minikube.k8s.io/name=dockerenv-668100 minikube.k8s.io/primary=true
I0904 06:27:04.914708 897361 ops.go:34] apiserver oom_adj: -16
I0904 06:27:04.914741 897361 kubeadm.go:1105] duration metric: took 243.420241ms to wait for elevateKubeSystemPrivileges
I0904 06:27:04.914753 897361 kubeadm.go:394] duration metric: took 19.132136419s to StartCluster
I0904 06:27:04.914769 897361 settings.go:142] acquiring lock: {Name:mk9c58582abe05a5564762391c515fb51268bf5b Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0904 06:27:04.914829 897361 settings.go:150] Updating kubeconfig: /home/jenkins/minikube-integration/21409-875589/kubeconfig
I0904 06:27:04.915468 897361 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21409-875589/kubeconfig: {Name:mk31755a028adb6a990e615720c4f523c928982d Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0904 06:27:04.915696 897361 start.go:235] Will wait 6m0s for node &{Name: IP:192.168.49.2 Port:8443 KubernetesVersion:v1.34.0 ContainerRuntime:containerd ControlPlane:true Worker:true}
I0904 06:27:04.915824 897361 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml"
I0904 06:27:04.916068 897361 config.go:182] Loaded profile config "dockerenv-668100": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.34.0
I0904 06:27:04.916103 897361 addons.go:511] enable addons start: toEnable=map[ambassador:false amd-gpu-device-plugin:false auto-pause:false cloud-spanner:false csi-hostpath-driver:false dashboard:false default-storageclass:true efk:false freshpod:false gcp-auth:false gvisor:false headlamp:false inaccel:false ingress:false ingress-dns:false inspektor-gadget:false istio:false istio-provisioner:false kong:false kubeflow:false kubetail:false kubevirt:false logviewer:false metallb:false metrics-server:false nvidia-device-plugin:false nvidia-driver-installer:false nvidia-gpu-device-plugin:false olm:false pod-security-policy:false portainer:false registry:false registry-aliases:false registry-creds:false storage-provisioner:true storage-provisioner-gluster:false storage-provisioner-rancher:false volcano:false volumesnapshots:false yakd:false]
I0904 06:27:04.916162 897361 addons.go:69] Setting storage-provisioner=true in profile "dockerenv-668100"
I0904 06:27:04.916174 897361 addons.go:238] Setting addon storage-provisioner=true in "dockerenv-668100"
I0904 06:27:04.916196 897361 host.go:66] Checking if "dockerenv-668100" exists ...
I0904 06:27:04.916682 897361 cli_runner.go:164] Run: docker container inspect dockerenv-668100 --format={{.State.Status}}
I0904 06:27:04.917001 897361 addons.go:69] Setting default-storageclass=true in profile "dockerenv-668100"
I0904 06:27:04.917012 897361 addons_storage_classes.go:33] enableOrDisableStorageClasses default-storageclass=true on "dockerenv-668100"
I0904 06:27:04.917324 897361 cli_runner.go:164] Run: docker container inspect dockerenv-668100 --format={{.State.Status}}
I0904 06:27:04.919140 897361 out.go:179] * Verifying Kubernetes components...
I0904 06:27:04.925156 897361 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0904 06:27:04.966336 897361 addons.go:238] Setting addon default-storageclass=true in "dockerenv-668100"
I0904 06:27:04.966366 897361 host.go:66] Checking if "dockerenv-668100" exists ...
I0904 06:27:04.966798 897361 cli_runner.go:164] Run: docker container inspect dockerenv-668100 --format={{.State.Status}}
I0904 06:27:04.972988 897361 out.go:179] - Using image gcr.io/k8s-minikube/storage-provisioner:v5
I0904 06:27:04.975810 897361 addons.go:435] installing /etc/kubernetes/addons/storage-provisioner.yaml
I0904 06:27:04.975822 897361 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/storage-provisioner.yaml (2676 bytes)
I0904 06:27:04.975891 897361 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" dockerenv-668100
I0904 06:27:04.995066 897361 addons.go:435] installing /etc/kubernetes/addons/storageclass.yaml
I0904 06:27:04.995078 897361 ssh_runner.go:362] scp storageclass/storageclass.yaml --> /etc/kubernetes/addons/storageclass.yaml (271 bytes)
I0904 06:27:04.995145 897361 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" dockerenv-668100
I0904 06:27:05.009425 897361 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33884 SSHKeyPath:/home/jenkins/minikube-integration/21409-875589/.minikube/machines/dockerenv-668100/id_rsa Username:docker}
I0904 06:27:05.031243 897361 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33884 SSHKeyPath:/home/jenkins/minikube-integration/21409-875589/.minikube/machines/dockerenv-668100/id_rsa Username:docker}
I0904 06:27:05.194402 897361 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml | sed -e '/^ forward . \/etc\/resolv.conf.*/i \ hosts {\n 192.168.49.1 host.minikube.internal\n fallthrough\n }' -e '/^ errors *$/i \ log' | sudo /var/lib/minikube/binaries/v1.34.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig replace -f -"
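The pipeline above edits CoreDNS's Corefile in place: sed inserts a hosts block just before the forward . /etc/resolv.conf directive and a log directive before errors, and the modified ConfigMap is piped straight back through kubectl replace -f -. Reconstructed from the sed expressions (indentation approximate), the injected Corefile fragment is:
hosts {
   192.168.49.1 host.minikube.internal
   fallthrough
}
which is what lets in-cluster pods resolve host.minikube.internal to the Docker gateway; the confirmation shows up at start.go:976 below.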
I0904 06:27:05.194515 897361 ssh_runner.go:195] Run: sudo systemctl start kubelet
I0904 06:27:05.232777 897361 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml
I0904 06:27:05.273940 897361 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml
I0904 06:27:05.593356 897361 start.go:976] {"host.minikube.internal": 192.168.49.1} host record injected into CoreDNS's ConfigMap
I0904 06:27:05.596120 897361 api_server.go:52] waiting for apiserver process to appear ...
I0904 06:27:05.596187 897361 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I0904 06:27:05.802353 897361 api_server.go:72] duration metric: took 886.627031ms to wait for apiserver process to appear ...
I0904 06:27:05.802364 897361 api_server.go:88] waiting for apiserver healthz status ...
I0904 06:27:05.802382 897361 api_server.go:253] Checking apiserver healthz at https://192.168.49.2:8443/healthz ...
I0904 06:27:05.814705 897361 api_server.go:279] https://192.168.49.2:8443/healthz returned 200:
ok
I0904 06:27:05.816237 897361 api_server.go:141] control plane version: v1.34.0
I0904 06:27:05.816252 897361 api_server.go:131] duration metric: took 13.883215ms to wait for apiserver health ...
I0904 06:27:05.816260 897361 system_pods.go:43] waiting for kube-system pods to appear ...
I0904 06:27:05.819666 897361 system_pods.go:59] 5 kube-system pods found
I0904 06:27:05.819686 897361 system_pods.go:61] "etcd-dockerenv-668100" [b855a968-c0d4-4905-96a9-fc3dff0c4682] Running / Ready:ContainersNotReady (containers with unready status: [etcd]) / ContainersReady:ContainersNotReady (containers with unready status: [etcd])
I0904 06:27:05.819693 897361 system_pods.go:61] "kube-apiserver-dockerenv-668100" [db48beda-0b2c-4fa5-976d-dea79c59d0f3] Running / Ready:ContainersNotReady (containers with unready status: [kube-apiserver]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-apiserver])
I0904 06:27:05.819701 897361 system_pods.go:61] "kube-controller-manager-dockerenv-668100" [3d4f5233-1095-4ec3-a9ca-45c82bc77424] Running / Ready:ContainersNotReady (containers with unready status: [kube-controller-manager]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-controller-manager])
I0904 06:27:05.819707 897361 system_pods.go:61] "kube-scheduler-dockerenv-668100" [69b85745-80a0-4751-9a04-1e8986ba5b28] Running / Ready:ContainersNotReady (containers with unready status: [kube-scheduler]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-scheduler])
I0904 06:27:05.819712 897361 system_pods.go:61] "storage-provisioner" [71c4cc9e-d8f4-4c61-811d-34ef62cc03d3] Pending: PodScheduled:Unschedulable (0/1 nodes are available: 1 node(s) had untolerated taint {node.kubernetes.io/not-ready: }. no new claims to deallocate, preemption: 0/1 nodes are available: 1 Preemption is not helpful for scheduling.)
I0904 06:27:05.819717 897361 system_pods.go:74] duration metric: took 3.452541ms to wait for pod list to return data ...
I0904 06:27:05.819727 897361 kubeadm.go:578] duration metric: took 904.007071ms to wait for: map[apiserver:true system_pods:true]
I0904 06:27:05.819739 897361 node_conditions.go:102] verifying NodePressure condition ...
I0904 06:27:05.820843 897361 out.go:179] * Enabled addons: storage-provisioner, default-storageclass
I0904 06:27:05.823407 897361 node_conditions.go:122] node storage ephemeral capacity is 203034800Ki
I0904 06:27:05.823427 897361 node_conditions.go:123] node cpu capacity is 2
I0904 06:27:05.823438 897361 node_conditions.go:105] duration metric: took 3.695539ms to run NodePressure ...
I0904 06:27:05.823450 897361 start.go:241] waiting for startup goroutines ...
I0904 06:27:05.823971 897361 addons.go:514] duration metric: took 907.857312ms for enable addons: enabled=[storage-provisioner default-storageclass]
I0904 06:27:06.097823 897361 kapi.go:214] "coredns" deployment in "kube-system" namespace and "dockerenv-668100" context rescaled to 1 replicas
I0904 06:27:06.097853 897361 start.go:246] waiting for cluster config update ...
I0904 06:27:06.097863 897361 start.go:255] writing updated cluster config ...
I0904 06:27:06.098173 897361 ssh_runner.go:195] Run: rm -f paused
I0904 06:27:06.163017 897361 start.go:617] kubectl: 1.33.2, cluster: 1.34.0 (minor skew: 1)
I0904 06:27:06.166248 897361 out.go:179] * Done! kubectl is now configured to use "dockerenv-668100" cluster and "default" namespace by default
==> container status <==
CONTAINER      IMAGE          CREATED         STATE    NAME                     ATTEMPT  POD ID         POD
94e77ae694a38  ba04bb24b9575  9 seconds ago   Running  storage-provisioner      0        727511983fdb1  storage-provisioner
380b173f1f0b6  6fc32d66c1411  10 seconds ago  Running  kube-proxy               0        71068b21dcc6a  kube-proxy-cnl9w
882fe50ff0640  b1a8c6f707935  10 seconds ago  Running  kindnet-cni              0        5c635449e6a8b  kindnet-gktdj
1cd1d9f20ef8a  996be7e86d9b3  24 seconds ago  Running  kube-controller-manager  0        f3141d4ebe6f5  kube-controller-manager-dockerenv-668100
6ae62be481cbe  a25f5ef9c34c3  24 seconds ago  Running  kube-scheduler           0        32c2b1cb71e25  kube-scheduler-dockerenv-668100
7d5b83c3116ee  a1894772a478e  24 seconds ago  Running  etcd                     0        0a96949b99cab  etcd-dockerenv-668100
1ee90978e53f4  d291939e99406  24 seconds ago  Running  kube-apiserver           0        0618a2426e275  kube-apiserver-dockerenv-668100
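The table above is CRI-level container state collected inside the node. A sketch of reproducing it by hand against a live profile (profile name taken from this run):
# Run crictl on the node itself; -a also lists exited containers
minikube ssh -p dockerenv-668100 -- sudo crictl ps -a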
==> containerd <==
Sep 04 06:26:56 dockerenv-668100 containerd[833]: time="2025-09-04T06:26:56.677974657Z" level=info msg="StartContainer for \"1cd1d9f20ef8ade227088850e8a4b70283e0db55a19acab86c1d990dfd1c836e\" returns successfully"
Sep 04 06:27:09 dockerenv-668100 containerd[833]: time="2025-09-04T06:27:09.966772873Z" level=info msg="RunPodSandbox for &PodSandboxMetadata{Name:coredns-66bc5c9577-sjph7,Uid:f5428f91-007a-40ef-aea7-9ed4ae42021f,Namespace:kube-system,Attempt:0,}"
Sep 04 06:27:10 dockerenv-668100 containerd[833]: time="2025-09-04T06:27:10.011434256Z" level=error msg="RunPodSandbox for &PodSandboxMetadata{Name:coredns-66bc5c9577-sjph7,Uid:f5428f91-007a-40ef-aea7-9ed4ae42021f,Namespace:kube-system,Attempt:0,} failed, error" error="failed to setup network for sandbox \"d3be208230571470bdd12d3803e95a1a23265b4f0ad5e94e586c896d4631f633\": failed to find network info for sandbox \"d3be208230571470bdd12d3803e95a1a23265b4f0ad5e94e586c896d4631f633\""
Sep 04 06:27:10 dockerenv-668100 containerd[833]: time="2025-09-04T06:27:10.270098699Z" level=info msg="RunPodSandbox for &PodSandboxMetadata{Name:kindnet-gktdj,Uid:4858c187-b6a5-4f05-bbe2-c1e23c6af60c,Namespace:kube-system,Attempt:0,}"
Sep 04 06:27:10 dockerenv-668100 containerd[833]: time="2025-09-04T06:27:10.285660717Z" level=info msg="RunPodSandbox for &PodSandboxMetadata{Name:kube-proxy-cnl9w,Uid:2c703103-b24a-4f1e-83c0-e70b47e465ae,Namespace:kube-system,Attempt:0,}"
Sep 04 06:27:10 dockerenv-668100 containerd[833]: time="2025-09-04T06:27:10.367809457Z" level=info msg="RunPodSandbox for &PodSandboxMetadata{Name:kindnet-gktdj,Uid:4858c187-b6a5-4f05-bbe2-c1e23c6af60c,Namespace:kube-system,Attempt:0,} returns sandbox id \"5c635449e6a8b11b98dcde8e4814ca82e8f66b4382ea62f343a985e7ed33020d\""
Sep 04 06:27:10 dockerenv-668100 containerd[833]: time="2025-09-04T06:27:10.378783360Z" level=info msg="CreateContainer within sandbox \"5c635449e6a8b11b98dcde8e4814ca82e8f66b4382ea62f343a985e7ed33020d\" for container &ContainerMetadata{Name:kindnet-cni,Attempt:0,}"
Sep 04 06:27:10 dockerenv-668100 containerd[833]: time="2025-09-04T06:27:10.383953195Z" level=info msg="RunPodSandbox for &PodSandboxMetadata{Name:kube-proxy-cnl9w,Uid:2c703103-b24a-4f1e-83c0-e70b47e465ae,Namespace:kube-system,Attempt:0,} returns sandbox id \"71068b21dcc6a0baddb2537720334153b1cb6845f04513fd94a56827a0ff578b\""
Sep 04 06:27:10 dockerenv-668100 containerd[833]: time="2025-09-04T06:27:10.391589390Z" level=info msg="CreateContainer within sandbox \"71068b21dcc6a0baddb2537720334153b1cb6845f04513fd94a56827a0ff578b\" for container &ContainerMetadata{Name:kube-proxy,Attempt:0,}"
Sep 04 06:27:10 dockerenv-668100 containerd[833]: time="2025-09-04T06:27:10.407678144Z" level=info msg="CreateContainer within sandbox \"5c635449e6a8b11b98dcde8e4814ca82e8f66b4382ea62f343a985e7ed33020d\" for &ContainerMetadata{Name:kindnet-cni,Attempt:0,} returns container id \"882fe50ff0640fcc6be702ebd8fb9bb219cb6ed580178c5642230626d1843c44\""
Sep 04 06:27:10 dockerenv-668100 containerd[833]: time="2025-09-04T06:27:10.408815154Z" level=info msg="StartContainer for \"882fe50ff0640fcc6be702ebd8fb9bb219cb6ed580178c5642230626d1843c44\""
Sep 04 06:27:10 dockerenv-668100 containerd[833]: time="2025-09-04T06:27:10.416964127Z" level=info msg="CreateContainer within sandbox \"71068b21dcc6a0baddb2537720334153b1cb6845f04513fd94a56827a0ff578b\" for &ContainerMetadata{Name:kube-proxy,Attempt:0,} returns container id \"380b173f1f0b66abb94a52bc25f3037e00c21670f7fb2a304fcc5d5751ffae9a\""
Sep 04 06:27:10 dockerenv-668100 containerd[833]: time="2025-09-04T06:27:10.419772581Z" level=info msg="StartContainer for \"380b173f1f0b66abb94a52bc25f3037e00c21670f7fb2a304fcc5d5751ffae9a\""
Sep 04 06:27:10 dockerenv-668100 containerd[833]: time="2025-09-04T06:27:10.501482479Z" level=info msg="StartContainer for \"380b173f1f0b66abb94a52bc25f3037e00c21670f7fb2a304fcc5d5751ffae9a\" returns successfully"
Sep 04 06:27:10 dockerenv-668100 containerd[833]: time="2025-09-04T06:27:10.559088191Z" level=info msg="StartContainer for \"882fe50ff0640fcc6be702ebd8fb9bb219cb6ed580178c5642230626d1843c44\" returns successfully"
Sep 04 06:27:10 dockerenv-668100 containerd[833]: time="2025-09-04T06:27:10.572953018Z" level=info msg="RunPodSandbox for &PodSandboxMetadata{Name:storage-provisioner,Uid:71c4cc9e-d8f4-4c61-811d-34ef62cc03d3,Namespace:kube-system,Attempt:0,}"
Sep 04 06:27:10 dockerenv-668100 containerd[833]: time="2025-09-04T06:27:10.656558503Z" level=info msg="RunPodSandbox for &PodSandboxMetadata{Name:storage-provisioner,Uid:71c4cc9e-d8f4-4c61-811d-34ef62cc03d3,Namespace:kube-system,Attempt:0,} returns sandbox id \"727511983fdb14ea4fae2029944cf5d99c53d70bfe20ef210296e8f6d4fafe46\""
Sep 04 06:27:10 dockerenv-668100 containerd[833]: time="2025-09-04T06:27:10.666421291Z" level=info msg="CreateContainer within sandbox \"727511983fdb14ea4fae2029944cf5d99c53d70bfe20ef210296e8f6d4fafe46\" for container &ContainerMetadata{Name:storage-provisioner,Attempt:0,}"
Sep 04 06:27:10 dockerenv-668100 containerd[833]: time="2025-09-04T06:27:10.685648465Z" level=info msg="CreateContainer within sandbox \"727511983fdb14ea4fae2029944cf5d99c53d70bfe20ef210296e8f6d4fafe46\" for &ContainerMetadata{Name:storage-provisioner,Attempt:0,} returns container id \"94e77ae694a386fd3c954fbe0ec6379751ed420a5f54c632274658230e01f8cf\""
Sep 04 06:27:10 dockerenv-668100 containerd[833]: time="2025-09-04T06:27:10.686579326Z" level=info msg="StartContainer for \"94e77ae694a386fd3c954fbe0ec6379751ed420a5f54c632274658230e01f8cf\""
Sep 04 06:27:10 dockerenv-668100 containerd[833]: time="2025-09-04T06:27:10.777440897Z" level=info msg="StartContainer for \"94e77ae694a386fd3c954fbe0ec6379751ed420a5f54c632274658230e01f8cf\" returns successfully"
Sep 04 06:27:14 dockerenv-668100 containerd[833]: time="2025-09-04T06:27:14.036312198Z" level=info msg="No cni config template is specified, wait for other system components to drop the config."
Sep 04 06:27:18 dockerenv-668100 containerd[833]: time="2025-09-04T06:27:18.607431694Z" level=info msg="ImageCreate event name:\"docker.io/local/minikube-dockerenv-containerd-test:latest\""
Sep 04 06:27:18 dockerenv-668100 containerd[833]: time="2025-09-04T06:27:18.621722382Z" level=info msg="ImageCreate event name:\"sha256:55c49c8f4b2e09dc0d5b37eb589b5abf71303b9bbbc3859a5459b25fa9fd0b57\" labels:{key:\"io.cri-containerd.image\" value:\"managed\"}"
Sep 04 06:27:18 dockerenv-668100 containerd[833]: time="2025-09-04T06:27:18.622130749Z" level=info msg="ImageUpdate event name:\"docker.io/local/minikube-dockerenv-containerd-test:latest\" labels:{key:\"io.cri-containerd.image\" value:\"managed\"}"
==> describe nodes <==
Name: dockerenv-668100
Roles: control-plane
Labels: beta.kubernetes.io/arch=arm64
beta.kubernetes.io/os=linux
kubernetes.io/arch=arm64
kubernetes.io/hostname=dockerenv-668100
kubernetes.io/os=linux
minikube.k8s.io/commit=c3fa37de45a2901b215fab008201edf72ce5a1ff
minikube.k8s.io/name=dockerenv-668100
minikube.k8s.io/primary=true
minikube.k8s.io/updated_at=2025_09_04T06_27_04_0700
minikube.k8s.io/version=v1.36.0
node-role.kubernetes.io/control-plane=
node.kubernetes.io/exclude-from-external-load-balancers=
Annotations: node.alpha.kubernetes.io/ttl: 0
volumes.kubernetes.io/controller-managed-attach-detach: true
CreationTimestamp: Thu, 04 Sep 2025 06:27:00 +0000
Taints: <none>
Unschedulable: false
Lease:
HolderIdentity: dockerenv-668100
AcquireTime: <unset>
RenewTime: Thu, 04 Sep 2025 06:27:13 +0000
Conditions:
Type Status LastHeartbeatTime LastTransitionTime Reason Message
---- ------ ----------------- ------------------ ------ -------
MemoryPressure False Thu, 04 Sep 2025 06:27:14 +0000 Thu, 04 Sep 2025 06:26:57 +0000 KubeletHasSufficientMemory kubelet has sufficient memory available
DiskPressure False Thu, 04 Sep 2025 06:27:14 +0000 Thu, 04 Sep 2025 06:26:57 +0000 KubeletHasNoDiskPressure kubelet has no disk pressure
PIDPressure False Thu, 04 Sep 2025 06:27:14 +0000 Thu, 04 Sep 2025 06:26:57 +0000 KubeletHasSufficientPID kubelet has sufficient PID available
Ready True Thu, 04 Sep 2025 06:27:14 +0000 Thu, 04 Sep 2025 06:27:00 +0000 KubeletReady kubelet is posting ready status
Addresses:
InternalIP: 192.168.49.2
Hostname: dockerenv-668100
Capacity:
cpu: 2
ephemeral-storage: 203034800Ki
hugepages-1Gi: 0
hugepages-2Mi: 0
hugepages-32Mi: 0
hugepages-64Ki: 0
memory: 8022300Ki
pods: 110
Allocatable:
cpu: 2
ephemeral-storage: 203034800Ki
hugepages-1Gi: 0
hugepages-2Mi: 0
hugepages-32Mi: 0
hugepages-64Ki: 0
memory: 8022300Ki
pods: 110
System Info:
Machine ID: 4b5e832d2bd74b8fba9e9819d9c1d589
System UUID: 0207422c-002e-4426-abc7-5742cab1bdba
Boot ID: 73e95979-5845-4235-a957-c3d9397ed2ac
Kernel Version: 5.15.0-1084-aws
OS Image: Ubuntu 22.04.5 LTS
Operating System: linux
Architecture: arm64
Container Runtime Version: containerd://1.7.27
Kubelet Version: v1.34.0
Kube-Proxy Version:
PodCIDR: 10.244.0.0/24
PodCIDRs: 10.244.0.0/24
Non-terminated Pods: (8 in total)
Namespace Name CPU Requests CPU Limits Memory Requests Memory Limits Age
--------- ---- ------------ ---------- --------------- ------------- ---
kube-system coredns-66bc5c9577-sjph7 100m (5%) 0 (0%) 70Mi (0%) 170Mi (2%) 11s
kube-system etcd-dockerenv-668100 100m (5%) 0 (0%) 100Mi (1%) 0 (0%) 17s
kube-system kindnet-gktdj 100m (5%) 100m (5%) 50Mi (0%) 50Mi (0%) 11s
kube-system kube-apiserver-dockerenv-668100 250m (12%) 0 (0%) 0 (0%) 0 (0%) 17s
kube-system kube-controller-manager-dockerenv-668100 200m (10%) 0 (0%) 0 (0%) 0 (0%) 17s
kube-system kube-proxy-cnl9w 0 (0%) 0 (0%) 0 (0%) 0 (0%) 11s
kube-system kube-scheduler-dockerenv-668100 100m (5%) 0 (0%) 0 (0%) 0 (0%) 17s
kube-system storage-provisioner 0 (0%) 0 (0%) 0 (0%) 0 (0%) 15s
Allocated resources:
(Total limits may be over 100 percent, i.e., overcommitted.)
Resource Requests Limits
-------- -------- ------
cpu 850m (42%) 100m (5%)
memory 220Mi (2%) 220Mi (2%)
ephemeral-storage 0 (0%) 0 (0%)
hugepages-1Gi 0 (0%) 0 (0%)
hugepages-2Mi 0 (0%) 0 (0%)
hugepages-32Mi 0 (0%) 0 (0%)
hugepages-64Ki 0 (0%) 0 (0%)
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal Starting 9s kube-proxy
Normal Starting 17s kubelet Starting kubelet.
Warning CgroupV1 17s kubelet cgroup v1 support is in maintenance mode, please migrate to cgroup v2
Normal NodeAllocatableEnforced 17s kubelet Updated Node Allocatable limit across pods
Normal NodeHasSufficientMemory 17s kubelet Node dockerenv-668100 status is now: NodeHasSufficientMemory
Normal NodeHasNoDiskPressure 17s kubelet Node dockerenv-668100 status is now: NodeHasNoDiskPressure
Normal NodeHasSufficientPID 17s kubelet Node dockerenv-668100 status is now: NodeHasSufficientPID
Normal RegisteredNode 12s node-controller Node dockerenv-668100 event: Registered Node dockerenv-668100 in Controller
==> dmesg <==
[Sep 4 05:18] kauditd_printk_skb: 8 callbacks suppressed
[Sep 4 05:30] 9pnet: p9_fd_create_tcp (626527): problem connecting socket to 192.168.49.1
[Sep 4 06:19] kauditd_printk_skb: 8 callbacks suppressed
==> etcd [7d5b83c3116ee734dc27be4059c60fbef025a6b40879d237176b7e5b5df2d798] <==
{"level":"warn","ts":"2025-09-04T06:26:58.737449Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:49578","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-09-04T06:26:58.750091Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:49588","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-09-04T06:26:58.778496Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:49594","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-09-04T06:26:58.795249Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:49616","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-09-04T06:26:58.807605Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:49628","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-09-04T06:26:58.831641Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:49644","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-09-04T06:26:58.852328Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:49662","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-09-04T06:26:58.869327Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:49668","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-09-04T06:26:58.882697Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:49694","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-09-04T06:26:58.939299Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:49734","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-09-04T06:26:58.955012Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:49718","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-09-04T06:26:58.966306Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:49742","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-09-04T06:26:58.982251Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:49758","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-09-04T06:26:59.012322Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:49772","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-09-04T06:26:59.041827Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:49798","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-09-04T06:26:59.074982Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:49816","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-09-04T06:26:59.105990Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:49832","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-09-04T06:26:59.120982Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:49858","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-09-04T06:26:59.157311Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:49880","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-09-04T06:26:59.204925Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:49900","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-09-04T06:26:59.277626Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:49914","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-09-04T06:26:59.281013Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:49928","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-09-04T06:26:59.305103Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:49950","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-09-04T06:26:59.326005Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:49972","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-09-04T06:26:59.391794Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:50002","server-name":"","error":"EOF"}
==> kernel <==
06:27:20 up 4:09, 0 users, load average: 2.29, 3.01, 3.35
Linux dockerenv-668100 5.15.0-1084-aws #91~20.04.1-Ubuntu SMP Fri May 2 07:00:04 UTC 2025 aarch64 aarch64 aarch64 GNU/Linux
PRETTY_NAME="Ubuntu 22.04.5 LTS"
==> kindnet [882fe50ff0640fcc6be702ebd8fb9bb219cb6ed580178c5642230626d1843c44] <==
I0904 06:27:10.740381 1 main.go:109] connected to apiserver: https://10.96.0.1:443
I0904 06:27:10.740636 1 main.go:139] hostIP = 192.168.49.2
podIP = 192.168.49.2
I0904 06:27:10.740822 1 main.go:148] setting mtu 1500 for CNI
I0904 06:27:10.740836 1 main.go:178] kindnetd IP family: "ipv4"
I0904 06:27:10.740847 1 main.go:182] noMask IPv4 subnets: [10.244.0.0/16]
time="2025-09-04T06:27:10Z" level=info msg="Created plugin 10-kube-network-policies (kindnetd, handles RunPodSandbox,RemovePodSandbox)"
I0904 06:27:10.941169 1 controller.go:377] "Starting controller" name="kube-network-policies"
I0904 06:27:10.941252 1 controller.go:381] "Waiting for informer caches to sync"
I0904 06:27:10.941283 1 shared_informer.go:350] "Waiting for caches to sync" controller="kube-network-policies"
I0904 06:27:10.942214 1 controller.go:390] nri plugin exited: failed to connect to NRI service: dial unix /var/run/nri/nri.sock: connect: no such file or directory
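The "nri plugin exited" line is kindnet failing to reach containerd's NRI socket. NRI is disabled by default in the containerd 1.7.x series (this node runs containerd://1.7.27, per the node info above), so /var/run/nri/nri.sock does not exist and kindnet's network-policy controller simply runs without it. A quick check, assuming the profile is still up:

    # expected to fail with "No such file or directory" while NRI is disabled
    out/minikube-linux-arm64 -p dockerenv-668100 ssh -- ls -l /var/run/nri/nri.sock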
==> kube-apiserver [1ee90978e53f4c922480a75ed1d74f67e1eefed0bf85dec4d6b473a4d2c0135b] <==
I0904 06:27:00.662030 1 cache.go:39] Caches are synced for autoregister controller
I0904 06:27:00.668903 1 controller.go:667] quota admission added evaluator for: namespaces
E0904 06:27:00.676342 1 controller.go:145] "Failed to ensure lease exists, will retry" err="namespaces \"kube-system\" not found" interval="200ms"
I0904 06:27:00.698423 1 default_servicecidr_controller.go:228] Setting default ServiceCIDR condition Ready to True
I0904 06:27:00.702226 1 cidrallocator.go:301] created ClusterIP allocator for Service CIDR 10.96.0.0/12
I0904 06:27:00.725914 1 cidrallocator.go:277] updated ClusterIP allocator for Service CIDR 10.96.0.0/12
I0904 06:27:00.729397 1 default_servicecidr_controller.go:137] Shutting down kubernetes-service-cidr-controller
I0904 06:27:00.882090 1 controller.go:667] quota admission added evaluator for: leases.coordination.k8s.io
I0904 06:27:01.193804 1 storage_scheduling.go:95] created PriorityClass system-node-critical with value 2000001000
I0904 06:27:01.203942 1 storage_scheduling.go:95] created PriorityClass system-cluster-critical with value 2000000000
I0904 06:27:01.204140 1 storage_scheduling.go:111] all system priority classes are created successfully or already exist.
I0904 06:27:02.299671 1 controller.go:667] quota admission added evaluator for: roles.rbac.authorization.k8s.io
I0904 06:27:02.379772 1 controller.go:667] quota admission added evaluator for: rolebindings.rbac.authorization.k8s.io
I0904 06:27:02.515913 1 alloc.go:328] "allocated clusterIPs" service="default/kubernetes" clusterIPs={"IPv4":"10.96.0.1"}
W0904 06:27:02.525119 1 lease.go:265] Resetting endpoints for master service "kubernetes" to [192.168.49.2]
I0904 06:27:02.526603 1 controller.go:667] quota admission added evaluator for: endpoints
I0904 06:27:02.532597 1 controller.go:667] quota admission added evaluator for: endpointslices.discovery.k8s.io
I0904 06:27:03.434265 1 controller.go:667] quota admission added evaluator for: serviceaccounts
I0904 06:27:03.730312 1 controller.go:667] quota admission added evaluator for: deployments.apps
I0904 06:27:03.747991 1 alloc.go:328] "allocated clusterIPs" service="kube-system/kube-dns" clusterIPs={"IPv4":"10.96.0.10"}
I0904 06:27:03.761594 1 controller.go:667] quota admission added evaluator for: daemonsets.apps
I0904 06:27:09.092456 1 cidrallocator.go:277] updated ClusterIP allocator for Service CIDR 10.96.0.0/12
I0904 06:27:09.097450 1 cidrallocator.go:277] updated ClusterIP allocator for Service CIDR 10.96.0.0/12
I0904 06:27:09.337794 1 controller.go:667] quota admission added evaluator for: controllerrevisions.apps
I0904 06:27:09.445404 1 controller.go:667] quota admission added evaluator for: replicasets.apps
==> kube-controller-manager [1cd1d9f20ef8ade227088850e8a4b70283e0db55a19acab86c1d990dfd1c836e] <==
I0904 06:27:08.434011 1 node_lifecycle_controller.go:1067] "Controller detected that zone is now in new state" logger="node-lifecycle-controller" zone="" newState="Normal"
I0904 06:27:08.433743 1 shared_informer.go:356] "Caches are synced" controller="legacy-service-account-token-cleaner"
I0904 06:27:08.433835 1 shared_informer.go:356] "Caches are synced" controller="bootstrap_signer"
I0904 06:27:08.435209 1 shared_informer.go:356] "Caches are synced" controller="taint-eviction-controller"
I0904 06:27:08.435599 1 shared_informer.go:356] "Caches are synced" controller="TTL"
I0904 06:27:08.436522 1 shared_informer.go:356] "Caches are synced" controller="cronjob"
I0904 06:27:08.439566 1 shared_informer.go:356] "Caches are synced" controller="VAC protection"
I0904 06:27:08.439750 1 shared_informer.go:356] "Caches are synced" controller="resource quota"
I0904 06:27:08.448257 1 shared_informer.go:356] "Caches are synced" controller="ClusterRoleAggregator"
I0904 06:27:08.451909 1 shared_informer.go:356] "Caches are synced" controller="TTL after finished"
I0904 06:27:08.461190 1 shared_informer.go:356] "Caches are synced" controller="PVC protection"
I0904 06:27:08.464763 1 shared_informer.go:356] "Caches are synced" controller="endpoint_slice_mirroring"
I0904 06:27:08.467576 1 shared_informer.go:356] "Caches are synced" controller="garbage collector"
I0904 06:27:08.474547 1 shared_informer.go:356] "Caches are synced" controller="PV protection"
I0904 06:27:08.480807 1 shared_informer.go:356] "Caches are synced" controller="disruption"
I0904 06:27:08.480990 1 shared_informer.go:356] "Caches are synced" controller="HPA"
I0904 06:27:08.482051 1 shared_informer.go:356] "Caches are synced" controller="stateful set"
I0904 06:27:08.483778 1 shared_informer.go:356] "Caches are synced" controller="job"
I0904 06:27:08.484106 1 shared_informer.go:356] "Caches are synced" controller="deployment"
I0904 06:27:08.484784 1 shared_informer.go:356] "Caches are synced" controller="crt configmap"
I0904 06:27:08.485116 1 shared_informer.go:356] "Caches are synced" controller="ReplicationController"
I0904 06:27:08.485231 1 shared_informer.go:356] "Caches are synced" controller="attach detach"
I0904 06:27:08.485547 1 shared_informer.go:356] "Caches are synced" controller="resource_claim"
I0904 06:27:08.488470 1 shared_informer.go:356] "Caches are synced" controller="endpoint"
I0904 06:27:08.490990 1 shared_informer.go:356] "Caches are synced" controller="resource quota"
==> kube-proxy [380b173f1f0b66abb94a52bc25f3037e00c21670f7fb2a304fcc5d5751ffae9a] <==
I0904 06:27:10.534203 1 server_linux.go:53] "Using iptables proxy"
I0904 06:27:10.618919 1 shared_informer.go:349] "Waiting for caches to sync" controller="node informer cache"
I0904 06:27:10.725005 1 shared_informer.go:356] "Caches are synced" controller="node informer cache"
I0904 06:27:10.725051 1 server.go:219] "Successfully retrieved NodeIPs" NodeIPs=["192.168.49.2"]
E0904 06:27:10.725126 1 server.go:256] "Kube-proxy configuration may be incomplete or incorrect" err="nodePortAddresses is unset; NodePort connections will be accepted on all local IPs. Consider using `--nodeport-addresses primary`"
I0904 06:27:10.800234 1 server.go:265] "kube-proxy running in dual-stack mode" primary ipFamily="IPv4"
I0904 06:27:10.800302 1 server_linux.go:132] "Using iptables Proxier"
I0904 06:27:10.804169 1 proxier.go:242] "Setting route_localnet=1 to allow node-ports on localhost; to change this either disable iptables.localhostNodePorts (--iptables-localhost-nodeports) or set nodePortAddresses (--nodeport-addresses) to filter loopback addresses" ipFamily="IPv4"
I0904 06:27:10.804548 1 server.go:527] "Version info" version="v1.34.0"
I0904 06:27:10.804574 1 server.go:529] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
I0904 06:27:10.806502 1 config.go:200] "Starting service config controller"
I0904 06:27:10.806941 1 shared_informer.go:349] "Waiting for caches to sync" controller="service config"
I0904 06:27:10.807215 1 config.go:106] "Starting endpoint slice config controller"
I0904 06:27:10.807306 1 shared_informer.go:349] "Waiting for caches to sync" controller="endpoint slice config"
I0904 06:27:10.807404 1 config.go:403] "Starting serviceCIDR config controller"
I0904 06:27:10.807482 1 shared_informer.go:349] "Waiting for caches to sync" controller="serviceCIDR config"
I0904 06:27:10.815400 1 config.go:309] "Starting node config controller"
I0904 06:27:10.815475 1 shared_informer.go:349] "Waiting for caches to sync" controller="node config"
I0904 06:27:10.819239 1 shared_informer.go:356] "Caches are synced" controller="node config"
I0904 06:27:10.907917 1 shared_informer.go:356] "Caches are synced" controller="serviceCIDR config"
I0904 06:27:10.907937 1 shared_informer.go:356] "Caches are synced" controller="service config"
I0904 06:27:10.908132 1 shared_informer.go:356] "Caches are synced" controller="endpoint slice config"
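The only non-informational line here is the nodePortAddresses warning, and kube-proxy prints its own remedy. In a kubeadm-provisioned cluster like this one the setting lives in the kube-proxy ConfigMap rather than on a command line; a sketch of how one could inspect it (not a step this test performs):

    kubectl --context dockerenv-668100 -n kube-system get configmap kube-proxy -o yaml | grep -n nodePortAddresses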
==> kube-scheduler [6ae62be481cbea4d22cfc09d04c38605d9e3ea025928c5358b8797c6fe76c42c] <==
I0904 06:27:01.282793 1 serving.go:386] Generated self-signed cert in-memory
W0904 06:27:02.280597 1 requestheader_controller.go:204] Unable to get configmap/extension-apiserver-authentication in kube-system. Usually fixed by 'kubectl create rolebinding -n kube-system ROLEBINDING_NAME --role=extension-apiserver-authentication-reader --serviceaccount=YOUR_NS:YOUR_SA'
W0904 06:27:02.281377 1 authentication.go:397] Error looking up in-cluster authentication configuration: configmaps "extension-apiserver-authentication" is forbidden: User "system:kube-scheduler" cannot get resource "configmaps" in API group "" in the namespace "kube-system"
W0904 06:27:02.281534 1 authentication.go:398] Continuing without authentication configuration. This may treat all requests as anonymous.
W0904 06:27:02.281628 1 authentication.go:399] To require authentication configuration lookup to succeed, set --authentication-tolerate-lookup-failure=false
I0904 06:27:02.314069 1 server.go:175] "Starting Kubernetes Scheduler" version="v1.34.0"
I0904 06:27:02.314105 1 server.go:177] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
I0904 06:27:02.316135 1 configmap_cafile_content.go:205] "Starting controller" name="client-ca::kube-system::extension-apiserver-authentication::client-ca-file"
I0904 06:27:02.316342 1 shared_informer.go:349] "Waiting for caches to sync" controller="client-ca::kube-system::extension-apiserver-authentication::client-ca-file"
I0904 06:27:02.316561 1 secure_serving.go:211] Serving securely on 127.0.0.1:10259
I0904 06:27:02.316653 1 tlsconfig.go:243] "Starting DynamicServingCertificateController"
E0904 06:27:02.326118 1 reflector.go:205] "Failed to watch" err="failed to list *v1.ConfigMap: configmaps \"extension-apiserver-authentication\" is forbidden: User \"system:kube-scheduler\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"kube-system\"" logger="UnhandledError" reflector="runtime/asm_arm64.s:1223" type="*v1.ConfigMap"
I0904 06:27:03.816510 1 shared_informer.go:356] "Caches are synced" controller="client-ca::kube-system::extension-apiserver-authentication::client-ca-file"
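The three authentication warnings at 06:27:02 resolve themselves: the apiserver only finishes creating RBAC roles and rolebindings at almost the same moment (see the quota admission lines in its log above), and the scheduler's client-ca informer syncs about a second later. If the condition persisted, the rolebinding suggested by the log template could be created explicitly; the binding name below is illustrative, not from this run:

    kubectl -n kube-system create rolebinding scheduler-authn-reader \
      --role=extension-apiserver-authentication-reader \
      --user=system:kube-scheduler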
==> kubelet <==
Sep 04 06:27:09 dockerenv-668100 kubelet[1540]: I0904 06:27:09.381023 1540 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"xtables-lock\" (UniqueName: \"kubernetes.io/host-path/4858c187-b6a5-4f05-bbe2-c1e23c6af60c-xtables-lock\") pod \"kindnet-gktdj\" (UID: \"4858c187-b6a5-4f05-bbe2-c1e23c6af60c\") " pod="kube-system/kindnet-gktdj"
Sep 04 06:27:09 dockerenv-668100 kubelet[1540]: I0904 06:27:09.381286 1540 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"xtables-lock\" (UniqueName: \"kubernetes.io/host-path/2c703103-b24a-4f1e-83c0-e70b47e465ae-xtables-lock\") pod \"kube-proxy-cnl9w\" (UID: \"2c703103-b24a-4f1e-83c0-e70b47e465ae\") " pod="kube-system/kube-proxy-cnl9w"
Sep 04 06:27:09 dockerenv-668100 kubelet[1540]: I0904 06:27:09.381375 1540 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/4858c187-b6a5-4f05-bbe2-c1e23c6af60c-lib-modules\") pod \"kindnet-gktdj\" (UID: \"4858c187-b6a5-4f05-bbe2-c1e23c6af60c\") " pod="kube-system/kindnet-gktdj"
Sep 04 06:27:09 dockerenv-668100 kubelet[1540]: I0904 06:27:09.381452 1540 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-proxy\" (UniqueName: \"kubernetes.io/configmap/2c703103-b24a-4f1e-83c0-e70b47e465ae-kube-proxy\") pod \"kube-proxy-cnl9w\" (UID: \"2c703103-b24a-4f1e-83c0-e70b47e465ae\") " pod="kube-system/kube-proxy-cnl9w"
Sep 04 06:27:09 dockerenv-668100 kubelet[1540]: I0904 06:27:09.381530 1540 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jncd4\" (UniqueName: \"kubernetes.io/projected/2c703103-b24a-4f1e-83c0-e70b47e465ae-kube-api-access-jncd4\") pod \"kube-proxy-cnl9w\" (UID: \"2c703103-b24a-4f1e-83c0-e70b47e465ae\") " pod="kube-system/kube-proxy-cnl9w"
Sep 04 06:27:09 dockerenv-668100 kubelet[1540]: I0904 06:27:09.381636 1540 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-cfg\" (UniqueName: \"kubernetes.io/host-path/4858c187-b6a5-4f05-bbe2-c1e23c6af60c-cni-cfg\") pod \"kindnet-gktdj\" (UID: \"4858c187-b6a5-4f05-bbe2-c1e23c6af60c\") " pod="kube-system/kindnet-gktdj"
Sep 04 06:27:09 dockerenv-668100 kubelet[1540]: I0904 06:27:09.381714 1540 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tq4tk\" (UniqueName: \"kubernetes.io/projected/4858c187-b6a5-4f05-bbe2-c1e23c6af60c-kube-api-access-tq4tk\") pod \"kindnet-gktdj\" (UID: \"4858c187-b6a5-4f05-bbe2-c1e23c6af60c\") " pod="kube-system/kindnet-gktdj"
Sep 04 06:27:09 dockerenv-668100 kubelet[1540]: I0904 06:27:09.381802 1540 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/2c703103-b24a-4f1e-83c0-e70b47e465ae-lib-modules\") pod \"kube-proxy-cnl9w\" (UID: \"2c703103-b24a-4f1e-83c0-e70b47e465ae\") " pod="kube-system/kube-proxy-cnl9w"
Sep 04 06:27:09 dockerenv-668100 kubelet[1540]: E0904 06:27:09.495717 1540 projected.go:291] Couldn't get configMap kube-system/kube-root-ca.crt: configmap "kube-root-ca.crt" not found
Sep 04 06:27:09 dockerenv-668100 kubelet[1540]: E0904 06:27:09.495758 1540 projected.go:196] Error preparing data for projected volume kube-api-access-tq4tk for pod kube-system/kindnet-gktdj: configmap "kube-root-ca.crt" not found
Sep 04 06:27:09 dockerenv-668100 kubelet[1540]: E0904 06:27:09.495820 1540 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/4858c187-b6a5-4f05-bbe2-c1e23c6af60c-kube-api-access-tq4tk podName:4858c187-b6a5-4f05-bbe2-c1e23c6af60c nodeName:}" failed. No retries permitted until 2025-09-04 06:27:09.995799878 +0000 UTC m=+6.499909084 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-tq4tk" (UniqueName: "kubernetes.io/projected/4858c187-b6a5-4f05-bbe2-c1e23c6af60c-kube-api-access-tq4tk") pod "kindnet-gktdj" (UID: "4858c187-b6a5-4f05-bbe2-c1e23c6af60c") : configmap "kube-root-ca.crt" not found
Sep 04 06:27:09 dockerenv-668100 kubelet[1540]: E0904 06:27:09.497948 1540 projected.go:291] Couldn't get configMap kube-system/kube-root-ca.crt: configmap "kube-root-ca.crt" not found
Sep 04 06:27:09 dockerenv-668100 kubelet[1540]: E0904 06:27:09.497990 1540 projected.go:196] Error preparing data for projected volume kube-api-access-jncd4 for pod kube-system/kube-proxy-cnl9w: configmap "kube-root-ca.crt" not found
Sep 04 06:27:09 dockerenv-668100 kubelet[1540]: E0904 06:27:09.498060 1540 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/2c703103-b24a-4f1e-83c0-e70b47e465ae-kube-api-access-jncd4 podName:2c703103-b24a-4f1e-83c0-e70b47e465ae nodeName:}" failed. No retries permitted until 2025-09-04 06:27:09.998038943 +0000 UTC m=+6.502148150 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-jncd4" (UniqueName: "kubernetes.io/projected/2c703103-b24a-4f1e-83c0-e70b47e465ae-kube-api-access-jncd4") pod "kube-proxy-cnl9w" (UID: "2c703103-b24a-4f1e-83c0-e70b47e465ae") : configmap "kube-root-ca.crt" not found
Sep 04 06:27:09 dockerenv-668100 kubelet[1540]: I0904 06:27:09.786519 1540 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/f5428f91-007a-40ef-aea7-9ed4ae42021f-config-volume\") pod \"coredns-66bc5c9577-sjph7\" (UID: \"f5428f91-007a-40ef-aea7-9ed4ae42021f\") " pod="kube-system/coredns-66bc5c9577-sjph7"
Sep 04 06:27:09 dockerenv-668100 kubelet[1540]: I0904 06:27:09.787041 1540 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5n6sb\" (UniqueName: \"kubernetes.io/projected/f5428f91-007a-40ef-aea7-9ed4ae42021f-kube-api-access-5n6sb\") pod \"coredns-66bc5c9577-sjph7\" (UID: \"f5428f91-007a-40ef-aea7-9ed4ae42021f\") " pod="kube-system/coredns-66bc5c9577-sjph7"
Sep 04 06:27:09 dockerenv-668100 kubelet[1540]: I0904 06:27:09.897464 1540 swap_util.go:74] "error creating dir to test if tmpfs noswap is enabled. Assuming not supported" mount path="" error="stat /var/lib/kubelet/plugins/kubernetes.io/empty-dir: no such file or directory"
Sep 04 06:27:10 dockerenv-668100 kubelet[1540]: E0904 06:27:10.011779 1540 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to setup network for sandbox \"d3be208230571470bdd12d3803e95a1a23265b4f0ad5e94e586c896d4631f633\": failed to find network info for sandbox \"d3be208230571470bdd12d3803e95a1a23265b4f0ad5e94e586c896d4631f633\""
Sep 04 06:27:10 dockerenv-668100 kubelet[1540]: E0904 06:27:10.011885 1540 kuberuntime_sandbox.go:71] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to setup network for sandbox \"d3be208230571470bdd12d3803e95a1a23265b4f0ad5e94e586c896d4631f633\": failed to find network info for sandbox \"d3be208230571470bdd12d3803e95a1a23265b4f0ad5e94e586c896d4631f633\"" pod="kube-system/coredns-66bc5c9577-sjph7"
Sep 04 06:27:10 dockerenv-668100 kubelet[1540]: E0904 06:27:10.011908 1540 kuberuntime_manager.go:1343] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to setup network for sandbox \"d3be208230571470bdd12d3803e95a1a23265b4f0ad5e94e586c896d4631f633\": failed to find network info for sandbox \"d3be208230571470bdd12d3803e95a1a23265b4f0ad5e94e586c896d4631f633\"" pod="kube-system/coredns-66bc5c9577-sjph7"
Sep 04 06:27:10 dockerenv-668100 kubelet[1540]: E0904 06:27:10.011968 1540 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"coredns-66bc5c9577-sjph7_kube-system(f5428f91-007a-40ef-aea7-9ed4ae42021f)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"coredns-66bc5c9577-sjph7_kube-system(f5428f91-007a-40ef-aea7-9ed4ae42021f)\\\": rpc error: code = Unknown desc = failed to setup network for sandbox \\\"d3be208230571470bdd12d3803e95a1a23265b4f0ad5e94e586c896d4631f633\\\": failed to find network info for sandbox \\\"d3be208230571470bdd12d3803e95a1a23265b4f0ad5e94e586c896d4631f633\\\"\"" pod="kube-system/coredns-66bc5c9577-sjph7" podUID="f5428f91-007a-40ef-aea7-9ed4ae42021f"
Sep 04 06:27:10 dockerenv-668100 kubelet[1540]: I0904 06:27:10.764132 1540 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="kube-system/kube-proxy-cnl9w" podStartSLOduration=1.7641143270000001 podStartE2EDuration="1.764114327s" podCreationTimestamp="2025-09-04 06:27:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-04 06:27:10.76399357 +0000 UTC m=+7.268102777" watchObservedRunningTime="2025-09-04 06:27:10.764114327 +0000 UTC m=+7.268223533"
Sep 04 06:27:11 dockerenv-668100 kubelet[1540]: I0904 06:27:11.762252 1540 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="kube-system/kindnet-gktdj" podStartSLOduration=2.762232479 podStartE2EDuration="2.762232479s" podCreationTimestamp="2025-09-04 06:27:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-04 06:27:10.790653136 +0000 UTC m=+7.294762342" watchObservedRunningTime="2025-09-04 06:27:11.762232479 +0000 UTC m=+8.266341694"
Sep 04 06:27:14 dockerenv-668100 kubelet[1540]: I0904 06:27:14.035175 1540 kuberuntime_manager.go:1828] "Updating runtime config through cri with podcidr" CIDR="10.244.0.0/24"
Sep 04 06:27:14 dockerenv-668100 kubelet[1540]: I0904 06:27:14.036560 1540 kubelet_network.go:47] "Updating Pod CIDR" originalPodCIDR="" newPodCIDR="10.244.0.0/24"
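The CreatePodSandbox failures for coredns at 06:27:10 coincide with kindnet finishing its own startup in the same second, so "failed to find network info for sandbox" reads like the usual CNI-not-ready race rather than a persistent fault; the kubelet retries sandbox creation on its own. Watching the pod's events would confirm the retries, e.g. (assuming the pod still exists):

    kubectl --context dockerenv-668100 -n kube-system get events \
      --field-selector involvedObject.name=coredns-66bc5c9577-sjph7 -w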
==> storage-provisioner [94e77ae694a386fd3c954fbe0ec6379751ed420a5f54c632274658230e01f8cf] <==
I0904 06:27:10.787740 1 storage_provisioner.go:116] Initializing the minikube storage provisioner...
-- /stdout --
helpers_test.go:262: (dbg) Run: out/minikube-linux-arm64 status --format={{.APIServer}} -p dockerenv-668100 -n dockerenv-668100
helpers_test.go:269: (dbg) Run: kubectl --context dockerenv-668100 get po -o=jsonpath={.items[*].metadata.name} -A --field-selector=status.phase!=Running
helpers_test.go:280: non-running pods: coredns-66bc5c9577-sjph7
helpers_test.go:282: ======> post-mortem[TestDockerEnvContainerd]: describe non-running pods <======
helpers_test.go:285: (dbg) Run: kubectl --context dockerenv-668100 describe pod coredns-66bc5c9577-sjph7
helpers_test.go:285: (dbg) Non-zero exit: kubectl --context dockerenv-668100 describe pod coredns-66bc5c9577-sjph7: exit status 1 (96.469023ms)
** stderr **
Error from server (NotFound): pods "coredns-66bc5c9577-sjph7" not found
** /stderr **
helpers_test.go:287: kubectl --context dockerenv-668100 describe pod coredns-66bc5c9577-sjph7: exit status 1
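Note the race in the post-mortem itself: the pod reported as non-running at helpers_test.go:280 is already NotFound by the time helpers_test.go:285 describes it, which usually means its ReplicaSet replaced it between the two calls. Fetching the full objects in the same request as the listing would avoid re-querying by name; a sketch of what the helper could run instead:

    kubectl --context dockerenv-668100 get po -A \
      --field-selector=status.phase!=Running -o yaml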
helpers_test.go:175: Cleaning up "dockerenv-668100" profile ...
helpers_test.go:178: (dbg) Run: out/minikube-linux-arm64 delete -p dockerenv-668100
helpers_test.go:178: (dbg) Done: out/minikube-linux-arm64 delete -p dockerenv-668100: (1.946916615s)
--- FAIL: TestDockerEnvContainerd (52.51s)