=== RUN TestDockerEnvContainerd
docker_test.go:170: running with containerd true linux amd64
docker_test.go:181: (dbg) Run: out/minikube-linux-amd64 start -p dockerenv-217193 --driver=docker --container-runtime=containerd
docker_test.go:181: (dbg) Done: out/minikube-linux-amd64 start -p dockerenv-217193 --driver=docker --container-runtime=containerd: (23.503905101s)
docker_test.go:189: (dbg) Run: /bin/bash -c "out/minikube-linux-amd64 docker-env --ssh-host --ssh-add -p dockerenv-217193"
docker_test.go:220: (dbg) Run: /bin/bash -c "SSH_AUTH_SOCK="/tmp/ssh-OmrfoJOe2hNo/agent.417918" SSH_AGENT_PID="417919" DOCKER_HOST=ssh://docker@127.0.0.1:33148 docker version"
docker_test.go:243: (dbg) Run: /bin/bash -c "SSH_AUTH_SOCK="/tmp/ssh-OmrfoJOe2hNo/agent.417918" SSH_AGENT_PID="417919" DOCKER_HOST=ssh://docker@127.0.0.1:33148 DOCKER_BUILDKIT=0 docker build -t local/minikube-dockerenv-containerd-test:latest testdata/docker-env"
docker_test.go:243: (dbg) Done: /bin/bash -c "SSH_AUTH_SOCK="/tmp/ssh-OmrfoJOe2hNo/agent.417918" SSH_AGENT_PID="417919" DOCKER_HOST=ssh://docker@127.0.0.1:33148 DOCKER_BUILDKIT=0 docker build -t local/minikube-dockerenv-containerd-test:latest testdata/docker-env": (1.860665406s)
docker_test.go:250: (dbg) Run: /bin/bash -c "SSH_AUTH_SOCK="/tmp/ssh-OmrfoJOe2hNo/agent.417918" SSH_AGENT_PID="417919" DOCKER_HOST=ssh://docker@127.0.0.1:33148 docker image ls"
docker_test.go:250: (dbg) Non-zero exit: /bin/bash -c "SSH_AUTH_SOCK="/tmp/ssh-OmrfoJOe2hNo/agent.417918" SSH_AGENT_PID="417919" DOCKER_HOST=ssh://docker@127.0.0.1:33148 docker image ls": exit status 1 (529.260531ms)
** stderr **
error during connect: Get "http://docker.example.com/v1.43/images/json": EOF
** /stderr **
docker_test.go:252: failed to execute 'docker image ls', error: exit status 1, output:
** stderr **
error during connect: Get "http://docker.example.com/v1.43/images/json": EOF
** /stderr **
panic.go:636: *** TestDockerEnvContainerd FAILED at 2025-09-04 04:19:24.581272614 +0000 UTC m=+380.672286365
helpers_test.go:222: -----------------------post-mortem--------------------------------
helpers_test.go:223: ======> post-mortem[TestDockerEnvContainerd]: network settings <======
helpers_test.go:230: HOST ENV snapshots: PROXY env: HTTP_PROXY="<empty>" HTTPS_PROXY="<empty>" NO_PROXY="<empty>"
helpers_test.go:238: ======> post-mortem[TestDockerEnvContainerd]: docker inspect <======
helpers_test.go:239: (dbg) Run: docker inspect dockerenv-217193
helpers_test.go:243: (dbg) docker inspect dockerenv-217193:
-- stdout --
[
    {
        "Id": "10edcd9dc2fc2d4d2f2b61fad134e1f847ac65e15ced074bb5c43947c2311036",
        "Created": "2025-09-04T04:18:52.690319926Z",
        "Path": "/usr/local/bin/entrypoint",
        "Args": [
            "/sbin/init"
        ],
        "State": {
            "Status": "running",
            "Running": true,
            "Paused": false,
            "Restarting": false,
            "OOMKilled": false,
            "Dead": false,
            "Pid": 415001,
            "ExitCode": 0,
            "Error": "",
            "StartedAt": "2025-09-04T04:18:52.718225643Z",
            "FinishedAt": "0001-01-01T00:00:00Z"
        },
        "Image": "sha256:6f7d8b3ae805e64eb4efe058a75d43d384fe5989473cee7f8e24ea90eca28309",
        "ResolvConfPath": "/var/lib/docker/containers/10edcd9dc2fc2d4d2f2b61fad134e1f847ac65e15ced074bb5c43947c2311036/resolv.conf",
        "HostnamePath": "/var/lib/docker/containers/10edcd9dc2fc2d4d2f2b61fad134e1f847ac65e15ced074bb5c43947c2311036/hostname",
        "HostsPath": "/var/lib/docker/containers/10edcd9dc2fc2d4d2f2b61fad134e1f847ac65e15ced074bb5c43947c2311036/hosts",
        "LogPath": "/var/lib/docker/containers/10edcd9dc2fc2d4d2f2b61fad134e1f847ac65e15ced074bb5c43947c2311036/10edcd9dc2fc2d4d2f2b61fad134e1f847ac65e15ced074bb5c43947c2311036-json.log",
        "Name": "/dockerenv-217193",
        "RestartCount": 0,
        "Driver": "overlay2",
        "Platform": "linux",
        "MountLabel": "",
        "ProcessLabel": "",
        "AppArmorProfile": "unconfined",
        "ExecIDs": null,
        "HostConfig": {
            "Binds": [
                "/lib/modules:/lib/modules:ro",
                "dockerenv-217193:/var"
            ],
            "ContainerIDFile": "",
            "LogConfig": {
                "Type": "json-file",
                "Config": {
                    "max-size": "100m"
                }
            },
            "NetworkMode": "dockerenv-217193",
            "PortBindings": {
                "22/tcp": [
                    {
                        "HostIp": "127.0.0.1",
                        "HostPort": ""
                    }
                ],
                "2376/tcp": [
                    {
                        "HostIp": "127.0.0.1",
                        "HostPort": ""
                    }
                ],
                "32443/tcp": [
                    {
                        "HostIp": "127.0.0.1",
                        "HostPort": ""
                    }
                ],
                "5000/tcp": [
                    {
                        "HostIp": "127.0.0.1",
                        "HostPort": ""
                    }
                ],
                "8443/tcp": [
                    {
                        "HostIp": "127.0.0.1",
                        "HostPort": ""
                    }
                ]
            },
            "RestartPolicy": {
                "Name": "no",
                "MaximumRetryCount": 0
            },
            "AutoRemove": false,
            "VolumeDriver": "",
            "VolumesFrom": null,
            "ConsoleSize": [
                0,
                0
            ],
            "CapAdd": null,
            "CapDrop": null,
            "CgroupnsMode": "host",
            "Dns": [],
            "DnsOptions": [],
            "DnsSearch": [],
            "ExtraHosts": null,
            "GroupAdd": null,
            "IpcMode": "private",
            "Cgroup": "",
            "Links": null,
            "OomScoreAdj": 0,
            "PidMode": "",
            "Privileged": true,
            "PublishAllPorts": false,
            "ReadonlyRootfs": false,
            "SecurityOpt": [
                "seccomp=unconfined",
                "apparmor=unconfined",
                "label=disable"
            ],
            "Tmpfs": {
                "/run": "",
                "/tmp": ""
            },
            "UTSMode": "",
            "UsernsMode": "",
            "ShmSize": 67108864,
            "Runtime": "runc",
            "Isolation": "",
            "CpuShares": 0,
            "Memory": 8388608000,
            "NanoCpus": 2000000000,
            "CgroupParent": "",
            "BlkioWeight": 0,
            "BlkioWeightDevice": [],
            "BlkioDeviceReadBps": [],
            "BlkioDeviceWriteBps": [],
            "BlkioDeviceReadIOps": [],
            "BlkioDeviceWriteIOps": [],
            "CpuPeriod": 0,
            "CpuQuota": 0,
            "CpuRealtimePeriod": 0,
            "CpuRealtimeRuntime": 0,
            "CpusetCpus": "",
            "CpusetMems": "",
            "Devices": [],
            "DeviceCgroupRules": null,
            "DeviceRequests": null,
            "MemoryReservation": 0,
            "MemorySwap": 16777216000,
            "MemorySwappiness": null,
            "OomKillDisable": false,
            "PidsLimit": null,
            "Ulimits": [],
            "CpuCount": 0,
            "CpuPercent": 0,
            "IOMaximumIOps": 0,
            "IOMaximumBandwidth": 0,
            "MaskedPaths": null,
            "ReadonlyPaths": null
        },
        "GraphDriver": {
            "Data": {
                "ID": "10edcd9dc2fc2d4d2f2b61fad134e1f847ac65e15ced074bb5c43947c2311036",
                "LowerDir": "/var/lib/docker/overlay2/a39c394da3439a85d9718d89bfbc179c99178f57069bf7135f47b7ce95022d95-init/diff:/var/lib/docker/overlay2/0769bef7e3c5865cebf1e3a1be4e4b525196a05d5c3fd7786d90930088730419/diff",
                "MergedDir": "/var/lib/docker/overlay2/a39c394da3439a85d9718d89bfbc179c99178f57069bf7135f47b7ce95022d95/merged",
                "UpperDir": "/var/lib/docker/overlay2/a39c394da3439a85d9718d89bfbc179c99178f57069bf7135f47b7ce95022d95/diff",
                "WorkDir": "/var/lib/docker/overlay2/a39c394da3439a85d9718d89bfbc179c99178f57069bf7135f47b7ce95022d95/work"
            },
            "Name": "overlay2"
        },
        "Mounts": [
            {
                "Type": "bind",
                "Source": "/lib/modules",
                "Destination": "/lib/modules",
                "Mode": "ro",
                "RW": false,
                "Propagation": "rprivate"
            },
            {
                "Type": "volume",
                "Name": "dockerenv-217193",
                "Source": "/var/lib/docker/volumes/dockerenv-217193/_data",
                "Destination": "/var",
                "Driver": "local",
                "Mode": "z",
                "RW": true,
                "Propagation": ""
            }
        ],
        "Config": {
            "Hostname": "dockerenv-217193",
            "Domainname": "",
            "User": "",
            "AttachStdin": false,
            "AttachStdout": false,
            "AttachStderr": false,
            "ExposedPorts": {
                "22/tcp": {},
                "2376/tcp": {},
                "32443/tcp": {},
                "5000/tcp": {},
                "8443/tcp": {}
            },
            "Tty": true,
            "OpenStdin": false,
            "StdinOnce": false,
            "Env": [
                "container=docker",
                "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
            ],
            "Cmd": null,
            "Image": "gcr.io/k8s-minikube/kicbase-builds:v0.0.47-1756936034-21409@sha256:06a2e6835062e5beff0e5288aa7d453ae87f4ed9d9f593dbbe436c8e34741bfc",
            "Volumes": null,
            "WorkingDir": "/",
            "Entrypoint": [
                "/usr/local/bin/entrypoint",
                "/sbin/init"
            ],
            "OnBuild": null,
            "Labels": {
                "created_by.minikube.sigs.k8s.io": "true",
                "mode.minikube.sigs.k8s.io": "dockerenv-217193",
                "name.minikube.sigs.k8s.io": "dockerenv-217193",
                "role.minikube.sigs.k8s.io": ""
            },
            "StopSignal": "SIGRTMIN+3"
        },
        "NetworkSettings": {
            "Bridge": "",
            "SandboxID": "b16dc799c309b3d9121049ab468c3a106cbc3d8ed456bc4a7a7f076a21c86ce7",
            "SandboxKey": "/var/run/docker/netns/b16dc799c309",
            "Ports": {
                "22/tcp": [
                    {
                        "HostIp": "127.0.0.1",
                        "HostPort": "33148"
                    }
                ],
                "2376/tcp": [
                    {
                        "HostIp": "127.0.0.1",
                        "HostPort": "33149"
                    }
                ],
                "32443/tcp": [
                    {
                        "HostIp": "127.0.0.1",
                        "HostPort": "33152"
                    }
                ],
                "5000/tcp": [
                    {
                        "HostIp": "127.0.0.1",
                        "HostPort": "33150"
                    }
                ],
                "8443/tcp": [
                    {
                        "HostIp": "127.0.0.1",
                        "HostPort": "33151"
                    }
                ]
            },
            "HairpinMode": false,
            "LinkLocalIPv6Address": "",
            "LinkLocalIPv6PrefixLen": 0,
            "SecondaryIPAddresses": null,
            "SecondaryIPv6Addresses": null,
            "EndpointID": "",
            "Gateway": "",
            "GlobalIPv6Address": "",
            "GlobalIPv6PrefixLen": 0,
            "IPAddress": "",
            "IPPrefixLen": 0,
            "IPv6Gateway": "",
            "MacAddress": "",
            "Networks": {
                "dockerenv-217193": {
                    "IPAMConfig": {
                        "IPv4Address": "192.168.49.2"
                    },
                    "Links": null,
                    "Aliases": null,
                    "MacAddress": "72:06:28:d2:6d:26",
                    "DriverOpts": null,
                    "GwPriority": 0,
                    "NetworkID": "100c0a48728fb003ab653cd3e6fb7600eb8ad3871526f074c07473a0a245e798",
                    "EndpointID": "64aaf0f95c21655319ff29d214407b93763c950a4edf3f28b25f257a56f4d3c8",
                    "Gateway": "192.168.49.1",
                    "IPAddress": "192.168.49.2",
                    "IPPrefixLen": 24,
                    "IPv6Gateway": "",
                    "GlobalIPv6Address": "",
                    "GlobalIPv6PrefixLen": 0,
                    "DNSNames": [
                        "dockerenv-217193",
                        "10edcd9dc2fc"
                    ]
                }
            }
        }
    }
]
-- /stdout --
helpers_test.go:247: (dbg) Run: out/minikube-linux-amd64 status --format={{.Host}} -p dockerenv-217193 -n dockerenv-217193
helpers_test.go:252: <<< TestDockerEnvContainerd FAILED: start of post-mortem logs <<<
helpers_test.go:253: ======> post-mortem[TestDockerEnvContainerd]: minikube logs <======
helpers_test.go:255: (dbg) Run: out/minikube-linux-amd64 -p dockerenv-217193 logs -n 25
helpers_test.go:255: (dbg) Done: out/minikube-linux-amd64 -p dockerenv-217193 logs -n 25: (1.021109141s)
helpers_test.go:260: TestDockerEnvContainerd logs:
-- stdout --
==> Audit <==
┌────────────┬─────────────────────────────────────────────────────────────────────────────────┬──────────────────┬─────────┬─────────┬─────────────────────┬─────────────────────┐
│ COMMAND │ ARGS │ PROFILE │ USER │ VERSION │ START TIME │ END TIME │
├────────────┼─────────────────────────────────────────────────────────────────────────────────┼──────────────────┼─────────┼─────────┼─────────────────────┼─────────────────────┤
│ addons │ addons-919243 addons disable storage-provisioner-rancher --alsologtostderr -v=1 │ addons-919243 │ jenkins │ v1.36.0 │ 04 Sep 25 04:17 UTC │ 04 Sep 25 04:18 UTC │
│ addons │ addons-919243 addons disable yakd --alsologtostderr -v=1 │ addons-919243 │ jenkins │ v1.36.0 │ 04 Sep 25 04:17 UTC │ 04 Sep 25 04:17 UTC │
│ ip │ addons-919243 ip │ addons-919243 │ jenkins │ v1.36.0 │ 04 Sep 25 04:17 UTC │ 04 Sep 25 04:17 UTC │
│ addons │ addons-919243 addons disable registry --alsologtostderr -v=1 │ addons-919243 │ jenkins │ v1.36.0 │ 04 Sep 25 04:17 UTC │ 04 Sep 25 04:17 UTC │
│ addons │ addons-919243 addons disable inspektor-gadget --alsologtostderr -v=1 │ addons-919243 │ jenkins │ v1.36.0 │ 04 Sep 25 04:17 UTC │ 04 Sep 25 04:17 UTC │
│ addons │ addons-919243 addons disable metrics-server --alsologtostderr -v=1 │ addons-919243 │ jenkins │ v1.36.0 │ 04 Sep 25 04:17 UTC │ 04 Sep 25 04:17 UTC │
│ addons │ addons-919243 addons disable cloud-spanner --alsologtostderr -v=1 │ addons-919243 │ jenkins │ v1.36.0 │ 04 Sep 25 04:18 UTC │ 04 Sep 25 04:18 UTC │
│ addons │ enable headlamp -p addons-919243 --alsologtostderr -v=1 │ addons-919243 │ jenkins │ v1.36.0 │ 04 Sep 25 04:18 UTC │ 04 Sep 25 04:18 UTC │
│ addons │ addons-919243 addons disable nvidia-device-plugin --alsologtostderr -v=1 │ addons-919243 │ jenkins │ v1.36.0 │ 04 Sep 25 04:18 UTC │ 04 Sep 25 04:18 UTC │
│ addons │ addons-919243 addons disable headlamp --alsologtostderr -v=1 │ addons-919243 │ jenkins │ v1.36.0 │ 04 Sep 25 04:18 UTC │ 04 Sep 25 04:18 UTC │
│ ssh │ addons-919243 ssh curl -s http://127.0.0.1/ -H 'Host: nginx.example.com' │ addons-919243 │ jenkins │ v1.36.0 │ 04 Sep 25 04:18 UTC │ 04 Sep 25 04:18 UTC │
│ ip │ addons-919243 ip │ addons-919243 │ jenkins │ v1.36.0 │ 04 Sep 25 04:18 UTC │ 04 Sep 25 04:18 UTC │
│ addons │ addons-919243 addons disable ingress-dns --alsologtostderr -v=1 │ addons-919243 │ jenkins │ v1.36.0 │ 04 Sep 25 04:18 UTC │ 04 Sep 25 04:18 UTC │
│ addons │ configure registry-creds -f ./testdata/addons_testconfig.json -p addons-919243 │ addons-919243 │ jenkins │ v1.36.0 │ 04 Sep 25 04:18 UTC │ 04 Sep 25 04:18 UTC │
│ addons │ addons-919243 addons disable registry-creds --alsologtostderr -v=1 │ addons-919243 │ jenkins │ v1.36.0 │ 04 Sep 25 04:18 UTC │ 04 Sep 25 04:18 UTC │
│ addons │ addons-919243 addons disable ingress --alsologtostderr -v=1 │ addons-919243 │ jenkins │ v1.36.0 │ 04 Sep 25 04:18 UTC │ 04 Sep 25 04:18 UTC │
│ addons │ addons-919243 addons disable volumesnapshots --alsologtostderr -v=1 │ addons-919243 │ jenkins │ v1.36.0 │ 04 Sep 25 04:18 UTC │ 04 Sep 25 04:18 UTC │
│ addons │ addons-919243 addons disable csi-hostpath-driver --alsologtostderr -v=1 │ addons-919243 │ jenkins │ v1.36.0 │ 04 Sep 25 04:18 UTC │ 04 Sep 25 04:18 UTC │
│ stop │ -p addons-919243 │ addons-919243 │ jenkins │ v1.36.0 │ 04 Sep 25 04:18 UTC │ 04 Sep 25 04:18 UTC │
│ addons │ enable dashboard -p addons-919243 │ addons-919243 │ jenkins │ v1.36.0 │ 04 Sep 25 04:18 UTC │ 04 Sep 25 04:18 UTC │
│ addons │ disable dashboard -p addons-919243 │ addons-919243 │ jenkins │ v1.36.0 │ 04 Sep 25 04:18 UTC │ 04 Sep 25 04:18 UTC │
│ addons │ disable gvisor -p addons-919243 │ addons-919243 │ jenkins │ v1.36.0 │ 04 Sep 25 04:18 UTC │ 04 Sep 25 04:18 UTC │
│ delete │ -p addons-919243 │ addons-919243 │ jenkins │ v1.36.0 │ 04 Sep 25 04:18 UTC │ 04 Sep 25 04:18 UTC │
│ start │ -p dockerenv-217193 --driver=docker --container-runtime=containerd │ dockerenv-217193 │ jenkins │ v1.36.0 │ 04 Sep 25 04:18 UTC │ 04 Sep 25 04:19 UTC │
│ docker-env │ --ssh-host --ssh-add -p dockerenv-217193 │ dockerenv-217193 │ jenkins │ v1.36.0 │ 04 Sep 25 04:19 UTC │ 04 Sep 25 04:19 UTC │
└────────────┴─────────────────────────────────────────────────────────────────────────────────┴──────────────────┴─────────┴─────────┴─────────────────────┴─────────────────────┘
==> Last Start <==
Log file created at: 2025/09/04 04:18:47
Running on machine: ubuntu-20-agent-6
Binary: Built with gc go1.24.6 for linux/amd64
Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg
I0904 04:18:47.585871 414464 out.go:360] Setting OutFile to fd 1 ...
I0904 04:18:47.585972 414464 out.go:408] TERM=,COLORTERM=, which probably does not support color
I0904 04:18:47.585975 414464 out.go:374] Setting ErrFile to fd 2...
I0904 04:18:47.585978 414464 out.go:408] TERM=,COLORTERM=, which probably does not support color
I0904 04:18:47.586147 414464 root.go:338] Updating PATH: /home/jenkins/minikube-integration/21409-385918/.minikube/bin
I0904 04:18:47.586706 414464 out.go:368] Setting JSON to false
I0904 04:18:47.587640 414464 start.go:130] hostinfo: {"hostname":"ubuntu-20-agent-6","uptime":7271,"bootTime":1756952257,"procs":182,"os":"linux","platform":"ubuntu","platformFamily":"debian","platformVersion":"20.04","kernelVersion":"5.15.0-1083-gcp","kernelArch":"x86_64","virtualizationSystem":"kvm","virtualizationRole":"guest","hostId":"591c9f12-2938-3743-e2bf-c56a050d43d1"}
I0904 04:18:47.587725 414464 start.go:140] virtualization: kvm guest
I0904 04:18:47.589803 414464 out.go:179] * [dockerenv-217193] minikube v1.36.0 on Ubuntu 20.04 (kvm/amd64)
I0904 04:18:47.590904 414464 out.go:179] - MINIKUBE_LOCATION=21409
I0904 04:18:47.590928 414464 notify.go:220] Checking for updates...
I0904 04:18:47.592925 414464 out.go:179] - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
I0904 04:18:47.594060 414464 out.go:179] - KUBECONFIG=/home/jenkins/minikube-integration/21409-385918/kubeconfig
I0904 04:18:47.595085 414464 out.go:179] - MINIKUBE_HOME=/home/jenkins/minikube-integration/21409-385918/.minikube
I0904 04:18:47.596121 414464 out.go:179] - MINIKUBE_BIN=out/minikube-linux-amd64
I0904 04:18:47.597090 414464 out.go:179] - MINIKUBE_FORCE_SYSTEMD=
I0904 04:18:47.598294 414464 driver.go:421] Setting default libvirt URI to qemu:///system
I0904 04:18:47.620763 414464 docker.go:123] docker version: linux-28.1.1:Docker Engine - Community
I0904 04:18:47.620838 414464 cli_runner.go:164] Run: docker system info --format "{{json .}}"
I0904 04:18:47.668639 414464 info.go:266] docker info: {ID:TS6T:UINC:MIYS:RZPA:KS6T:4JQK:7JHN:D6RA:LDP2:MHAE:G32M:C5NQ Containers:0 ContainersRunning:0 ContainersPaused:0 ContainersStopped:0 Images:2 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:false BridgeNfIP6Tables:false Debug:false NFd:24 OomKillDisable:true NGoroutines:43 SystemTime:2025-09-04 04:18:47.659459057 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1083-gcp OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Architecture:x86_64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[::1/128 127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:8 MemTotal:33647984640 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ubuntu-20-agent-6 Labels:[] ExperimentalBuild:false ServerVersion:28.1.1 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:05044ec0a9a75232cad458027ca83437aae3f4da Expected:} RuncCommit:{ID:v1.2.5-0-g59923ef Expected:} InitCommit:{ID:de40ad0 Expected:} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.23.0] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.35.1] map[Name:scan Path:/usr/libexec/docker/cli-plugins/docker-scan SchemaVersion:0.1.0 ShortDescription:Docker Scan Vendor:Docker Inc. Version:v0.23.0]] Warnings:<nil>}}
I0904 04:18:47.668755 414464 docker.go:318] overlay module found
I0904 04:18:47.670772 414464 out.go:179] * Using the docker driver based on user configuration
I0904 04:18:47.671734 414464 start.go:304] selected driver: docker
I0904 04:18:47.671741 414464 start.go:918] validating driver "docker" against <nil>
I0904 04:18:47.671750 414464 start.go:929] status for docker: {Installed:true Healthy:true Running:false NeedsImprovement:false Error:<nil> Reason: Fix: Doc: Version:}
I0904 04:18:47.671848 414464 cli_runner.go:164] Run: docker system info --format "{{json .}}"
I0904 04:18:47.716557 414464 info.go:266] docker info: {ID:TS6T:UINC:MIYS:RZPA:KS6T:4JQK:7JHN:D6RA:LDP2:MHAE:G32M:C5NQ Containers:0 ContainersRunning:0 ContainersPaused:0 ContainersStopped:0 Images:2 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:false BridgeNfIP6Tables:false Debug:false NFd:24 OomKillDisable:true NGoroutines:43 SystemTime:2025-09-04 04:18:47.708062418 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1083-gcp OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Architecture:x86_64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[::1/128 127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:8 MemTotal:33647984640 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ubuntu-20-agent-6 Labels:[] ExperimentalBuild:false ServerVersion:28.1.1 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:05044ec0a9a75232cad458027ca83437aae3f4da Expected:} RuncCommit:{ID:v1.2.5-0-g59923ef Expected:} InitCommit:{ID:de40ad0 Expected:} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.23.0] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.35.1] map[Name:scan Path:/usr/libexec/docker/cli-plugins/docker-scan SchemaVersion:0.1.0 ShortDescription:Docker Scan Vendor:Docker Inc. Version:v0.23.0]] Warnings:<nil>}}
I0904 04:18:47.716747 414464 start_flags.go:327] no existing cluster config was found, will generate one from the flags
I0904 04:18:47.717223 414464 start_flags.go:410] Using suggested 8000MB memory alloc based on sys=32089MB, container=32089MB
I0904 04:18:47.717355 414464 start_flags.go:974] Wait components to verify : map[apiserver:true system_pods:true]
I0904 04:18:47.718828 414464 out.go:179] * Using Docker driver with root privileges
I0904 04:18:47.719947 414464 cni.go:84] Creating CNI manager for ""
I0904 04:18:47.720002 414464 cni.go:143] "docker" driver + "containerd" runtime found, recommending kindnet
I0904 04:18:47.720010 414464 start_flags.go:336] Found "CNI" CNI - setting NetworkPlugin=cni
I0904 04:18:47.720068 414464 start.go:348] cluster config:
{Name:dockerenv-217193 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.47-1756936034-21409@sha256:06a2e6835062e5beff0e5288aa7d453ae87f4ed9d9f593dbbe436c8e34741bfc Memory:8000 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.0 ClusterName:dockerenv-217193 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.34.0 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I0904 04:18:47.721194 414464 out.go:179] * Starting "dockerenv-217193" primary control-plane node in "dockerenv-217193" cluster
I0904 04:18:47.722093 414464 cache.go:123] Beginning downloading kic base image for docker with containerd
I0904 04:18:47.723065 414464 out.go:179] * Pulling base image v0.0.47-1756936034-21409 ...
I0904 04:18:47.723992 414464 preload.go:131] Checking if preload exists for k8s version v1.34.0 and runtime containerd
I0904 04:18:47.724015 414464 preload.go:146] Found local preload: /home/jenkins/minikube-integration/21409-385918/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.34.0-containerd-overlay2-amd64.tar.lz4
I0904 04:18:47.724027 414464 cache.go:58] Caching tarball of preloaded images
I0904 04:18:47.724087 414464 image.go:81] Checking for gcr.io/k8s-minikube/kicbase-builds:v0.0.47-1756936034-21409@sha256:06a2e6835062e5beff0e5288aa7d453ae87f4ed9d9f593dbbe436c8e34741bfc in local docker daemon
I0904 04:18:47.724146 414464 preload.go:172] Found /home/jenkins/minikube-integration/21409-385918/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.34.0-containerd-overlay2-amd64.tar.lz4 in cache, skipping download
I0904 04:18:47.724154 414464 cache.go:61] Finished verifying existence of preloaded tar for v1.34.0 on containerd
I0904 04:18:47.724514 414464 profile.go:143] Saving config to /home/jenkins/minikube-integration/21409-385918/.minikube/profiles/dockerenv-217193/config.json ...
I0904 04:18:47.724533 414464 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21409-385918/.minikube/profiles/dockerenv-217193/config.json: {Name:mk6b1024e4e931e7400166a2e2d85f0d3c5ccacc Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0904 04:18:47.743807 414464 image.go:100] Found gcr.io/k8s-minikube/kicbase-builds:v0.0.47-1756936034-21409@sha256:06a2e6835062e5beff0e5288aa7d453ae87f4ed9d9f593dbbe436c8e34741bfc in local docker daemon, skipping pull
I0904 04:18:47.743817 414464 cache.go:147] gcr.io/k8s-minikube/kicbase-builds:v0.0.47-1756936034-21409@sha256:06a2e6835062e5beff0e5288aa7d453ae87f4ed9d9f593dbbe436c8e34741bfc exists in daemon, skipping load
I0904 04:18:47.743831 414464 cache.go:232] Successfully downloaded all kic artifacts
I0904 04:18:47.743869 414464 start.go:360] acquireMachinesLock for dockerenv-217193: {Name:mk7ccc96e834e525fa5078113b5f9a42d3a0d4b8 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I0904 04:18:47.743951 414464 start.go:364] duration metric: took 69.646µs to acquireMachinesLock for "dockerenv-217193"
I0904 04:18:47.743968 414464 start.go:93] Provisioning new machine with config: &{Name:dockerenv-217193 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.47-1756936034-21409@sha256:06a2e6835062e5beff0e5288aa7d453ae87f4ed9d9f593dbbe436c8e34741bfc Memory:8000 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.0 ClusterName:dockerenv-217193 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.34.0 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} &{Name: IP: Port:8443 KubernetesVersion:v1.34.0 ContainerRuntime:containerd ControlPlane:true Worker:true}
I0904 04:18:47.744022 414464 start.go:125] createHost starting for "" (driver="docker")
I0904 04:18:47.745427 414464 out.go:252] * Creating docker container (CPUs=2, Memory=8000MB) ...
I0904 04:18:47.745610 414464 start.go:159] libmachine.API.Create for "dockerenv-217193" (driver="docker")
I0904 04:18:47.745632 414464 client.go:168] LocalClient.Create starting
I0904 04:18:47.745712 414464 main.go:141] libmachine: Reading certificate data from /home/jenkins/minikube-integration/21409-385918/.minikube/certs/ca.pem
I0904 04:18:47.745740 414464 main.go:141] libmachine: Decoding PEM data...
I0904 04:18:47.745753 414464 main.go:141] libmachine: Parsing certificate...
I0904 04:18:47.745798 414464 main.go:141] libmachine: Reading certificate data from /home/jenkins/minikube-integration/21409-385918/.minikube/certs/cert.pem
I0904 04:18:47.745811 414464 main.go:141] libmachine: Decoding PEM data...
I0904 04:18:47.745823 414464 main.go:141] libmachine: Parsing certificate...
I0904 04:18:47.746127 414464 cli_runner.go:164] Run: docker network inspect dockerenv-217193 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
W0904 04:18:47.761460 414464 cli_runner.go:211] docker network inspect dockerenv-217193 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}" returned with exit code 1
I0904 04:18:47.761522 414464 network_create.go:284] running [docker network inspect dockerenv-217193] to gather additional debugging logs...
I0904 04:18:47.761536 414464 cli_runner.go:164] Run: docker network inspect dockerenv-217193
W0904 04:18:47.777465 414464 cli_runner.go:211] docker network inspect dockerenv-217193 returned with exit code 1
I0904 04:18:47.777482 414464 network_create.go:287] error running [docker network inspect dockerenv-217193]: docker network inspect dockerenv-217193: exit status 1
stdout:
[]
stderr:
Error response from daemon: network dockerenv-217193 not found
I0904 04:18:47.777491 414464 network_create.go:289] output of [docker network inspect dockerenv-217193]: -- stdout --
[]
-- /stdout --
** stderr **
Error response from daemon: network dockerenv-217193 not found
** /stderr **
I0904 04:18:47.777593 414464 cli_runner.go:164] Run: docker network inspect bridge --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I0904 04:18:47.793048 414464 network.go:206] using free private subnet 192.168.49.0/24: &{IP:192.168.49.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.49.0/24 Gateway:192.168.49.1 ClientMin:192.168.49.2 ClientMax:192.168.49.254 Broadcast:192.168.49.255 IsPrivate:true Interface:{IfaceName: IfaceIPv4: IfaceMTU:0 IfaceMAC:} reservation:0xc001cc91b0}
I0904 04:18:47.793087 414464 network_create.go:124] attempt to create docker network dockerenv-217193 192.168.49.0/24 with gateway 192.168.49.1 and MTU of 1500 ...
I0904 04:18:47.793156 414464 cli_runner.go:164] Run: docker network create --driver=bridge --subnet=192.168.49.0/24 --gateway=192.168.49.1 -o --ip-masq -o --icc -o com.docker.network.driver.mtu=1500 --label=created_by.minikube.sigs.k8s.io=true --label=name.minikube.sigs.k8s.io=dockerenv-217193 dockerenv-217193
I0904 04:18:47.839801 414464 network_create.go:108] docker network dockerenv-217193 192.168.49.0/24 created
I0904 04:18:47.839820 414464 kic.go:121] calculated static IP "192.168.49.2" for the "dockerenv-217193" container
I0904 04:18:47.839897 414464 cli_runner.go:164] Run: docker ps -a --format {{.Names}}
I0904 04:18:47.855351 414464 cli_runner.go:164] Run: docker volume create dockerenv-217193 --label name.minikube.sigs.k8s.io=dockerenv-217193 --label created_by.minikube.sigs.k8s.io=true
I0904 04:18:47.871854 414464 oci.go:103] Successfully created a docker volume dockerenv-217193
I0904 04:18:47.871909 414464 cli_runner.go:164] Run: docker run --rm --name dockerenv-217193-preload-sidecar --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=dockerenv-217193 --entrypoint /usr/bin/test -v dockerenv-217193:/var gcr.io/k8s-minikube/kicbase-builds:v0.0.47-1756936034-21409@sha256:06a2e6835062e5beff0e5288aa7d453ae87f4ed9d9f593dbbe436c8e34741bfc -d /var/lib
I0904 04:18:48.313981 414464 oci.go:107] Successfully prepared a docker volume dockerenv-217193
I0904 04:18:48.314022 414464 preload.go:131] Checking if preload exists for k8s version v1.34.0 and runtime containerd
I0904 04:18:48.314044 414464 kic.go:194] Starting extracting preloaded images to volume ...
I0904 04:18:48.314114 414464 cli_runner.go:164] Run: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/21409-385918/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.34.0-containerd-overlay2-amd64.tar.lz4:/preloaded.tar:ro -v dockerenv-217193:/extractDir gcr.io/k8s-minikube/kicbase-builds:v0.0.47-1756936034-21409@sha256:06a2e6835062e5beff0e5288aa7d453ae87f4ed9d9f593dbbe436c8e34741bfc -I lz4 -xf /preloaded.tar -C /extractDir
I0904 04:18:52.629705 414464 cli_runner.go:217] Completed: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/21409-385918/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.34.0-containerd-overlay2-amd64.tar.lz4:/preloaded.tar:ro -v dockerenv-217193:/extractDir gcr.io/k8s-minikube/kicbase-builds:v0.0.47-1756936034-21409@sha256:06a2e6835062e5beff0e5288aa7d453ae87f4ed9d9f593dbbe436c8e34741bfc -I lz4 -xf /preloaded.tar -C /extractDir: (4.315537917s)
I0904 04:18:52.629748 414464 kic.go:203] duration metric: took 4.31570003s to extract preloaded images to volume ...
W0904 04:18:52.630047 414464 cgroups_linux.go:77] Your kernel does not support swap limit capabilities or the cgroup is not mounted.
I0904 04:18:52.630150 414464 cli_runner.go:164] Run: docker info --format "'{{json .SecurityOptions}}'"
I0904 04:18:52.674792 414464 cli_runner.go:164] Run: docker run -d -t --privileged --security-opt seccomp=unconfined --tmpfs /tmp --tmpfs /run -v /lib/modules:/lib/modules:ro --hostname dockerenv-217193 --name dockerenv-217193 --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=dockerenv-217193 --label role.minikube.sigs.k8s.io= --label mode.minikube.sigs.k8s.io=dockerenv-217193 --network dockerenv-217193 --ip 192.168.49.2 --volume dockerenv-217193:/var --security-opt apparmor=unconfined --memory=8000mb --cpus=2 -e container=docker --expose 8443 --publish=127.0.0.1::8443 --publish=127.0.0.1::22 --publish=127.0.0.1::2376 --publish=127.0.0.1::5000 --publish=127.0.0.1::32443 gcr.io/k8s-minikube/kicbase-builds:v0.0.47-1756936034-21409@sha256:06a2e6835062e5beff0e5288aa7d453ae87f4ed9d9f593dbbe436c8e34741bfc
I0904 04:18:52.917444 414464 cli_runner.go:164] Run: docker container inspect dockerenv-217193 --format={{.State.Running}}
I0904 04:18:52.934615 414464 cli_runner.go:164] Run: docker container inspect dockerenv-217193 --format={{.State.Status}}
I0904 04:18:52.953430 414464 cli_runner.go:164] Run: docker exec dockerenv-217193 stat /var/lib/dpkg/alternatives/iptables
I0904 04:18:52.992033 414464 oci.go:144] the created container "dockerenv-217193" has a running status.
I0904 04:18:52.992058 414464 kic.go:225] Creating ssh key for kic: /home/jenkins/minikube-integration/21409-385918/.minikube/machines/dockerenv-217193/id_rsa...
I0904 04:18:53.338861 414464 kic_runner.go:191] docker (temp): /home/jenkins/minikube-integration/21409-385918/.minikube/machines/dockerenv-217193/id_rsa.pub --> /home/docker/.ssh/authorized_keys (381 bytes)
I0904 04:18:53.357826 414464 cli_runner.go:164] Run: docker container inspect dockerenv-217193 --format={{.State.Status}}
I0904 04:18:53.374077 414464 kic_runner.go:93] Run: chown docker:docker /home/docker/.ssh/authorized_keys
I0904 04:18:53.374092 414464 kic_runner.go:114] Args: [docker exec --privileged dockerenv-217193 chown docker:docker /home/docker/.ssh/authorized_keys]
I0904 04:18:53.418903 414464 cli_runner.go:164] Run: docker container inspect dockerenv-217193 --format={{.State.Status}}
I0904 04:18:53.439155 414464 machine.go:93] provisionDockerMachine start ...
I0904 04:18:53.439252 414464 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" dockerenv-217193
I0904 04:18:53.458601 414464 main.go:141] libmachine: Using SSH client type: native
I0904 04:18:53.458818 414464 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x83a420] 0x83d120 <nil> [] 0s} 127.0.0.1 33148 <nil> <nil>}
I0904 04:18:53.458826 414464 main.go:141] libmachine: About to run SSH command:
hostname
I0904 04:18:53.598744 414464 main.go:141] libmachine: SSH cmd err, output: <nil>: dockerenv-217193
I0904 04:18:53.598771 414464 ubuntu.go:182] provisioning hostname "dockerenv-217193"
I0904 04:18:53.598855 414464 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" dockerenv-217193
I0904 04:18:53.617785 414464 main.go:141] libmachine: Using SSH client type: native
I0904 04:18:53.618020 414464 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x83a420] 0x83d120 <nil> [] 0s} 127.0.0.1 33148 <nil> <nil>}
I0904 04:18:53.618029 414464 main.go:141] libmachine: About to run SSH command:
sudo hostname dockerenv-217193 && echo "dockerenv-217193" | sudo tee /etc/hostname
I0904 04:18:53.748856 414464 main.go:141] libmachine: SSH cmd err, output: <nil>: dockerenv-217193
I0904 04:18:53.748928 414464 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" dockerenv-217193
I0904 04:18:53.765637 414464 main.go:141] libmachine: Using SSH client type: native
I0904 04:18:53.765837 414464 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x83a420] 0x83d120 <nil> [] 0s} 127.0.0.1 33148 <nil> <nil>}
I0904 04:18:53.765850 414464 main.go:141] libmachine: About to run SSH command:
if ! grep -xq '.*\sdockerenv-217193' /etc/hosts; then
    if grep -xq '127.0.1.1\s.*' /etc/hosts; then
        sudo sed -i 's/^127.0.1.1\s.*/127.0.1.1 dockerenv-217193/g' /etc/hosts;
    else
        echo '127.0.1.1 dockerenv-217193' | sudo tee -a /etc/hosts;
    fi
fi
I0904 04:18:53.886720 414464 main.go:141] libmachine: SSH cmd err, output: <nil>:
I0904 04:18:53.886742 414464 ubuntu.go:188] set auth options {CertDir:/home/jenkins/minikube-integration/21409-385918/.minikube CaCertPath:/home/jenkins/minikube-integration/21409-385918/.minikube/certs/ca.pem CaPrivateKeyPath:/home/jenkins/minikube-integration/21409-385918/.minikube/certs/ca-key.pem CaCertRemotePath:/etc/docker/ca.pem ServerCertPath:/home/jenkins/minikube-integration/21409-385918/.minikube/machines/server.pem ServerKeyPath:/home/jenkins/minikube-integration/21409-385918/.minikube/machines/server-key.pem ClientKeyPath:/home/jenkins/minikube-integration/21409-385918/.minikube/certs/key.pem ServerCertRemotePath:/etc/docker/server.pem ServerKeyRemotePath:/etc/docker/server-key.pem ClientCertPath:/home/jenkins/minikube-integration/21409-385918/.minikube/certs/cert.pem ServerCertSANs:[] StorePath:/home/jenkins/minikube-integration/21409-385918/.minikube}
I0904 04:18:53.886766 414464 ubuntu.go:190] setting up certificates
I0904 04:18:53.886776 414464 provision.go:84] configureAuth start
I0904 04:18:53.886824 414464 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" dockerenv-217193
I0904 04:18:53.903798 414464 provision.go:143] copyHostCerts
I0904 04:18:53.903847 414464 exec_runner.go:144] found /home/jenkins/minikube-integration/21409-385918/.minikube/key.pem, removing ...
I0904 04:18:53.903856 414464 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21409-385918/.minikube/key.pem
I0904 04:18:53.903920 414464 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21409-385918/.minikube/certs/key.pem --> /home/jenkins/minikube-integration/21409-385918/.minikube/key.pem (1675 bytes)
I0904 04:18:53.904007 414464 exec_runner.go:144] found /home/jenkins/minikube-integration/21409-385918/.minikube/ca.pem, removing ...
I0904 04:18:53.904011 414464 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21409-385918/.minikube/ca.pem
I0904 04:18:53.904032 414464 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21409-385918/.minikube/certs/ca.pem --> /home/jenkins/minikube-integration/21409-385918/.minikube/ca.pem (1078 bytes)
I0904 04:18:53.904079 414464 exec_runner.go:144] found /home/jenkins/minikube-integration/21409-385918/.minikube/cert.pem, removing ...
I0904 04:18:53.904082 414464 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21409-385918/.minikube/cert.pem
I0904 04:18:53.904100 414464 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21409-385918/.minikube/certs/cert.pem --> /home/jenkins/minikube-integration/21409-385918/.minikube/cert.pem (1123 bytes)
I0904 04:18:53.904147 414464 provision.go:117] generating server cert: /home/jenkins/minikube-integration/21409-385918/.minikube/machines/server.pem ca-key=/home/jenkins/minikube-integration/21409-385918/.minikube/certs/ca.pem private-key=/home/jenkins/minikube-integration/21409-385918/.minikube/certs/ca-key.pem org=jenkins.dockerenv-217193 san=[127.0.0.1 192.168.49.2 dockerenv-217193 localhost minikube]
I0904 04:18:54.244726 414464 provision.go:177] copyRemoteCerts
I0904 04:18:54.244770 414464 ssh_runner.go:195] Run: sudo mkdir -p /etc/docker /etc/docker /etc/docker
I0904 04:18:54.244811 414464 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" dockerenv-217193
I0904 04:18:54.261805 414464 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33148 SSHKeyPath:/home/jenkins/minikube-integration/21409-385918/.minikube/machines/dockerenv-217193/id_rsa Username:docker}
I0904 04:18:54.347088 414464 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21409-385918/.minikube/certs/ca.pem --> /etc/docker/ca.pem (1078 bytes)
I0904 04:18:54.368128 414464 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21409-385918/.minikube/machines/server.pem --> /etc/docker/server.pem (1216 bytes)
I0904 04:18:54.388880 414464 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21409-385918/.minikube/machines/server-key.pem --> /etc/docker/server-key.pem (1675 bytes)
I0904 04:18:54.409438 414464 provision.go:87] duration metric: took 522.646649ms to configureAuth
I0904 04:18:54.409458 414464 ubuntu.go:206] setting minikube options for container-runtime
I0904 04:18:54.409614 414464 config.go:182] Loaded profile config "dockerenv-217193": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.34.0
I0904 04:18:54.409619 414464 machine.go:96] duration metric: took 970.452111ms to provisionDockerMachine
I0904 04:18:54.409625 414464 client.go:171] duration metric: took 6.663990098s to LocalClient.Create
I0904 04:18:54.409647 414464 start.go:167] duration metric: took 6.664038268s to libmachine.API.Create "dockerenv-217193"
I0904 04:18:54.409653 414464 start.go:293] postStartSetup for "dockerenv-217193" (driver="docker")
I0904 04:18:54.409662 414464 start.go:322] creating required directories: [/etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs]
I0904 04:18:54.409700 414464 ssh_runner.go:195] Run: sudo mkdir -p /etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs
I0904 04:18:54.409732 414464 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" dockerenv-217193
I0904 04:18:54.428124 414464 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33148 SSHKeyPath:/home/jenkins/minikube-integration/21409-385918/.minikube/machines/dockerenv-217193/id_rsa Username:docker}
I0904 04:18:54.519674 414464 ssh_runner.go:195] Run: cat /etc/os-release
I0904 04:18:54.522680 414464 main.go:141] libmachine: Couldn't set key VERSION_CODENAME, no corresponding struct field found
I0904 04:18:54.522698 414464 main.go:141] libmachine: Couldn't set key PRIVACY_POLICY_URL, no corresponding struct field found
I0904 04:18:54.522704 414464 main.go:141] libmachine: Couldn't set key UBUNTU_CODENAME, no corresponding struct field found
I0904 04:18:54.522712 414464 info.go:137] Remote host: Ubuntu 22.04.5 LTS
I0904 04:18:54.522722 414464 filesync.go:126] Scanning /home/jenkins/minikube-integration/21409-385918/.minikube/addons for local assets ...
I0904 04:18:54.522767 414464 filesync.go:126] Scanning /home/jenkins/minikube-integration/21409-385918/.minikube/files for local assets ...
I0904 04:18:54.522783 414464 start.go:296] duration metric: took 113.125344ms for postStartSetup
I0904 04:18:54.523121 414464 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" dockerenv-217193
I0904 04:18:54.540486 414464 profile.go:143] Saving config to /home/jenkins/minikube-integration/21409-385918/.minikube/profiles/dockerenv-217193/config.json ...
I0904 04:18:54.540715 414464 ssh_runner.go:195] Run: sh -c "df -h /var | awk 'NR==2{print $5}'"
I0904 04:18:54.540745 414464 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" dockerenv-217193
I0904 04:18:54.557231 414464 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33148 SSHKeyPath:/home/jenkins/minikube-integration/21409-385918/.minikube/machines/dockerenv-217193/id_rsa Username:docker}
I0904 04:18:54.643469 414464 ssh_runner.go:195] Run: sh -c "df -BG /var | awk 'NR==2{print $4}'"
I0904 04:18:54.647411 414464 start.go:128] duration metric: took 6.903372472s to createHost
I0904 04:18:54.647429 414464 start.go:83] releasing machines lock for "dockerenv-217193", held for 6.903471313s
I0904 04:18:54.647502 414464 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" dockerenv-217193
I0904 04:18:54.663595 414464 ssh_runner.go:195] Run: cat /version.json
I0904 04:18:54.663636 414464 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" dockerenv-217193
I0904 04:18:54.663653 414464 ssh_runner.go:195] Run: curl -sS -m 2 https://registry.k8s.io/
I0904 04:18:54.663702 414464 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" dockerenv-217193
I0904 04:18:54.683492 414464 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33148 SSHKeyPath:/home/jenkins/minikube-integration/21409-385918/.minikube/machines/dockerenv-217193/id_rsa Username:docker}
I0904 04:18:54.684993 414464 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33148 SSHKeyPath:/home/jenkins/minikube-integration/21409-385918/.minikube/machines/dockerenv-217193/id_rsa Username:docker}
I0904 04:18:54.842391 414464 ssh_runner.go:195] Run: systemctl --version
I0904 04:18:54.846446 414464 ssh_runner.go:195] Run: sh -c "stat /etc/cni/net.d/*loopback.conf*"
I0904 04:18:54.850569 414464 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f -name *loopback.conf* -not -name *.mk_disabled -exec sh -c "grep -q loopback {} && ( grep -q name {} || sudo sed -i '/"type": "loopback"/i \ \ \ \ "name": "loopback",' {} ) && sudo sed -i 's|"cniVersion": ".*"|"cniVersion": "1.0.0"|g' {}" ;
I0904 04:18:54.872727 414464 cni.go:230] loopback cni configuration patched: "/etc/cni/net.d/*loopback.conf*" found
I0904 04:18:54.872778 414464 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f ( ( -name *bridge* -or -name *podman* ) -and -not -name *.mk_disabled ) -printf "%p, " -exec sh -c "sudo mv {} {}.mk_disabled" ;
I0904 04:18:54.895548 414464 cni.go:262] disabled [/etc/cni/net.d/87-podman-bridge.conflist, /etc/cni/net.d/100-crio-bridge.conf] bridge cni config(s)
I0904 04:18:54.895561 414464 start.go:495] detecting cgroup driver to use...
I0904 04:18:54.895592 414464 detect.go:187] detected "cgroupfs" cgroup driver on host os
I0904 04:18:54.895642 414464 ssh_runner.go:195] Run: sudo systemctl stop -f crio
I0904 04:18:54.905998 414464 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service crio
I0904 04:18:54.915783 414464 docker.go:218] disabling cri-docker service (if available) ...
I0904 04:18:54.915821 414464 ssh_runner.go:195] Run: sudo systemctl stop -f cri-docker.socket
I0904 04:18:54.927802 414464 ssh_runner.go:195] Run: sudo systemctl stop -f cri-docker.service
I0904 04:18:54.940311 414464 ssh_runner.go:195] Run: sudo systemctl disable cri-docker.socket
I0904 04:18:55.010623 414464 ssh_runner.go:195] Run: sudo systemctl mask cri-docker.service
I0904 04:18:55.087136 414464 docker.go:234] disabling docker service ...
I0904 04:18:55.087185 414464 ssh_runner.go:195] Run: sudo systemctl stop -f docker.socket
I0904 04:18:55.106462 414464 ssh_runner.go:195] Run: sudo systemctl stop -f docker.service
I0904 04:18:55.116591 414464 ssh_runner.go:195] Run: sudo systemctl disable docker.socket
I0904 04:18:55.191697 414464 ssh_runner.go:195] Run: sudo systemctl mask docker.service
I0904 04:18:55.262005 414464 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service docker
I0904 04:18:55.272041 414464 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///run/containerd/containerd.sock
" | sudo tee /etc/crictl.yaml"
I0904 04:18:55.286239 414464 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)sandbox_image = .*$|\1sandbox_image = "registry.k8s.io/pause:3.10.1"|' /etc/containerd/config.toml"
I0904 04:18:55.294464 414464 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)restrict_oom_score_adj = .*$|\1restrict_oom_score_adj = false|' /etc/containerd/config.toml"
I0904 04:18:55.302741 414464 containerd.go:146] configuring containerd to use "cgroupfs" as cgroup driver...
I0904 04:18:55.302777 414464 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)SystemdCgroup = .*$|\1SystemdCgroup = false|g' /etc/containerd/config.toml"
I0904 04:18:55.311019 414464 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runtime.v1.linux"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I0904 04:18:55.319302 414464 ssh_runner.go:195] Run: sh -c "sudo sed -i '/systemd_cgroup/d' /etc/containerd/config.toml"
I0904 04:18:55.327144 414464 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runc.v1"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I0904 04:18:55.334961 414464 ssh_runner.go:195] Run: sh -c "sudo rm -rf /etc/cni/net.mk"
I0904 04:18:55.342446 414464 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)conf_dir = .*$|\1conf_dir = "/etc/cni/net.d"|g' /etc/containerd/config.toml"
I0904 04:18:55.350367 414464 ssh_runner.go:195] Run: sh -c "sudo sed -i '/^ *enable_unprivileged_ports = .*/d' /etc/containerd/config.toml"
I0904 04:18:55.358432 414464 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)\[plugins."io.containerd.grpc.v1.cri"\]|&\n\1 enable_unprivileged_ports = true|' /etc/containerd/config.toml"
I0904 04:18:55.366806 414464 ssh_runner.go:195] Run: sudo sysctl net.bridge.bridge-nf-call-iptables
I0904 04:18:55.373843 414464 ssh_runner.go:195] Run: sudo sh -c "echo 1 > /proc/sys/net/ipv4/ip_forward"
I0904 04:18:55.380937 414464 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0904 04:18:55.455264 414464 ssh_runner.go:195] Run: sudo systemctl restart containerd
I0904 04:18:55.561438 414464 start.go:542] Will wait 60s for socket path /run/containerd/containerd.sock
I0904 04:18:55.561488 414464 ssh_runner.go:195] Run: stat /run/containerd/containerd.sock
I0904 04:18:55.564912 414464 start.go:563] Will wait 60s for crictl version
I0904 04:18:55.564952 414464 ssh_runner.go:195] Run: which crictl
I0904 04:18:55.567856 414464 ssh_runner.go:195] Run: sudo /usr/bin/crictl version
I0904 04:18:55.598626 414464 start.go:579] Version: 0.1.0
RuntimeName: containerd
RuntimeVersion: 1.7.27
RuntimeApiVersion: v1
I0904 04:18:55.598684 414464 ssh_runner.go:195] Run: containerd --version
I0904 04:18:55.620337 414464 ssh_runner.go:195] Run: containerd --version
I0904 04:18:55.646230 414464 out.go:179] * Preparing Kubernetes v1.34.0 on containerd 1.7.27 ...
I0904 04:18:55.647258 414464 cli_runner.go:164] Run: docker network inspect dockerenv-217193 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I0904 04:18:55.663264 414464 ssh_runner.go:195] Run: grep 192.168.49.1 host.minikube.internal$ /etc/hosts
I0904 04:18:55.666734 414464 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\thost.minikube.internal$' "/etc/hosts"; echo "192.168.49.1 host.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I0904 04:18:55.676646 414464 kubeadm.go:875] updating cluster {Name:dockerenv-217193 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.47-1756936034-21409@sha256:06a2e6835062e5beff0e5288aa7d453ae87f4ed9d9f593dbbe436c8e34741bfc Memory:8000 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.0 ClusterName:dockerenv-217193 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.49.2 Port:8443 KubernetesVersion:v1.34.0 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} ...
I0904 04:18:55.676743 414464 preload.go:131] Checking if preload exists for k8s version v1.34.0 and runtime containerd
I0904 04:18:55.676787 414464 ssh_runner.go:195] Run: sudo crictl images --output json
I0904 04:18:55.707404 414464 containerd.go:627] all images are preloaded for containerd runtime.
I0904 04:18:55.707414 414464 containerd.go:534] Images already preloaded, skipping extraction
I0904 04:18:55.707456 414464 ssh_runner.go:195] Run: sudo crictl images --output json
I0904 04:18:55.738911 414464 containerd.go:627] all images are preloaded for containerd runtime.
I0904 04:18:55.738924 414464 cache_images.go:85] Images are preloaded, skipping loading
I0904 04:18:55.738930 414464 kubeadm.go:926] updating node { 192.168.49.2 8443 v1.34.0 containerd true true} ...
I0904 04:18:55.739015 414464 kubeadm.go:938] kubelet [Unit]
Wants=containerd.service
[Service]
ExecStart=
ExecStart=/var/lib/minikube/binaries/v1.34.0/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --config=/var/lib/kubelet/config.yaml --hostname-override=dockerenv-217193 --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.49.2
[Install]
config:
{KubernetesVersion:v1.34.0 ClusterName:dockerenv-217193 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:}
I0904 04:18:55.739063 414464 ssh_runner.go:195] Run: sudo crictl info
I0904 04:18:55.769880 414464 cni.go:84] Creating CNI manager for ""
I0904 04:18:55.769891 414464 cni.go:143] "docker" driver + "containerd" runtime found, recommending kindnet
I0904 04:18:55.769903 414464 kubeadm.go:84] Using pod CIDR: 10.244.0.0/16
I0904 04:18:55.769920 414464 kubeadm.go:189] kubeadm options: {CertDir:/var/lib/minikube/certs ServiceCIDR:10.96.0.0/12 PodSubnet:10.244.0.0/16 AdvertiseAddress:192.168.49.2 APIServerPort:8443 KubernetesVersion:v1.34.0 EtcdDataDir:/var/lib/minikube/etcd EtcdExtraArgs:map[] ClusterName:dockerenv-217193 NodeName:dockerenv-217193 DNSDomain:cluster.local CRISocket:/run/containerd/containerd.sock ImageRepository: ComponentOptions:[{Component:apiServer ExtraArgs:map[enable-admission-plugins:NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota] Pairs:map[certSANs:["127.0.0.1", "localhost", "192.168.49.2"]]} {Component:controllerManager ExtraArgs:map[allocate-node-cidrs:true leader-elect:false] Pairs:map[]} {Component:scheduler ExtraArgs:map[leader-elect:false] Pairs:map[]}] FeatureArgs:map[] NodeIP:192.168.49.2 CgroupDriver:cgroupfs ClientCAFile:/var/lib/minikube/certs/ca.crt StaticPodPath:/etc/kubernetes/manifests ControlPlaneAddress:control-plane.minikube.internal KubeProxyOptions:map[] ResolvConfSearchRegression:false KubeletConfigOpts:map[containerRuntimeEndpoint:unix:///run/containerd/containerd.sock hairpinMode:hairpin-veth runtimeRequestTimeout:15m] PrependCriSocketUnix:true}
I0904 04:18:55.770025 414464 kubeadm.go:195] kubeadm config:
apiVersion: kubeadm.k8s.io/v1beta4
kind: InitConfiguration
localAPIEndpoint:
advertiseAddress: 192.168.49.2
bindPort: 8443
bootstrapTokens:
- groups:
- system:bootstrappers:kubeadm:default-node-token
ttl: 24h0m0s
usages:
- signing
- authentication
nodeRegistration:
criSocket: unix:///run/containerd/containerd.sock
name: "dockerenv-217193"
kubeletExtraArgs:
- name: "node-ip"
value: "192.168.49.2"
taints: []
---
apiVersion: kubeadm.k8s.io/v1beta4
kind: ClusterConfiguration
apiServer:
certSANs: ["127.0.0.1", "localhost", "192.168.49.2"]
extraArgs:
- name: "enable-admission-plugins"
value: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
controllerManager:
extraArgs:
- name: "allocate-node-cidrs"
value: "true"
- name: "leader-elect"
value: "false"
scheduler:
extraArgs:
- name: "leader-elect"
value: "false"
certificatesDir: /var/lib/minikube/certs
clusterName: mk
controlPlaneEndpoint: control-plane.minikube.internal:8443
etcd:
local:
dataDir: /var/lib/minikube/etcd
kubernetesVersion: v1.34.0
networking:
dnsDomain: cluster.local
podSubnet: "10.244.0.0/16"
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
authentication:
x509:
clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: cgroupfs
containerRuntimeEndpoint: unix:///run/containerd/containerd.sock
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"
nodefs.inodesFree: "0%"
imagefs.available: "0%"
failSwapOn: false
staticPodPath: /etc/kubernetes/manifests
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
maxPerCore: 0
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_established"
tcpEstablishedTimeout: 0s
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_close"
tcpCloseWaitTimeout: 0s
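(editor's note: the generated config above is four YAML documents, InitConfiguration, ClusterConfiguration, KubeletConfiguration, and KubeProxyConfiguration, joined by "---" separators; it is written to /var/tmp/minikube/kubeadm.yaml.new a few lines below. A minimal sketch of splitting such a multi-document file, with the YAML abbreviated to its apiVersion/kind lines:)

package main

import (
    "fmt"
    "strings"
)

// kubeadmYAML stands in for the file written to /var/tmp/minikube/kubeadm.yaml.new;
// only the kind: lines are kept here.
const kubeadmYAML = `apiVersion: kubeadm.k8s.io/v1beta4
kind: InitConfiguration
---
apiVersion: kubeadm.k8s.io/v1beta4
kind: ClusterConfiguration
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration`

func main() {
    for i, doc := range strings.Split(kubeadmYAML, "\n---\n") {
        for _, line := range strings.Split(doc, "\n") {
            if strings.HasPrefix(line, "kind: ") {
                fmt.Printf("document %d: %s\n", i+1, strings.TrimPrefix(line, "kind: "))
            }
        }
    }
}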
I0904 04:18:55.770075 414464 ssh_runner.go:195] Run: sudo ls /var/lib/minikube/binaries/v1.34.0
I0904 04:18:55.777824 414464 binaries.go:44] Found k8s binaries, skipping transfer
I0904 04:18:55.777869 414464 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/kubelet.service.d /lib/systemd/system /var/tmp/minikube
I0904 04:18:55.785730 414464 ssh_runner.go:362] scp memory --> /etc/systemd/system/kubelet.service.d/10-kubeadm.conf (320 bytes)
I0904 04:18:55.801373 414464 ssh_runner.go:362] scp memory --> /lib/systemd/system/kubelet.service (352 bytes)
I0904 04:18:55.816650 414464 ssh_runner.go:362] scp memory --> /var/tmp/minikube/kubeadm.yaml.new (2229 bytes)
I0904 04:18:55.832111 414464 ssh_runner.go:195] Run: grep 192.168.49.2 control-plane.minikube.internal$ /etc/hosts
I0904 04:18:55.835044 414464 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\tcontrol-plane.minikube.internal$' "/etc/hosts"; echo "192.168.49.2 control-plane.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I0904 04:18:55.844481 414464 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0904 04:18:55.913756 414464 ssh_runner.go:195] Run: sudo systemctl start kubelet
I0904 04:18:55.925908 414464 certs.go:68] Setting up /home/jenkins/minikube-integration/21409-385918/.minikube/profiles/dockerenv-217193 for IP: 192.168.49.2
I0904 04:18:55.925921 414464 certs.go:194] generating shared ca certs ...
I0904 04:18:55.925943 414464 certs.go:226] acquiring lock for ca certs: {Name:mk610706de434f58eb65dd97917b7c24a5e9f8b3 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0904 04:18:55.926091 414464 certs.go:235] skipping valid "minikubeCA" ca cert: /home/jenkins/minikube-integration/21409-385918/.minikube/ca.key
I0904 04:18:55.926138 414464 certs.go:235] skipping valid "proxyClientCA" ca cert: /home/jenkins/minikube-integration/21409-385918/.minikube/proxy-client-ca.key
I0904 04:18:55.926146 414464 certs.go:256] generating profile certs ...
I0904 04:18:55.926207 414464 certs.go:363] generating signed profile cert for "minikube-user": /home/jenkins/minikube-integration/21409-385918/.minikube/profiles/dockerenv-217193/client.key
I0904 04:18:55.926230 414464 crypto.go:68] Generating cert /home/jenkins/minikube-integration/21409-385918/.minikube/profiles/dockerenv-217193/client.crt with IP's: []
I0904 04:18:56.352438 414464 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/21409-385918/.minikube/profiles/dockerenv-217193/client.crt ...
I0904 04:18:56.352457 414464 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21409-385918/.minikube/profiles/dockerenv-217193/client.crt: {Name:mke685c1ec821f6be12165e61eafd4eafce4f9ad Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0904 04:18:56.352656 414464 crypto.go:164] Writing key to /home/jenkins/minikube-integration/21409-385918/.minikube/profiles/dockerenv-217193/client.key ...
I0904 04:18:56.352663 414464 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21409-385918/.minikube/profiles/dockerenv-217193/client.key: {Name:mkc5a50c58dbbaf41edb1786a83849d401615e66 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0904 04:18:56.352751 414464 certs.go:363] generating signed profile cert for "minikube": /home/jenkins/minikube-integration/21409-385918/.minikube/profiles/dockerenv-217193/apiserver.key.aae209bc
I0904 04:18:56.352763 414464 crypto.go:68] Generating cert /home/jenkins/minikube-integration/21409-385918/.minikube/profiles/dockerenv-217193/apiserver.crt.aae209bc with IP's: [10.96.0.1 127.0.0.1 10.0.0.1 192.168.49.2]
I0904 04:18:56.860843 414464 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/21409-385918/.minikube/profiles/dockerenv-217193/apiserver.crt.aae209bc ...
I0904 04:18:56.860861 414464 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21409-385918/.minikube/profiles/dockerenv-217193/apiserver.crt.aae209bc: {Name:mkda3b19353ad437ec208214cae5b6996422642a Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0904 04:18:56.861033 414464 crypto.go:164] Writing key to /home/jenkins/minikube-integration/21409-385918/.minikube/profiles/dockerenv-217193/apiserver.key.aae209bc ...
I0904 04:18:56.861042 414464 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21409-385918/.minikube/profiles/dockerenv-217193/apiserver.key.aae209bc: {Name:mka7fb08587b56e736280dfacdf26c70a2ef57d2 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0904 04:18:56.861112 414464 certs.go:381] copying /home/jenkins/minikube-integration/21409-385918/.minikube/profiles/dockerenv-217193/apiserver.crt.aae209bc -> /home/jenkins/minikube-integration/21409-385918/.minikube/profiles/dockerenv-217193/apiserver.crt
I0904 04:18:56.861180 414464 certs.go:385] copying /home/jenkins/minikube-integration/21409-385918/.minikube/profiles/dockerenv-217193/apiserver.key.aae209bc -> /home/jenkins/minikube-integration/21409-385918/.minikube/profiles/dockerenv-217193/apiserver.key
I0904 04:18:56.861225 414464 certs.go:363] generating signed profile cert for "aggregator": /home/jenkins/minikube-integration/21409-385918/.minikube/profiles/dockerenv-217193/proxy-client.key
I0904 04:18:56.861235 414464 crypto.go:68] Generating cert /home/jenkins/minikube-integration/21409-385918/.minikube/profiles/dockerenv-217193/proxy-client.crt with IP's: []
I0904 04:18:56.996977 414464 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/21409-385918/.minikube/profiles/dockerenv-217193/proxy-client.crt ...
I0904 04:18:56.996994 414464 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21409-385918/.minikube/profiles/dockerenv-217193/proxy-client.crt: {Name:mk3ca3675ce85cbdbe51e0f8b2b33a9b62f5b1a6 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0904 04:18:56.997181 414464 crypto.go:164] Writing key to /home/jenkins/minikube-integration/21409-385918/.minikube/profiles/dockerenv-217193/proxy-client.key ...
I0904 04:18:56.997189 414464 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21409-385918/.minikube/profiles/dockerenv-217193/proxy-client.key: {Name:mk09e811d38e9dbb18f8babfd8b072798e45fc92 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0904 04:18:56.997363 414464 certs.go:484] found cert: /home/jenkins/minikube-integration/21409-385918/.minikube/certs/ca-key.pem (1679 bytes)
I0904 04:18:56.997396 414464 certs.go:484] found cert: /home/jenkins/minikube-integration/21409-385918/.minikube/certs/ca.pem (1078 bytes)
I0904 04:18:56.997425 414464 certs.go:484] found cert: /home/jenkins/minikube-integration/21409-385918/.minikube/certs/cert.pem (1123 bytes)
I0904 04:18:56.997444 414464 certs.go:484] found cert: /home/jenkins/minikube-integration/21409-385918/.minikube/certs/key.pem (1675 bytes)
I0904 04:18:56.998115 414464 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21409-385918/.minikube/ca.crt --> /var/lib/minikube/certs/ca.crt (1111 bytes)
I0904 04:18:57.020742 414464 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21409-385918/.minikube/ca.key --> /var/lib/minikube/certs/ca.key (1679 bytes)
I0904 04:18:57.041990 414464 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21409-385918/.minikube/proxy-client-ca.crt --> /var/lib/minikube/certs/proxy-client-ca.crt (1119 bytes)
I0904 04:18:57.063296 414464 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21409-385918/.minikube/proxy-client-ca.key --> /var/lib/minikube/certs/proxy-client-ca.key (1675 bytes)
I0904 04:18:57.084333 414464 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21409-385918/.minikube/profiles/dockerenv-217193/apiserver.crt --> /var/lib/minikube/certs/apiserver.crt (1424 bytes)
I0904 04:18:57.104814 414464 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21409-385918/.minikube/profiles/dockerenv-217193/apiserver.key --> /var/lib/minikube/certs/apiserver.key (1675 bytes)
I0904 04:18:57.125456 414464 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21409-385918/.minikube/profiles/dockerenv-217193/proxy-client.crt --> /var/lib/minikube/certs/proxy-client.crt (1147 bytes)
I0904 04:18:57.147189 414464 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21409-385918/.minikube/profiles/dockerenv-217193/proxy-client.key --> /var/lib/minikube/certs/proxy-client.key (1679 bytes)
I0904 04:18:57.168510 414464 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21409-385918/.minikube/ca.crt --> /usr/share/ca-certificates/minikubeCA.pem (1111 bytes)
I0904 04:18:57.190947 414464 ssh_runner.go:362] scp memory --> /var/lib/minikube/kubeconfig (738 bytes)
I0904 04:18:57.207178 414464 ssh_runner.go:195] Run: openssl version
I0904 04:18:57.211993 414464 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/minikubeCA.pem && ln -fs /usr/share/ca-certificates/minikubeCA.pem /etc/ssl/certs/minikubeCA.pem"
I0904 04:18:57.220021 414464 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/minikubeCA.pem
I0904 04:18:57.222943 414464 certs.go:528] hashing: -rw-r--r-- 1 root root 1111 Sep 4 04:13 /usr/share/ca-certificates/minikubeCA.pem
I0904 04:18:57.222989 414464 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem
I0904 04:18:57.228913 414464 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/b5213941.0 || ln -fs /etc/ssl/certs/minikubeCA.pem /etc/ssl/certs/b5213941.0"
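(editor's note: the two steps above implement OpenSSL's hashed-directory lookup: "openssl x509 -hash" prints the CA's subject hash, b5213941 here, and the certificate is then linked as /etc/ssl/certs/<hash>.0 so OpenSSL-based clients can find it. A minimal Go sketch of just the link step, reusing the hash value and paths from the log:)

package main

import "os"

// Recreates the effect of the shell step above:
//   test -L /etc/ssl/certs/b5213941.0 || ln -fs /etc/ssl/certs/minikubeCA.pem /etc/ssl/certs/b5213941.0
func main() {
    link := "/etc/ssl/certs/b5213941.0"
    if _, err := os.Lstat(link); err == nil {
        return // something already exists at the link path; mirror the shell's test -L short-circuit
    }
    if err := os.Symlink("/etc/ssl/certs/minikubeCA.pem", link); err != nil {
        panic(err)
    }
}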
I0904 04:18:57.237384 414464 ssh_runner.go:195] Run: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt
I0904 04:18:57.240536 414464 certs.go:399] 'apiserver-kubelet-client' cert doesn't exist, likely first start: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/certs/apiserver-kubelet-client.crt': No such file or directory
I0904 04:18:57.240570 414464 kubeadm.go:392] StartCluster: {Name:dockerenv-217193 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.47-1756936034-21409@sha256:06a2e6835062e5beff0e5288aa7d453ae87f4ed9d9f593dbbe436c8e34741bfc Memory:8000 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.0 ClusterName:dockerenv-217193 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.49.2 Port:8443 KubernetesVersion:v1.34.0 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I0904 04:18:57.240626 414464 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:paused Name: Namespaces:[kube-system]}
I0904 04:18:57.240667 414464 ssh_runner.go:195] Run: sudo -s eval "crictl ps -a --quiet --label io.kubernetes.pod.namespace=kube-system"
I0904 04:18:57.272918 414464 cri.go:89] found id: ""
I0904 04:18:57.272968 414464 ssh_runner.go:195] Run: sudo ls /var/lib/kubelet/kubeadm-flags.env /var/lib/kubelet/config.yaml /var/lib/minikube/etcd
I0904 04:18:57.281136 414464 ssh_runner.go:195] Run: sudo cp /var/tmp/minikube/kubeadm.yaml.new /var/tmp/minikube/kubeadm.yaml
I0904 04:18:57.289119 414464 kubeadm.go:214] ignoring SystemVerification for kubeadm because of docker driver
I0904 04:18:57.289170 414464 ssh_runner.go:195] Run: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf
I0904 04:18:57.296729 414464 kubeadm.go:155] config check failed, skipping stale config cleanup: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
ls: cannot access '/etc/kubernetes/admin.conf': No such file or directory
ls: cannot access '/etc/kubernetes/kubelet.conf': No such file or directory
ls: cannot access '/etc/kubernetes/controller-manager.conf': No such file or directory
ls: cannot access '/etc/kubernetes/scheduler.conf': No such file or directory
I0904 04:18:57.296736 414464 kubeadm.go:157] found existing configuration files:
I0904 04:18:57.296771 414464 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf
I0904 04:18:57.303977 414464 kubeadm.go:163] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/admin.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/admin.conf: No such file or directory
I0904 04:18:57.304014 414464 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/admin.conf
I0904 04:18:57.311079 414464 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf
I0904 04:18:57.318461 414464 kubeadm.go:163] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/kubelet.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/kubelet.conf: No such file or directory
I0904 04:18:57.318505 414464 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/kubelet.conf
I0904 04:18:57.326086 414464 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf
I0904 04:18:57.333929 414464 kubeadm.go:163] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/controller-manager.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/controller-manager.conf: No such file or directory
I0904 04:18:57.333966 414464 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/controller-manager.conf
I0904 04:18:57.341608 414464 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf
I0904 04:18:57.349301 414464 kubeadm.go:163] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/scheduler.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/scheduler.conf: No such file or directory
I0904 04:18:57.349339 414464 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/scheduler.conf
I0904 04:18:57.356775 414464 ssh_runner.go:286] Start: /bin/bash -c "sudo env PATH="/var/lib/minikube/binaries/v1.34.0:$PATH" kubeadm init --config /var/tmp/minikube/kubeadm.yaml --ignore-preflight-errors=DirAvailable--etc-kubernetes-manifests,DirAvailable--var-lib-minikube,DirAvailable--var-lib-minikube-etcd,FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml,FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml,FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml,FileAvailable--etc-kubernetes-manifests-etcd.yaml,Port-10250,Swap,NumCPU,Mem,SystemVerification,FileContent--proc-sys-net-bridge-bridge-nf-call-iptables"
I0904 04:18:57.393291 414464 kubeadm.go:310] [init] Using Kubernetes version: v1.34.0
I0904 04:18:57.393339 414464 kubeadm.go:310] [preflight] Running pre-flight checks
I0904 04:18:57.409318 414464 kubeadm.go:310] [preflight] The system verification failed. Printing the output from the verification:
I0904 04:18:57.409393 414464 kubeadm.go:310] KERNEL_VERSION: 5.15.0-1083-gcp
I0904 04:18:57.409433 414464 kubeadm.go:310] OS: Linux
I0904 04:18:57.409490 414464 kubeadm.go:310] CGROUPS_CPU: enabled
I0904 04:18:57.409575 414464 kubeadm.go:310] CGROUPS_CPUACCT: enabled
I0904 04:18:57.409659 414464 kubeadm.go:310] CGROUPS_CPUSET: enabled
I0904 04:18:57.409709 414464 kubeadm.go:310] CGROUPS_DEVICES: enabled
I0904 04:18:57.409749 414464 kubeadm.go:310] CGROUPS_FREEZER: enabled
I0904 04:18:57.409803 414464 kubeadm.go:310] CGROUPS_MEMORY: enabled
I0904 04:18:57.409843 414464 kubeadm.go:310] CGROUPS_PIDS: enabled
I0904 04:18:57.409883 414464 kubeadm.go:310] CGROUPS_HUGETLB: enabled
I0904 04:18:57.409936 414464 kubeadm.go:310] CGROUPS_BLKIO: enabled
I0904 04:18:57.461305 414464 kubeadm.go:310] [preflight] Pulling images required for setting up a Kubernetes cluster
I0904 04:18:57.461506 414464 kubeadm.go:310] [preflight] This might take a minute or two, depending on the speed of your internet connection
I0904 04:18:57.461625 414464 kubeadm.go:310] [preflight] You can also perform this action beforehand using 'kubeadm config images pull'
I0904 04:18:57.466531 414464 kubeadm.go:310] [certs] Using certificateDir folder "/var/lib/minikube/certs"
I0904 04:18:57.469335 414464 out.go:252] - Generating certificates and keys ...
I0904 04:18:57.469438 414464 kubeadm.go:310] [certs] Using existing ca certificate authority
I0904 04:18:57.469527 414464 kubeadm.go:310] [certs] Using existing apiserver certificate and key on disk
I0904 04:18:57.921035 414464 kubeadm.go:310] [certs] Generating "apiserver-kubelet-client" certificate and key
I0904 04:18:58.080421 414464 kubeadm.go:310] [certs] Generating "front-proxy-ca" certificate and key
I0904 04:18:58.614152 414464 kubeadm.go:310] [certs] Generating "front-proxy-client" certificate and key
I0904 04:18:58.817713 414464 kubeadm.go:310] [certs] Generating "etcd/ca" certificate and key
I0904 04:18:59.008514 414464 kubeadm.go:310] [certs] Generating "etcd/server" certificate and key
I0904 04:18:59.008692 414464 kubeadm.go:310] [certs] etcd/server serving cert is signed for DNS names [dockerenv-217193 localhost] and IPs [192.168.49.2 127.0.0.1 ::1]
I0904 04:18:59.356284 414464 kubeadm.go:310] [certs] Generating "etcd/peer" certificate and key
I0904 04:18:59.356394 414464 kubeadm.go:310] [certs] etcd/peer serving cert is signed for DNS names [dockerenv-217193 localhost] and IPs [192.168.49.2 127.0.0.1 ::1]
I0904 04:18:59.396235 414464 kubeadm.go:310] [certs] Generating "etcd/healthcheck-client" certificate and key
I0904 04:18:59.574791 414464 kubeadm.go:310] [certs] Generating "apiserver-etcd-client" certificate and key
I0904 04:18:59.983321 414464 kubeadm.go:310] [certs] Generating "sa" key and public key
I0904 04:18:59.983500 414464 kubeadm.go:310] [kubeconfig] Using kubeconfig folder "/etc/kubernetes"
I0904 04:19:00.222498 414464 kubeadm.go:310] [kubeconfig] Writing "admin.conf" kubeconfig file
I0904 04:19:00.359681 414464 kubeadm.go:310] [kubeconfig] Writing "super-admin.conf" kubeconfig file
I0904 04:19:01.127200 414464 kubeadm.go:310] [kubeconfig] Writing "kubelet.conf" kubeconfig file
I0904 04:19:01.582565 414464 kubeadm.go:310] [kubeconfig] Writing "controller-manager.conf" kubeconfig file
I0904 04:19:02.049700 414464 kubeadm.go:310] [kubeconfig] Writing "scheduler.conf" kubeconfig file
I0904 04:19:02.050096 414464 kubeadm.go:310] [etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
I0904 04:19:02.052228 414464 kubeadm.go:310] [control-plane] Using manifest folder "/etc/kubernetes/manifests"
I0904 04:19:02.054128 414464 out.go:252] - Booting up control plane ...
I0904 04:19:02.054256 414464 kubeadm.go:310] [control-plane] Creating static Pod manifest for "kube-apiserver"
I0904 04:19:02.054324 414464 kubeadm.go:310] [control-plane] Creating static Pod manifest for "kube-controller-manager"
I0904 04:19:02.054388 414464 kubeadm.go:310] [control-plane] Creating static Pod manifest for "kube-scheduler"
I0904 04:19:02.063039 414464 kubeadm.go:310] [kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
I0904 04:19:02.063165 414464 kubeadm.go:310] [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/instance-config.yaml"
I0904 04:19:02.069553 414464 kubeadm.go:310] [patches] Applied patch of type "application/strategic-merge-patch+json" to target "kubeletconfiguration"
I0904 04:19:02.069977 414464 kubeadm.go:310] [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
I0904 04:19:02.070013 414464 kubeadm.go:310] [kubelet-start] Starting the kubelet
I0904 04:19:02.148681 414464 kubeadm.go:310] [wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests"
I0904 04:19:02.148777 414464 kubeadm.go:310] [kubelet-check] Waiting for a healthy kubelet at http://127.0.0.1:10248/healthz. This can take up to 4m0s
I0904 04:19:03.184995 414464 kubeadm.go:310] [kubelet-check] The kubelet is healthy after 1.035790903s
I0904 04:19:03.190002 414464 kubeadm.go:310] [control-plane-check] Waiting for healthy control plane components. This can take up to 4m0s
I0904 04:19:03.190127 414464 kubeadm.go:310] [control-plane-check] Checking kube-apiserver at https://192.168.49.2:8443/livez
I0904 04:19:03.190283 414464 kubeadm.go:310] [control-plane-check] Checking kube-controller-manager at https://127.0.0.1:10257/healthz
I0904 04:19:03.190389 414464 kubeadm.go:310] [control-plane-check] Checking kube-scheduler at https://127.0.0.1:10259/livez
I0904 04:19:05.303486 414464 kubeadm.go:310] [control-plane-check] kube-controller-manager is healthy after 2.113375004s
I0904 04:19:06.305696 414464 kubeadm.go:310] [control-plane-check] kube-scheduler is healthy after 3.115689782s
I0904 04:19:08.191630 414464 kubeadm.go:310] [control-plane-check] kube-apiserver is healthy after 5.001556828s
I0904 04:19:08.202318 414464 kubeadm.go:310] [upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
I0904 04:19:08.211805 414464 kubeadm.go:310] [kubelet] Creating a ConfigMap "kubelet-config" in namespace kube-system with the configuration for the kubelets in the cluster
I0904 04:19:08.219316 414464 kubeadm.go:310] [upload-certs] Skipping phase. Please see --upload-certs
I0904 04:19:08.219481 414464 kubeadm.go:310] [mark-control-plane] Marking the node dockerenv-217193 as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]
I0904 04:19:08.226878 414464 kubeadm.go:310] [bootstrap-token] Using token: 9bnilw.7c3gesdn7po0bmui
I0904 04:19:08.228095 414464 out.go:252] - Configuring RBAC rules ...
I0904 04:19:08.228235 414464 kubeadm.go:310] [bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
I0904 04:19:08.231704 414464 kubeadm.go:310] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes
I0904 04:19:08.237356 414464 kubeadm.go:310] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
I0904 04:19:08.239718 414464 kubeadm.go:310] [bootstrap-token] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
I0904 04:19:08.242100 414464 kubeadm.go:310] [bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
I0904 04:19:08.244647 414464 kubeadm.go:310] [bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
I0904 04:19:08.597116 414464 kubeadm.go:310] [kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
I0904 04:19:09.013700 414464 kubeadm.go:310] [addons] Applied essential addon: CoreDNS
I0904 04:19:09.598017 414464 kubeadm.go:310] [addons] Applied essential addon: kube-proxy
I0904 04:19:09.598875 414464 kubeadm.go:310]
I0904 04:19:09.598930 414464 kubeadm.go:310] Your Kubernetes control-plane has initialized successfully!
I0904 04:19:09.598934 414464 kubeadm.go:310]
I0904 04:19:09.598998 414464 kubeadm.go:310] To start using your cluster, you need to run the following as a regular user:
I0904 04:19:09.599001 414464 kubeadm.go:310]
I0904 04:19:09.599038 414464 kubeadm.go:310] mkdir -p $HOME/.kube
I0904 04:19:09.599088 414464 kubeadm.go:310] sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
I0904 04:19:09.599125 414464 kubeadm.go:310] sudo chown $(id -u):$(id -g) $HOME/.kube/config
I0904 04:19:09.599128 414464 kubeadm.go:310]
I0904 04:19:09.599172 414464 kubeadm.go:310] Alternatively, if you are the root user, you can run:
I0904 04:19:09.599177 414464 kubeadm.go:310]
I0904 04:19:09.599213 414464 kubeadm.go:310] export KUBECONFIG=/etc/kubernetes/admin.conf
I0904 04:19:09.599216 414464 kubeadm.go:310]
I0904 04:19:09.599255 414464 kubeadm.go:310] You should now deploy a pod network to the cluster.
I0904 04:19:09.599317 414464 kubeadm.go:310] Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
I0904 04:19:09.599368 414464 kubeadm.go:310] https://kubernetes.io/docs/concepts/cluster-administration/addons/
I0904 04:19:09.599371 414464 kubeadm.go:310]
I0904 04:19:09.599450 414464 kubeadm.go:310] You can now join any number of control-plane nodes by copying certificate authorities
I0904 04:19:09.599508 414464 kubeadm.go:310] and service account keys on each node and then running the following as root:
I0904 04:19:09.599510 414464 kubeadm.go:310]
I0904 04:19:09.599601 414464 kubeadm.go:310] kubeadm join control-plane.minikube.internal:8443 --token 9bnilw.7c3gesdn7po0bmui \
I0904 04:19:09.599685 414464 kubeadm.go:310] --discovery-token-ca-cert-hash sha256:23a2f3c6605ae485931544405f3f71ce4698de62429327be8a7935a80b3bf3e4 \
I0904 04:19:09.599700 414464 kubeadm.go:310] --control-plane
I0904 04:19:09.599703 414464 kubeadm.go:310]
I0904 04:19:09.599767 414464 kubeadm.go:310] Then you can join any number of worker nodes by running the following on each as root:
I0904 04:19:09.599784 414464 kubeadm.go:310]
I0904 04:19:09.599896 414464 kubeadm.go:310] kubeadm join control-plane.minikube.internal:8443 --token 9bnilw.7c3gesdn7po0bmui \
I0904 04:19:09.600021 414464 kubeadm.go:310] --discovery-token-ca-cert-hash sha256:23a2f3c6605ae485931544405f3f71ce4698de62429327be8a7935a80b3bf3e4
I0904 04:19:09.602731 414464 kubeadm.go:310] [WARNING SystemVerification]: cgroups v1 support is in maintenance mode, please migrate to cgroups v2
I0904 04:19:09.602970 414464 kubeadm.go:310] [WARNING SystemVerification]: failed to parse kernel config: unable to load kernel module: "configs", output: "modprobe: FATAL: Module configs not found in directory /lib/modules/5.15.0-1083-gcp\n", err: exit status 1
I0904 04:19:09.603076 414464 kubeadm.go:310] [WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'
I0904 04:19:09.603116 414464 cni.go:84] Creating CNI manager for ""
I0904 04:19:09.603125 414464 cni.go:143] "docker" driver + "containerd" runtime found, recommending kindnet
I0904 04:19:09.604516 414464 out.go:179] * Configuring CNI (Container Networking Interface) ...
I0904 04:19:09.605475 414464 ssh_runner.go:195] Run: stat /opt/cni/bin/portmap
I0904 04:19:09.609124 414464 cni.go:182] applying CNI manifest using /var/lib/minikube/binaries/v1.34.0/kubectl ...
I0904 04:19:09.609133 414464 ssh_runner.go:362] scp memory --> /var/tmp/minikube/cni.yaml (2601 bytes)
I0904 04:19:09.625323 414464 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.0/kubectl apply --kubeconfig=/var/lib/minikube/kubeconfig -f /var/tmp/minikube/cni.yaml
I0904 04:19:09.818893 414464 ssh_runner.go:195] Run: /bin/bash -c "cat /proc/$(pgrep kube-apiserver)/oom_adj"
I0904 04:19:09.818949 414464 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.0/kubectl create clusterrolebinding minikube-rbac --clusterrole=cluster-admin --serviceaccount=kube-system:default --kubeconfig=/var/lib/minikube/kubeconfig
I0904 04:19:09.818978 414464 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig label --overwrite nodes dockerenv-217193 minikube.k8s.io/updated_at=2025_09_04T04_19_09_0700 minikube.k8s.io/version=v1.36.0 minikube.k8s.io/commit=3abc733bafe6a1418dd7bd66760037215e6f0530 minikube.k8s.io/name=dockerenv-217193 minikube.k8s.io/primary=true
I0904 04:19:09.826411 414464 ops.go:34] apiserver oom_adj: -16
I0904 04:19:09.914911 414464 kubeadm.go:1105] duration metric: took 96.008209ms to wait for elevateKubeSystemPrivileges
I0904 04:19:09.914935 414464 kubeadm.go:394] duration metric: took 12.674369943s to StartCluster
I0904 04:19:09.914954 414464 settings.go:142] acquiring lock: {Name:mk8f6cb14c2459372c45d893ebfdcf0fb4723051 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0904 04:19:09.915016 414464 settings.go:150] Updating kubeconfig: /home/jenkins/minikube-integration/21409-385918/kubeconfig
I0904 04:19:09.915720 414464 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21409-385918/kubeconfig: {Name:mkd65c9fc5b98524fc254dfc0926c25e1ae26b4f Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0904 04:19:09.915929 414464 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml"
I0904 04:19:09.915925 414464 start.go:235] Will wait 6m0s for node &{Name: IP:192.168.49.2 Port:8443 KubernetesVersion:v1.34.0 ContainerRuntime:containerd ControlPlane:true Worker:true}
I0904 04:19:09.916005 414464 addons.go:511] enable addons start: toEnable=map[ambassador:false amd-gpu-device-plugin:false auto-pause:false cloud-spanner:false csi-hostpath-driver:false dashboard:false default-storageclass:true efk:false freshpod:false gcp-auth:false gvisor:false headlamp:false inaccel:false ingress:false ingress-dns:false inspektor-gadget:false istio:false istio-provisioner:false kong:false kubeflow:false kubetail:false kubevirt:false logviewer:false metallb:false metrics-server:false nvidia-device-plugin:false nvidia-driver-installer:false nvidia-gpu-device-plugin:false olm:false pod-security-policy:false portainer:false registry:false registry-aliases:false registry-creds:false storage-provisioner:true storage-provisioner-gluster:false storage-provisioner-rancher:false volcano:false volumesnapshots:false yakd:false]
I0904 04:19:09.916120 414464 addons.go:69] Setting storage-provisioner=true in profile "dockerenv-217193"
I0904 04:19:09.916135 414464 addons.go:238] Setting addon storage-provisioner=true in "dockerenv-217193"
I0904 04:19:09.916157 414464 config.go:182] Loaded profile config "dockerenv-217193": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.34.0
I0904 04:19:09.916167 414464 host.go:66] Checking if "dockerenv-217193" exists ...
I0904 04:19:09.916171 414464 addons.go:69] Setting default-storageclass=true in profile "dockerenv-217193"
I0904 04:19:09.916222 414464 addons_storage_classes.go:33] enableOrDisableStorageClasses default-storageclass=true on "dockerenv-217193"
I0904 04:19:09.916594 414464 cli_runner.go:164] Run: docker container inspect dockerenv-217193 --format={{.State.Status}}
I0904 04:19:09.916767 414464 cli_runner.go:164] Run: docker container inspect dockerenv-217193 --format={{.State.Status}}
I0904 04:19:09.920635 414464 out.go:179] * Verifying Kubernetes components...
I0904 04:19:09.921836 414464 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0904 04:19:09.944960 414464 addons.go:238] Setting addon default-storageclass=true in "dockerenv-217193"
I0904 04:19:09.944990 414464 host.go:66] Checking if "dockerenv-217193" exists ...
I0904 04:19:09.945356 414464 cli_runner.go:164] Run: docker container inspect dockerenv-217193 --format={{.State.Status}}
I0904 04:19:09.945936 414464 out.go:179] - Using image gcr.io/k8s-minikube/storage-provisioner:v5
I0904 04:19:09.946944 414464 addons.go:435] installing /etc/kubernetes/addons/storage-provisioner.yaml
I0904 04:19:09.946955 414464 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/storage-provisioner.yaml (2676 bytes)
I0904 04:19:09.946995 414464 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" dockerenv-217193
I0904 04:19:09.962877 414464 addons.go:435] installing /etc/kubernetes/addons/storageclass.yaml
I0904 04:19:09.962897 414464 ssh_runner.go:362] scp storageclass/storageclass.yaml --> /etc/kubernetes/addons/storageclass.yaml (271 bytes)
I0904 04:19:09.962965 414464 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" dockerenv-217193
I0904 04:19:09.963629 414464 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33148 SSHKeyPath:/home/jenkins/minikube-integration/21409-385918/.minikube/machines/dockerenv-217193/id_rsa Username:docker}
I0904 04:19:09.979441 414464 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33148 SSHKeyPath:/home/jenkins/minikube-integration/21409-385918/.minikube/machines/dockerenv-217193/id_rsa Username:docker}
I0904 04:19:10.111405 414464 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml | sed -e '/^ forward . \/etc\/resolv.conf.*/i \ hosts {\n 192.168.49.1 host.minikube.internal\n fallthrough\n }' -e '/^ errors *$/i \ log' | sudo /var/lib/minikube/binaries/v1.34.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig replace -f -"
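(editor's note: the pipeline above patches the CoreDNS Corefile, inserting a hosts{} block ahead of the "forward . /etc/resolv.conf" directive so that host.minikube.internal resolves inside the cluster, confirmed by the "host record injected" line below. A minimal sketch of the same string surgery; the stand-in Corefile is an assumption, the real one lives in the coredns ConfigMap and minikube edits it via sed and kubectl replace:)

package main

import (
    "fmt"
    "strings"
)

func main() {
    // Abbreviated stand-in for the Corefile held in the coredns ConfigMap.
    corefile := `.:53 {
    errors
    forward . /etc/resolv.conf
}`
    hosts := `    hosts {
       192.168.49.1 host.minikube.internal
       fallthrough
    }
`
    // Insert the hosts block immediately before the forward directive,
    // matching what the sed expression in the log does.
    patched := strings.Replace(corefile,
        "    forward . /etc/resolv.conf",
        hosts+"    forward . /etc/resolv.conf", 1)
    fmt.Println(patched)
}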
I0904 04:19:10.117040 414464 ssh_runner.go:195] Run: sudo systemctl start kubelet
I0904 04:19:10.200101 414464 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml
I0904 04:19:10.203475 414464 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml
I0904 04:19:10.487367 414464 start.go:976] {"host.minikube.internal": 192.168.49.1} host record injected into CoreDNS's ConfigMap
I0904 04:19:10.488404 414464 api_server.go:52] waiting for apiserver process to appear ...
I0904 04:19:10.488449 414464 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I0904 04:19:10.647457 414464 api_server.go:72] duration metric: took 731.50395ms to wait for apiserver process to appear ...
I0904 04:19:10.647476 414464 api_server.go:88] waiting for apiserver healthz status ...
I0904 04:19:10.647497 414464 api_server.go:253] Checking apiserver healthz at https://192.168.49.2:8443/healthz ...
I0904 04:19:10.648572 414464 out.go:179] * Enabled addons: default-storageclass, storage-provisioner
I0904 04:19:10.649455 414464 addons.go:514] duration metric: took 733.445979ms for enable addons: enabled=[default-storageclass storage-provisioner]
I0904 04:19:10.652942 414464 api_server.go:279] https://192.168.49.2:8443/healthz returned 200:
ok
I0904 04:19:10.654026 414464 api_server.go:141] control plane version: v1.34.0
I0904 04:19:10.654054 414464 api_server.go:131] duration metric: took 6.572691ms to wait for apiserver health ...
I0904 04:19:10.654074 414464 system_pods.go:43] waiting for kube-system pods to appear ...
I0904 04:19:10.660627 414464 system_pods.go:59] 5 kube-system pods found
I0904 04:19:10.660651 414464 system_pods.go:61] "etcd-dockerenv-217193" [08c8c8a2-d659-4ef0-8cdf-0c2146c1df0a] Running / Ready:ContainersNotReady (containers with unready status: [etcd]) / ContainersReady:ContainersNotReady (containers with unready status: [etcd])
I0904 04:19:10.660659 414464 system_pods.go:61] "kube-apiserver-dockerenv-217193" [dbddf88b-5005-453b-a120-543e88d4aff4] Running / Ready:ContainersNotReady (containers with unready status: [kube-apiserver]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-apiserver])
I0904 04:19:10.660667 414464 system_pods.go:61] "kube-controller-manager-dockerenv-217193" [342d8967-72a7-4537-b86d-ce724e452a1f] Running / Ready:ContainersNotReady (containers with unready status: [kube-controller-manager]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-controller-manager])
I0904 04:19:10.660674 414464 system_pods.go:61] "kube-scheduler-dockerenv-217193" [47cd1f6f-3506-4e8c-ac8f-8ffac2a1aca1] Running / Ready:ContainersNotReady (containers with unready status: [kube-scheduler]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-scheduler])
I0904 04:19:10.660679 414464 system_pods.go:61] "storage-provisioner" [ffb965cc-c095-4882-8791-04705cf7da12] Pending: PodScheduled:Unschedulable (0/1 nodes are available: 1 node(s) had untolerated taint {node.kubernetes.io/not-ready: }. no new claims to deallocate, preemption: 0/1 nodes are available: 1 Preemption is not helpful for scheduling.)
I0904 04:19:10.660686 414464 system_pods.go:74] duration metric: took 6.605855ms to wait for pod list to return data ...
I0904 04:19:10.660699 414464 kubeadm.go:578] duration metric: took 744.753705ms to wait for: map[apiserver:true system_pods:true]
I0904 04:19:10.660713 414464 node_conditions.go:102] verifying NodePressure condition ...
I0904 04:19:10.688802 414464 node_conditions.go:122] node storage ephemeral capacity is 304681132Ki
I0904 04:19:10.688826 414464 node_conditions.go:123] node cpu capacity is 8
I0904 04:19:10.688840 414464 node_conditions.go:105] duration metric: took 28.121852ms to run NodePressure ...
I0904 04:19:10.688857 414464 start.go:241] waiting for startup goroutines ...
I0904 04:19:10.991147 414464 kapi.go:214] "coredns" deployment in "kube-system" namespace and "dockerenv-217193" context rescaled to 1 replicas
I0904 04:19:10.991174 414464 start.go:246] waiting for cluster config update ...
I0904 04:19:10.991184 414464 start.go:255] writing updated cluster config ...
I0904 04:19:10.991463 414464 ssh_runner.go:195] Run: rm -f paused
I0904 04:19:11.037473 414464 start.go:617] kubectl: 1.33.2, cluster: 1.34.0 (minor skew: 1)
I0904 04:19:11.039046 414464 out.go:179] * Done! kubectl is now configured to use "dockerenv-217193" cluster and "default" namespace by default
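(editor's note: the "(minor skew: 1)" figure above compares the client and cluster minor versions; kubectl is supported within one minor version of the API server in either direction, so this is a warning threshold rather than an error. A minimal sketch of that check, with both version strings copied from the log line above:)

package main

import (
    "fmt"
    "strconv"
    "strings"
)

// minor extracts the minor component of a "major.minor.patch" version string.
func minor(v string) int {
    m, _ := strconv.Atoi(strings.Split(v, ".")[1])
    return m
}

func main() {
    kubectl, cluster := "1.33.2", "1.34.0" // versions from the log
    skew := minor(cluster) - minor(kubectl)
    if skew < 0 {
        skew = -skew
    }
    fmt.Printf("kubectl: %s, cluster: %s (minor skew: %d)\n", kubectl, cluster, skew)
}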
==> container status <==
CONTAINER IMAGE CREATED STATE NAME ATTEMPT POD ID POD
23323c4022a5b 6e38f40d628db 10 seconds ago Running storage-provisioner 0 0f69dcc06d55e storage-provisioner
5bc6827155b31 409467f978b4a 10 seconds ago Running kindnet-cni 0 54eed6739b651 kindnet-mnwr5
c1a1346561256 df0860106674d 10 seconds ago Running kube-proxy 0 c04e59eb842bf kube-proxy-9pb58
648b293f7c73c 46169d968e920 22 seconds ago Running kube-scheduler 0 af0d190160327 kube-scheduler-dockerenv-217193
0f65722abb6ff 5f1f5298c888d 22 seconds ago Running etcd 0 224118f65eaa7 etcd-dockerenv-217193
11708c7261969 90550c43ad2bc 22 seconds ago Running kube-apiserver 0 d2873668b9312 kube-apiserver-dockerenv-217193
2d1ede8793cd4 a0af72f2ec6d6 22 seconds ago Running kube-controller-manager 0 30d37c312ab36 kube-controller-manager-dockerenv-217193
==> containerd <==
Sep 04 04:19:03 dockerenv-217193 containerd[870]: time="2025-09-04T04:19:03.512710280Z" level=info msg="StartContainer for \"648b293f7c73c0466d83687e06eda5c577b3a24c874c1b0e36d83326e2367613\" returns successfully"
Sep 04 04:19:14 dockerenv-217193 containerd[870]: time="2025-09-04T04:19:14.457901130Z" level=info msg="RunPodSandbox for &PodSandboxMetadata{Name:kube-proxy-9pb58,Uid:0e0b3d30-8a2f-4faa-b64f-aca99f78f6e1,Namespace:kube-system,Attempt:0,}"
Sep 04 04:19:14 dockerenv-217193 containerd[870]: time="2025-09-04T04:19:14.458943975Z" level=info msg="RunPodSandbox for &PodSandboxMetadata{Name:kindnet-mnwr5,Uid:9f21759e-20e4-449e-9e58-bd5928a0a693,Namespace:kube-system,Attempt:0,}"
Sep 04 04:19:14 dockerenv-217193 containerd[870]: time="2025-09-04T04:19:14.513332282Z" level=info msg="RunPodSandbox for &PodSandboxMetadata{Name:kube-proxy-9pb58,Uid:0e0b3d30-8a2f-4faa-b64f-aca99f78f6e1,Namespace:kube-system,Attempt:0,} returns sandbox id \"c04e59eb842bf3ac778c7b3e4dcbb5e87379940c008ff79adcebc7c6f8985308\""
Sep 04 04:19:14 dockerenv-217193 containerd[870]: time="2025-09-04T04:19:14.518501202Z" level=info msg="CreateContainer within sandbox \"c04e59eb842bf3ac778c7b3e4dcbb5e87379940c008ff79adcebc7c6f8985308\" for container &ContainerMetadata{Name:kube-proxy,Attempt:0,}"
Sep 04 04:19:14 dockerenv-217193 containerd[870]: time="2025-09-04T04:19:14.528085486Z" level=info msg="CreateContainer within sandbox \"c04e59eb842bf3ac778c7b3e4dcbb5e87379940c008ff79adcebc7c6f8985308\" for &ContainerMetadata{Name:kube-proxy,Attempt:0,} returns container id \"c1a13465612563defe350071cbafa9a0879fda386e30f8921a8e251dcc0bbf6e\""
Sep 04 04:19:14 dockerenv-217193 containerd[870]: time="2025-09-04T04:19:14.528541684Z" level=info msg="StartContainer for \"c1a13465612563defe350071cbafa9a0879fda386e30f8921a8e251dcc0bbf6e\""
Sep 04 04:19:14 dockerenv-217193 containerd[870]: time="2025-09-04T04:19:14.578329282Z" level=info msg="StartContainer for \"c1a13465612563defe350071cbafa9a0879fda386e30f8921a8e251dcc0bbf6e\" returns successfully"
Sep 04 04:19:14 dockerenv-217193 containerd[870]: time="2025-09-04T04:19:14.745994765Z" level=info msg="RunPodSandbox for &PodSandboxMetadata{Name:coredns-66bc5c9577-8gzd7,Uid:adb705d0-af19-4565-8472-9065c0285819,Namespace:kube-system,Attempt:0,}"
Sep 04 04:19:14 dockerenv-217193 containerd[870]: time="2025-09-04T04:19:14.763597353Z" level=error msg="RunPodSandbox for &PodSandboxMetadata{Name:coredns-66bc5c9577-8gzd7,Uid:adb705d0-af19-4565-8472-9065c0285819,Namespace:kube-system,Attempt:0,} failed, error" error="failed to setup network for sandbox \"454448924ca646766924f8271c3f77cdff4d330b0284a90ceb3fe2128ee12d81\": failed to find network info for sandbox \"454448924ca646766924f8271c3f77cdff4d330b0284a90ceb3fe2128ee12d81\""
Sep 04 04:19:14 dockerenv-217193 containerd[870]: time="2025-09-04T04:19:14.795593708Z" level=info msg="RunPodSandbox for &PodSandboxMetadata{Name:kindnet-mnwr5,Uid:9f21759e-20e4-449e-9e58-bd5928a0a693,Namespace:kube-system,Attempt:0,} returns sandbox id \"54eed6739b6515f609593324777c130f3195e002ba224d7f48fc99fa4f48db35\""
Sep 04 04:19:14 dockerenv-217193 containerd[870]: time="2025-09-04T04:19:14.800045549Z" level=info msg="CreateContainer within sandbox \"54eed6739b6515f609593324777c130f3195e002ba224d7f48fc99fa4f48db35\" for container &ContainerMetadata{Name:kindnet-cni,Attempt:0,}"
Sep 04 04:19:14 dockerenv-217193 containerd[870]: time="2025-09-04T04:19:14.808287800Z" level=info msg="CreateContainer within sandbox \"54eed6739b6515f609593324777c130f3195e002ba224d7f48fc99fa4f48db35\" for &ContainerMetadata{Name:kindnet-cni,Attempt:0,} returns container id \"5bc6827155b3191b26297acb4221cf3df4a391076cb72be237636880a06a4a84\""
Sep 04 04:19:14 dockerenv-217193 containerd[870]: time="2025-09-04T04:19:14.808716500Z" level=info msg="StartContainer for \"5bc6827155b3191b26297acb4221cf3df4a391076cb72be237636880a06a4a84\""
Sep 04 04:19:14 dockerenv-217193 containerd[870]: time="2025-09-04T04:19:14.898077301Z" level=info msg="StartContainer for \"5bc6827155b3191b26297acb4221cf3df4a391076cb72be237636880a06a4a84\" returns successfully"
Sep 04 04:19:14 dockerenv-217193 containerd[870]: time="2025-09-04T04:19:14.992898107Z" level=info msg="RunPodSandbox for &PodSandboxMetadata{Name:storage-provisioner,Uid:ffb965cc-c095-4882-8791-04705cf7da12,Namespace:kube-system,Attempt:0,}"
Sep 04 04:19:15 dockerenv-217193 containerd[870]: time="2025-09-04T04:19:15.060161995Z" level=info msg="RunPodSandbox for &PodSandboxMetadata{Name:storage-provisioner,Uid:ffb965cc-c095-4882-8791-04705cf7da12,Namespace:kube-system,Attempt:0,} returns sandbox id \"0f69dcc06d55e169451195e23a8cc3177dee08f2a075474f170791b9cb87a81a\""
Sep 04 04:19:15 dockerenv-217193 containerd[870]: time="2025-09-04T04:19:15.065339381Z" level=info msg="CreateContainer within sandbox \"0f69dcc06d55e169451195e23a8cc3177dee08f2a075474f170791b9cb87a81a\" for container &ContainerMetadata{Name:storage-provisioner,Attempt:0,}"
Sep 04 04:19:15 dockerenv-217193 containerd[870]: time="2025-09-04T04:19:15.090532939Z" level=info msg="CreateContainer within sandbox \"0f69dcc06d55e169451195e23a8cc3177dee08f2a075474f170791b9cb87a81a\" for &ContainerMetadata{Name:storage-provisioner,Attempt:0,} returns container id \"23323c4022a5b50574cb1782e4c351d3df7f0268c7878fa1b53aec138739eb3f\""
Sep 04 04:19:15 dockerenv-217193 containerd[870]: time="2025-09-04T04:19:15.091172989Z" level=info msg="StartContainer for \"23323c4022a5b50574cb1782e4c351d3df7f0268c7878fa1b53aec138739eb3f\""
Sep 04 04:19:15 dockerenv-217193 containerd[870]: time="2025-09-04T04:19:15.131228733Z" level=info msg="StartContainer for \"23323c4022a5b50574cb1782e4c351d3df7f0268c7878fa1b53aec138739eb3f\" returns successfully"
Sep 04 04:19:19 dockerenv-217193 containerd[870]: time="2025-09-04T04:19:19.334423589Z" level=info msg="No cni config template is specified, wait for other system components to drop the config."
Sep 04 04:19:23 dockerenv-217193 containerd[870]: time="2025-09-04T04:19:23.703565859Z" level=info msg="ImageCreate event name:\"docker.io/local/minikube-dockerenv-containerd-test:latest\""
Sep 04 04:19:23 dockerenv-217193 containerd[870]: time="2025-09-04T04:19:23.709720282Z" level=info msg="ImageCreate event name:\"sha256:b5071690d691e592d1838713d34f6e17359e609f6f72854cb670728c823ff7a7\" labels:{key:\"io.cri-containerd.image\" value:\"managed\"}"
Sep 04 04:19:23 dockerenv-217193 containerd[870]: time="2025-09-04T04:19:23.710198243Z" level=info msg="ImageUpdate event name:\"docker.io/local/minikube-dockerenv-containerd-test:latest\" labels:{key:\"io.cri-containerd.image\" value:\"managed\"}"
==> describe nodes <==
Name: dockerenv-217193
Roles: control-plane
Labels: beta.kubernetes.io/arch=amd64
beta.kubernetes.io/os=linux
kubernetes.io/arch=amd64
kubernetes.io/hostname=dockerenv-217193
kubernetes.io/os=linux
minikube.k8s.io/commit=3abc733bafe6a1418dd7bd66760037215e6f0530
minikube.k8s.io/name=dockerenv-217193
minikube.k8s.io/primary=true
minikube.k8s.io/updated_at=2025_09_04T04_19_09_0700
minikube.k8s.io/version=v1.36.0
node-role.kubernetes.io/control-plane=
node.kubernetes.io/exclude-from-external-load-balancers=
Annotations: node.alpha.kubernetes.io/ttl: 0
volumes.kubernetes.io/controller-managed-attach-detach: true
CreationTimestamp: Thu, 04 Sep 2025 04:19:06 +0000
Taints: <none>
Unschedulable: false
Lease:
HolderIdentity: dockerenv-217193
AcquireTime: <unset>
RenewTime: Thu, 04 Sep 2025 04:19:18 +0000
Conditions:
Type Status LastHeartbeatTime LastTransitionTime Reason Message
---- ------ ----------------- ------------------ ------ -------
MemoryPressure False Thu, 04 Sep 2025 04:19:19 +0000 Thu, 04 Sep 2025 04:19:04 +0000 KubeletHasSufficientMemory kubelet has sufficient memory available
DiskPressure False Thu, 04 Sep 2025 04:19:19 +0000 Thu, 04 Sep 2025 04:19:04 +0000 KubeletHasNoDiskPressure kubelet has no disk pressure
PIDPressure False Thu, 04 Sep 2025 04:19:19 +0000 Thu, 04 Sep 2025 04:19:04 +0000 KubeletHasSufficientPID kubelet has sufficient PID available
Ready True Thu, 04 Sep 2025 04:19:19 +0000 Thu, 04 Sep 2025 04:19:06 +0000 KubeletReady kubelet is posting ready status
Addresses:
InternalIP: 192.168.49.2
Hostname: dockerenv-217193
Capacity:
cpu: 8
ephemeral-storage: 304681132Ki
hugepages-1Gi: 0
hugepages-2Mi: 0
memory: 32859360Ki
pods: 110
Allocatable:
cpu: 8
ephemeral-storage: 304681132Ki
hugepages-1Gi: 0
hugepages-2Mi: 0
memory: 32859360Ki
pods: 110
System Info:
Machine ID: 030de4d86bc9467f946a61ba6b5d9099
System UUID: 9a5b297c-2a4f-4722-af54-fb8051a0fe0f
Boot ID: 68caae6e-4dcf-4a37-934f-61939f76c834
Kernel Version: 5.15.0-1083-gcp
OS Image: Ubuntu 22.04.5 LTS
Operating System: linux
Architecture: amd64
Container Runtime Version: containerd://1.7.27
Kubelet Version: v1.34.0
Kube-Proxy Version:
PodCIDR: 10.244.0.0/24
PodCIDRs: 10.244.0.0/24
Non-terminated Pods: (8 in total)
Namespace Name CPU Requests CPU Limits Memory Requests Memory Limits Age
--------- ---- ------------ ---------- --------------- ------------- ---
kube-system coredns-66bc5c9577-8gzd7 100m (1%) 0 (0%) 70Mi (0%) 170Mi (0%) 11s
kube-system etcd-dockerenv-217193 100m (1%) 0 (0%) 100Mi (0%) 0 (0%) 16s
kube-system kindnet-mnwr5 100m (1%) 100m (1%) 50Mi (0%) 50Mi (0%) 11s
kube-system kube-apiserver-dockerenv-217193 250m (3%) 0 (0%) 0 (0%) 0 (0%) 16s
kube-system kube-controller-manager-dockerenv-217193 200m (2%) 0 (0%) 0 (0%) 0 (0%) 18s
kube-system kube-proxy-9pb58 0 (0%) 0 (0%) 0 (0%) 0 (0%) 11s
kube-system kube-scheduler-dockerenv-217193 100m (1%) 0 (0%) 0 (0%) 0 (0%) 16s
kube-system storage-provisioner 0 (0%) 0 (0%) 0 (0%) 0 (0%) 15s
Allocated resources:
(Total limits may be over 100 percent, i.e., overcommitted.)
Resource Requests Limits
-------- -------- ------
cpu 850m (10%) 100m (1%)
memory 220Mi (0%) 220Mi (0%)
ephemeral-storage 0 (0%) 0 (0%)
hugepages-1Gi 0 (0%) 0 (0%)
hugepages-2Mi 0 (0%) 0 (0%)
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal Starting 10s kube-proxy
Normal Starting 17s kubelet Starting kubelet.
Warning CgroupV1 17s kubelet cgroup v1 support is in maintenance mode, please migrate to cgroup v2
Normal NodeAllocatableEnforced 17s kubelet Updated Node Allocatable limit across pods
Normal NodeHasSufficientMemory 16s kubelet Node dockerenv-217193 status is now: NodeHasSufficientMemory
Normal NodeHasNoDiskPressure 16s kubelet Node dockerenv-217193 status is now: NodeHasNoDiskPressure
Normal NodeHasSufficientPID 16s kubelet Node dockerenv-217193 status is now: NodeHasSufficientPID
Normal RegisteredNode 12s node-controller Node dockerenv-217193 event: Registered Node dockerenv-217193 in Controller
==> dmesg <==
[ +0.000006] ll header: 00000000: ff ff ff ff ff ff da c6 e3 f7 65 46 08 06
[ +0.000327] IPv4: martian source 10.244.0.3 from 10.244.0.2, on dev eth0
[ +0.000004] ll header: 00000000: ff ff ff ff ff ff 02 ac 13 3b 75 f6 08 06
[Sep 4 03:55] IPv4: martian source 10.244.0.2 from 10.244.0.2, on dev bridge
[ +0.000006] ll header: 00000000: ff ff ff ff ff ff f2 db a8 31 e2 62 08 06
[ +1.064536] IPv4: martian source 10.244.0.1 from 10.244.0.3, on dev eth0
[ +0.000007] ll header: 00000000: ff ff ff ff ff ff 22 c8 72 6f 1b fa 08 06
[ +0.014846] IPv4: martian source 10.244.0.1 from 10.244.0.2, on dev eth0
[ +0.000008] ll header: 00000000: ff ff ff ff ff ff f2 db a8 31 e2 62 08 06
[ +31.300168] IPv4: martian source 10.244.0.1 from 10.244.0.2, on dev eth0
[ +0.000005] ll header: 00000000: ff ff ff ff ff ff 32 55 d4 db 2a 96 08 06
[ +2.589730] IPv4: martian source 10.244.0.1 from 10.244.0.2, on dev eth0
[ +0.000044] ll header: 00000000: ff ff ff ff ff ff 16 2c a6 c1 c4 1b 08 06
[ +6.063495] IPv4: martian source 10.244.0.1 from 10.244.0.4, on dev eth0
[ +0.000005] ll header: 00000000: ff ff ff ff ff ff fe 49 80 8e 10 d6 08 06
[ +0.000382] IPv4: martian source 10.244.0.4 from 10.244.0.3, on dev eth0
[ +0.000004] ll header: 00000000: ff ff ff ff ff ff 22 c8 72 6f 1b fa 08 06
[ +3.577529] IPv4: martian source 10.244.0.1 from 10.244.0.3, on dev eth0
[ +0.000006] ll header: 00000000: ff ff ff ff ff ff 62 db c1 e7 c7 5b 08 06
[ +0.000364] IPv4: martian source 10.244.0.3 from 10.244.0.2, on dev eth0
[ +0.000005] ll header: 00000000: ff ff ff ff ff ff 32 55 d4 db 2a 96 08 06
[ +8.471961] IPv4: martian source 10.244.0.1 from 10.244.0.3, on dev eth0
[ +0.000006] ll header: 00000000: ff ff ff ff ff ff 3e 04 f5 69 b3 bb 08 06
[ +0.000327] IPv4: martian source 10.244.0.3 from 10.244.0.2, on dev eth0
[ +0.000004] ll header: 00000000: ff ff ff ff ff ff 16 2c a6 c1 c4 1b 08 06
==> etcd [0f65722abb6ffc705c02ed98ef2211988a73747afc305f6919cab7e842be1c8c] <==
{"level":"warn","ts":"2025-09-04T04:19:05.324560Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:53580","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-09-04T04:19:05.331048Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:53588","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-09-04T04:19:05.386869Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:53600","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-09-04T04:19:05.392973Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:53614","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-09-04T04:19:05.399455Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:53632","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-09-04T04:19:05.410294Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:53652","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-09-04T04:19:05.417129Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:53660","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-09-04T04:19:05.429449Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:53696","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-09-04T04:19:05.435811Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:53722","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-09-04T04:19:05.459062Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:53742","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-09-04T04:19:05.484900Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:53772","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-09-04T04:19:05.490792Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:53810","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-09-04T04:19:05.496693Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:53812","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-09-04T04:19:05.503472Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:53824","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-09-04T04:19:05.509060Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:53846","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-09-04T04:19:05.515167Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:53878","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-09-04T04:19:05.521937Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:53902","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-09-04T04:19:05.528751Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:53920","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-09-04T04:19:05.534980Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:53934","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-09-04T04:19:05.541684Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:53944","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-09-04T04:19:05.547525Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:53958","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-09-04T04:19:05.589698Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:53978","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-09-04T04:19:05.597205Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:54020","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-09-04T04:19:05.603224Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:54032","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-09-04T04:19:05.687781Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:54048","server-name":"","error":"EOF"}
==> kernel <==
04:19:25 up 2:01, 0 users, load average: 1.16, 1.13, 1.05
Linux dockerenv-217193 5.15.0-1083-gcp #92~20.04.1-Ubuntu SMP Tue Apr 29 09:12:55 UTC 2025 x86_64 x86_64 x86_64 GNU/Linux
PRETTY_NAME="Ubuntu 22.04.5 LTS"
==> kindnet [5bc6827155b3191b26297acb4221cf3df4a391076cb72be237636880a06a4a84] <==
I0904 04:19:15.086074 1 main.go:109] connected to apiserver: https://10.96.0.1:443
I0904 04:19:15.086285 1 main.go:139] hostIP = 192.168.49.2
podIP = 192.168.49.2
I0904 04:19:15.086428 1 main.go:148] setting mtu 1500 for CNI
I0904 04:19:15.086447 1 main.go:178] kindnetd IP family: "ipv4"
I0904 04:19:15.086460 1 main.go:182] noMask IPv4 subnets: [10.244.0.0/16]
time="2025-09-04T04:19:15Z" level=info msg="Created plugin 10-kube-network-policies (kindnetd, handles RunPodSandbox,RemovePodSandbox)"
I0904 04:19:15.292522 1 controller.go:377] "Starting controller" name="kube-network-policies"
I0904 04:19:15.292541 1 controller.go:381] "Waiting for informer caches to sync"
I0904 04:19:15.292549 1 shared_informer.go:350] "Waiting for caches to sync" controller="kube-network-policies"
I0904 04:19:15.292661 1 controller.go:390] nri plugin exited: failed to connect to NRI service: dial unix /var/run/nri/nri.sock: connect: no such file or directory
I0904 04:19:15.692860 1 shared_informer.go:357] "Caches are synced" controller="kube-network-policies"
I0904 04:19:15.692899 1 metrics.go:72] Registering metrics
I0904 04:19:15.692959 1 controller.go:711] "Syncing nftables rules"
I0904 04:19:25.295002 1 main.go:297] Handling node with IPs: map[192.168.49.2:{}]
I0904 04:19:25.295063 1 main.go:301] handling current node
==> kube-apiserver [11708c726196969bffe75fd9a1162adc6655ccf8a3ceccc143f9e0172d8035dd] <==
I0904 04:19:06.389834 1 shared_informer.go:356] "Caches are synced" controller="*generic.policySource[*k8s.io/api/admissionregistration/v1.ValidatingAdmissionPolicy,*k8s.io/api/admissionregistration/v1.ValidatingAdmissionPolicyBinding,k8s.io/apiserver/pkg/admission/plugin/policy/validating.Validator]"
I0904 04:19:06.389862 1 policy_source.go:240] refreshing policies
I0904 04:19:06.409348 1 controller.go:667] quota admission added evaluator for: namespaces
I0904 04:19:06.448349 1 cidrallocator.go:301] created ClusterIP allocator for Service CIDR 10.96.0.0/12
I0904 04:19:06.448349 1 default_servicecidr_controller.go:228] Setting default ServiceCIDR condition Ready to True
I0904 04:19:06.451894 1 cidrallocator.go:277] updated ClusterIP allocator for Service CIDR 10.96.0.0/12
I0904 04:19:06.451914 1 default_servicecidr_controller.go:137] Shutting down kubernetes-service-cidr-controller
I0904 04:19:06.547048 1 controller.go:667] quota admission added evaluator for: leases.coordination.k8s.io
I0904 04:19:07.213016 1 storage_scheduling.go:95] created PriorityClass system-node-critical with value 2000001000
I0904 04:19:07.216654 1 storage_scheduling.go:95] created PriorityClass system-cluster-critical with value 2000000000
I0904 04:19:07.216678 1 storage_scheduling.go:111] all system priority classes are created successfully or already exist.
I0904 04:19:07.675408 1 controller.go:667] quota admission added evaluator for: roles.rbac.authorization.k8s.io
I0904 04:19:07.708540 1 controller.go:667] quota admission added evaluator for: rolebindings.rbac.authorization.k8s.io
I0904 04:19:07.816478 1 alloc.go:328] "allocated clusterIPs" service="default/kubernetes" clusterIPs={"IPv4":"10.96.0.1"}
W0904 04:19:07.822152 1 lease.go:265] Resetting endpoints for master service "kubernetes" to [192.168.49.2]
I0904 04:19:07.823071 1 controller.go:667] quota admission added evaluator for: endpoints
I0904 04:19:07.826458 1 controller.go:667] quota admission added evaluator for: endpointslices.discovery.k8s.io
I0904 04:19:08.231574 1 controller.go:667] quota admission added evaluator for: serviceaccounts
I0904 04:19:09.004458 1 controller.go:667] quota admission added evaluator for: deployments.apps
I0904 04:19:09.012872 1 alloc.go:328] "allocated clusterIPs" service="kube-system/kube-dns" clusterIPs={"IPv4":"10.96.0.10"}
I0904 04:19:09.019822 1 controller.go:667] quota admission added evaluator for: daemonsets.apps
I0904 04:19:14.032344 1 controller.go:667] quota admission added evaluator for: replicasets.apps
I0904 04:19:14.132697 1 controller.go:667] quota admission added evaluator for: controllerrevisions.apps
I0904 04:19:14.183517 1 cidrallocator.go:277] updated ClusterIP allocator for Service CIDR 10.96.0.0/12
I0904 04:19:14.186527 1 cidrallocator.go:277] updated ClusterIP allocator for Service CIDR 10.96.0.0/12
==> kube-controller-manager [2d1ede8793cd4e5c0407bceaf56d3b6fffa01bbca4e588dd446f277edd822313] <==
I0904 04:19:13.229535 1 shared_informer.go:356] "Caches are synced" controller="HPA"
I0904 04:19:13.230721 1 shared_informer.go:356] "Caches are synced" controller="attach detach"
I0904 04:19:13.230740 1 shared_informer.go:356] "Caches are synced" controller="persistent volume"
I0904 04:19:13.230763 1 shared_informer.go:356] "Caches are synced" controller="TTL"
I0904 04:19:13.230859 1 shared_informer.go:356] "Caches are synced" controller="job"
I0904 04:19:13.230916 1 shared_informer.go:356] "Caches are synced" controller="endpoint_slice_mirroring"
I0904 04:19:13.230898 1 shared_informer.go:356] "Caches are synced" controller="TTL after finished"
I0904 04:19:13.230925 1 shared_informer.go:356] "Caches are synced" controller="certificate-csrsigning-kubelet-serving"
I0904 04:19:13.230874 1 shared_informer.go:356] "Caches are synced" controller="cronjob"
I0904 04:19:13.231463 1 shared_informer.go:356] "Caches are synced" controller="certificate-csrsigning-kubelet-client"
I0904 04:19:13.231488 1 shared_informer.go:356] "Caches are synced" controller="bootstrap_signer"
I0904 04:19:13.232045 1 shared_informer.go:356] "Caches are synced" controller="certificate-csrsigning-kube-apiserver-client"
I0904 04:19:13.232273 1 shared_informer.go:356] "Caches are synced" controller="certificate-csrsigning-legacy-unknown"
I0904 04:19:13.234547 1 shared_informer.go:356] "Caches are synced" controller="expand"
I0904 04:19:13.234599 1 shared_informer.go:356] "Caches are synced" controller="node"
I0904 04:19:13.234658 1 range_allocator.go:177] "Sending events to api server" logger="node-ipam-controller"
I0904 04:19:13.234696 1 range_allocator.go:183] "Starting range CIDR allocator" logger="node-ipam-controller"
I0904 04:19:13.234708 1 shared_informer.go:349] "Waiting for caches to sync" controller="cidrallocator"
I0904 04:19:13.234717 1 shared_informer.go:356] "Caches are synced" controller="cidrallocator"
I0904 04:19:13.236821 1 shared_informer.go:356] "Caches are synced" controller="resource quota"
I0904 04:19:13.236937 1 shared_informer.go:356] "Caches are synced" controller="resource quota"
I0904 04:19:13.240277 1 shared_informer.go:356] "Caches are synced" controller="PVC protection"
I0904 04:19:13.240579 1 range_allocator.go:428] "Set node PodCIDR" logger="node-ipam-controller" node="dockerenv-217193" podCIDRs=["10.244.0.0/24"]
I0904 04:19:13.245501 1 shared_informer.go:356] "Caches are synced" controller="legacy-service-account-token-cleaner"
I0904 04:19:13.251781 1 shared_informer.go:356] "Caches are synced" controller="garbage collector"
==> kube-proxy [c1a13465612563defe350071cbafa9a0879fda386e30f8921a8e251dcc0bbf6e] <==
I0904 04:19:14.608281 1 server_linux.go:53] "Using iptables proxy"
I0904 04:19:14.736031 1 shared_informer.go:349] "Waiting for caches to sync" controller="node informer cache"
I0904 04:19:14.836409 1 shared_informer.go:356] "Caches are synced" controller="node informer cache"
I0904 04:19:14.836445 1 server.go:219] "Successfully retrieved NodeIPs" NodeIPs=["192.168.49.2"]
E0904 04:19:14.836569 1 server.go:256] "Kube-proxy configuration may be incomplete or incorrect" err="nodePortAddresses is unset; NodePort connections will be accepted on all local IPs. Consider using `--nodeport-addresses primary`"
I0904 04:19:14.854400 1 server.go:265] "kube-proxy running in dual-stack mode" primary ipFamily="IPv4"
I0904 04:19:14.854469 1 server_linux.go:132] "Using iptables Proxier"
I0904 04:19:14.858364 1 proxier.go:242] "Setting route_localnet=1 to allow node-ports on localhost; to change this either disable iptables.localhostNodePorts (--iptables-localhost-nodeports) or set nodePortAddresses (--nodeport-addresses) to filter loopback addresses" ipFamily="IPv4"
I0904 04:19:14.858802 1 server.go:527] "Version info" version="v1.34.0"
I0904 04:19:14.858861 1 server.go:529] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
I0904 04:19:14.860076 1 config.go:403] "Starting serviceCIDR config controller"
I0904 04:19:14.860118 1 shared_informer.go:349] "Waiting for caches to sync" controller="serviceCIDR config"
I0904 04:19:14.860130 1 config.go:200] "Starting service config controller"
I0904 04:19:14.860133 1 config.go:106] "Starting endpoint slice config controller"
I0904 04:19:14.860182 1 shared_informer.go:349] "Waiting for caches to sync" controller="endpoint slice config"
I0904 04:19:14.860149 1 shared_informer.go:349] "Waiting for caches to sync" controller="service config"
I0904 04:19:14.860213 1 config.go:309] "Starting node config controller"
I0904 04:19:14.860278 1 shared_informer.go:349] "Waiting for caches to sync" controller="node config"
I0904 04:19:14.860285 1 shared_informer.go:356] "Caches are synced" controller="node config"
I0904 04:19:14.960280 1 shared_informer.go:356] "Caches are synced" controller="endpoint slice config"
I0904 04:19:14.960303 1 shared_informer.go:356] "Caches are synced" controller="serviceCIDR config"
I0904 04:19:14.960959 1 shared_informer.go:356] "Caches are synced" controller="service config"
==> kube-scheduler [648b293f7c73c0466d83687e06eda5c577b3a24c874c1b0e36d83326e2367613] <==
E0904 04:19:06.304076 1 reflector.go:205] "Failed to watch" err="failed to list *v1.ResourceSlice: resourceslices.resource.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"resourceslices\" in API group \"resource.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.ResourceSlice"
E0904 04:19:06.303997 1 reflector.go:205] "Failed to watch" err="failed to list *v1.DeviceClass: deviceclasses.resource.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"deviceclasses\" in API group \"resource.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.DeviceClass"
E0904 04:19:06.304140 1 reflector.go:205] "Failed to watch" err="failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"storageclasses\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.StorageClass"
E0904 04:19:06.304140 1 reflector.go:205] "Failed to watch" err="failed to list *v1.Pod: pods is forbidden: User \"system:kube-scheduler\" cannot list resource \"pods\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.Pod"
E0904 04:19:06.304197 1 reflector.go:205] "Failed to watch" err="failed to list *v1.VolumeAttachment: volumeattachments.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"volumeattachments\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.VolumeAttachment"
E0904 04:19:06.304228 1 reflector.go:205] "Failed to watch" err="failed to list *v1.CSIDriver: csidrivers.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"csidrivers\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.CSIDriver"
E0904 04:19:06.304311 1 reflector.go:205] "Failed to watch" err="failed to list *v1.Service: services is forbidden: User \"system:kube-scheduler\" cannot list resource \"services\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.Service"
E0904 04:19:06.304370 1 reflector.go:205] "Failed to watch" err="failed to list *v1.Node: nodes is forbidden: User \"system:kube-scheduler\" cannot list resource \"nodes\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.Node"
E0904 04:19:06.304380 1 reflector.go:205] "Failed to watch" err="failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User \"system:kube-scheduler\" cannot list resource \"persistentvolumes\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.PersistentVolume"
E0904 04:19:06.304413 1 reflector.go:205] "Failed to watch" err="failed to list *v1.ReplicaSet: replicasets.apps is forbidden: User \"system:kube-scheduler\" cannot list resource \"replicasets\" in API group \"apps\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.ReplicaSet"
E0904 04:19:06.304431 1 reflector.go:205] "Failed to watch" err="failed to list *v1.StatefulSet: statefulsets.apps is forbidden: User \"system:kube-scheduler\" cannot list resource \"statefulsets\" in API group \"apps\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.StatefulSet"
E0904 04:19:06.304433 1 reflector.go:205] "Failed to watch" err="failed to list *v1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User \"system:kube-scheduler\" cannot list resource \"poddisruptionbudgets\" in API group \"policy\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.PodDisruptionBudget"
E0904 04:19:06.304511 1 reflector.go:205] "Failed to watch" err="failed to list *v1.Namespace: namespaces is forbidden: User \"system:kube-scheduler\" cannot list resource \"namespaces\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.Namespace"
E0904 04:19:06.304615 1 reflector.go:205] "Failed to watch" err="failed to list *v1.CSIStorageCapacity: csistoragecapacities.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"csistoragecapacities\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.CSIStorageCapacity"
E0904 04:19:07.112723 1 reflector.go:205] "Failed to watch" err="failed to list *v1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User \"system:kube-scheduler\" cannot list resource \"poddisruptionbudgets\" in API group \"policy\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.PodDisruptionBudget"
E0904 04:19:07.114565 1 reflector.go:205] "Failed to watch" err="failed to list *v1.CSIDriver: csidrivers.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"csidrivers\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.CSIDriver"
E0904 04:19:07.155035 1 reflector.go:205] "Failed to watch" err="failed to list *v1.VolumeAttachment: volumeattachments.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"volumeattachments\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.VolumeAttachment"
E0904 04:19:07.181241 1 reflector.go:205] "Failed to watch" err="failed to list *v1.Namespace: namespaces is forbidden: User \"system:kube-scheduler\" cannot list resource \"namespaces\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.Namespace"
E0904 04:19:07.367515 1 reflector.go:205] "Failed to watch" err="failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User \"system:kube-scheduler\" cannot list resource \"persistentvolumes\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.PersistentVolume"
E0904 04:19:07.368556 1 reflector.go:205] "Failed to watch" err="failed to list *v1.CSINode: csinodes.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"csinodes\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.CSINode"
E0904 04:19:07.375650 1 reflector.go:205] "Failed to watch" err="failed to list *v1.ResourceClaim: resourceclaims.resource.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"resourceclaims\" in API group \"resource.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.ResourceClaim"
E0904 04:19:07.434337 1 reflector.go:205] "Failed to watch" err="failed to list *v1.DeviceClass: deviceclasses.resource.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"deviceclasses\" in API group \"resource.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.DeviceClass"
E0904 04:19:07.449279 1 reflector.go:205] "Failed to watch" err="failed to list *v1.StatefulSet: statefulsets.apps is forbidden: User \"system:kube-scheduler\" cannot list resource \"statefulsets\" in API group \"apps\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.StatefulSet"
E0904 04:19:07.505770 1 reflector.go:205] "Failed to watch" err="failed to list *v1.Node: nodes is forbidden: User \"system:kube-scheduler\" cannot list resource \"nodes\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.Node"
I0904 04:19:07.900900 1 shared_informer.go:356] "Caches are synced" controller="client-ca::kube-system::extension-apiserver-authentication::client-ca-file"
==> kubelet <==
Sep 04 04:19:13 dockerenv-217193 kubelet[1669]: E0904 04:19:13.314379 1669 projected.go:196] Error preparing data for projected volume kube-api-access-lsv98 for pod kube-system/storage-provisioner: configmap "kube-root-ca.crt" not found
Sep 04 04:19:13 dockerenv-217193 kubelet[1669]: E0904 04:19:13.314505 1669 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/ffb965cc-c095-4882-8791-04705cf7da12-kube-api-access-lsv98 podName:ffb965cc-c095-4882-8791-04705cf7da12 nodeName:}" failed. No retries permitted until 2025-09-04 04:19:13.814476936 +0000 UTC m=+5.032698384 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-lsv98" (UniqueName: "kubernetes.io/projected/ffb965cc-c095-4882-8791-04705cf7da12-kube-api-access-lsv98") pod "storage-provisioner" (UID: "ffb965cc-c095-4882-8791-04705cf7da12") : configmap "kube-root-ca.crt" not found
Sep 04 04:19:13 dockerenv-217193 kubelet[1669]: E0904 04:19:13.911479 1669 projected.go:291] Couldn't get configMap kube-system/kube-root-ca.crt: configmap "kube-root-ca.crt" not found
Sep 04 04:19:13 dockerenv-217193 kubelet[1669]: E0904 04:19:13.911509 1669 projected.go:196] Error preparing data for projected volume kube-api-access-lsv98 for pod kube-system/storage-provisioner: configmap "kube-root-ca.crt" not found
Sep 04 04:19:13 dockerenv-217193 kubelet[1669]: E0904 04:19:13.911562 1669 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/ffb965cc-c095-4882-8791-04705cf7da12-kube-api-access-lsv98 podName:ffb965cc-c095-4882-8791-04705cf7da12 nodeName:}" failed. No retries permitted until 2025-09-04 04:19:14.911547417 +0000 UTC m=+6.129768847 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-lsv98" (UniqueName: "kubernetes.io/projected/ffb965cc-c095-4882-8791-04705cf7da12-kube-api-access-lsv98") pod "storage-provisioner" (UID: "ffb965cc-c095-4882-8791-04705cf7da12") : configmap "kube-root-ca.crt" not found
Sep 04 04:19:14 dockerenv-217193 kubelet[1669]: I0904 04:19:14.212673 1669 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-proxy\" (UniqueName: \"kubernetes.io/configmap/0e0b3d30-8a2f-4faa-b64f-aca99f78f6e1-kube-proxy\") pod \"kube-proxy-9pb58\" (UID: \"0e0b3d30-8a2f-4faa-b64f-aca99f78f6e1\") " pod="kube-system/kube-proxy-9pb58"
Sep 04 04:19:14 dockerenv-217193 kubelet[1669]: I0904 04:19:14.212713 1669 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2q8c5\" (UniqueName: \"kubernetes.io/projected/0e0b3d30-8a2f-4faa-b64f-aca99f78f6e1-kube-api-access-2q8c5\") pod \"kube-proxy-9pb58\" (UID: \"0e0b3d30-8a2f-4faa-b64f-aca99f78f6e1\") " pod="kube-system/kube-proxy-9pb58"
Sep 04 04:19:14 dockerenv-217193 kubelet[1669]: I0904 04:19:14.212734 1669 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-cfg\" (UniqueName: \"kubernetes.io/host-path/9f21759e-20e4-449e-9e58-bd5928a0a693-cni-cfg\") pod \"kindnet-mnwr5\" (UID: \"9f21759e-20e4-449e-9e58-bd5928a0a693\") " pod="kube-system/kindnet-mnwr5"
Sep 04 04:19:14 dockerenv-217193 kubelet[1669]: I0904 04:19:14.212747 1669 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/9f21759e-20e4-449e-9e58-bd5928a0a693-lib-modules\") pod \"kindnet-mnwr5\" (UID: \"9f21759e-20e4-449e-9e58-bd5928a0a693\") " pod="kube-system/kindnet-mnwr5"
Sep 04 04:19:14 dockerenv-217193 kubelet[1669]: I0904 04:19:14.212762 1669 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wnf59\" (UniqueName: \"kubernetes.io/projected/9f21759e-20e4-449e-9e58-bd5928a0a693-kube-api-access-wnf59\") pod \"kindnet-mnwr5\" (UID: \"9f21759e-20e4-449e-9e58-bd5928a0a693\") " pod="kube-system/kindnet-mnwr5"
Sep 04 04:19:14 dockerenv-217193 kubelet[1669]: I0904 04:19:14.212870 1669 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"xtables-lock\" (UniqueName: \"kubernetes.io/host-path/0e0b3d30-8a2f-4faa-b64f-aca99f78f6e1-xtables-lock\") pod \"kube-proxy-9pb58\" (UID: \"0e0b3d30-8a2f-4faa-b64f-aca99f78f6e1\") " pod="kube-system/kube-proxy-9pb58"
Sep 04 04:19:14 dockerenv-217193 kubelet[1669]: I0904 04:19:14.212915 1669 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/0e0b3d30-8a2f-4faa-b64f-aca99f78f6e1-lib-modules\") pod \"kube-proxy-9pb58\" (UID: \"0e0b3d30-8a2f-4faa-b64f-aca99f78f6e1\") " pod="kube-system/kube-proxy-9pb58"
Sep 04 04:19:14 dockerenv-217193 kubelet[1669]: I0904 04:19:14.212932 1669 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"xtables-lock\" (UniqueName: \"kubernetes.io/host-path/9f21759e-20e4-449e-9e58-bd5928a0a693-xtables-lock\") pod \"kindnet-mnwr5\" (UID: \"9f21759e-20e4-449e-9e58-bd5928a0a693\") " pod="kube-system/kindnet-mnwr5"
Sep 04 04:19:14 dockerenv-217193 kubelet[1669]: I0904 04:19:14.318651 1669 swap_util.go:74] "error creating dir to test if tmpfs noswap is enabled. Assuming not supported" mount path="" error="stat /var/lib/kubelet/plugins/kubernetes.io/empty-dir: no such file or directory"
Sep 04 04:19:14 dockerenv-217193 kubelet[1669]: I0904 04:19:14.514454 1669 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/adb705d0-af19-4565-8472-9065c0285819-config-volume\") pod \"coredns-66bc5c9577-8gzd7\" (UID: \"adb705d0-af19-4565-8472-9065c0285819\") " pod="kube-system/coredns-66bc5c9577-8gzd7"
Sep 04 04:19:14 dockerenv-217193 kubelet[1669]: I0904 04:19:14.514487 1669 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gdzwp\" (UniqueName: \"kubernetes.io/projected/adb705d0-af19-4565-8472-9065c0285819-kube-api-access-gdzwp\") pod \"coredns-66bc5c9577-8gzd7\" (UID: \"adb705d0-af19-4565-8472-9065c0285819\") " pod="kube-system/coredns-66bc5c9577-8gzd7"
Sep 04 04:19:14 dockerenv-217193 kubelet[1669]: E0904 04:19:14.763902 1669 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to setup network for sandbox \"454448924ca646766924f8271c3f77cdff4d330b0284a90ceb3fe2128ee12d81\": failed to find network info for sandbox \"454448924ca646766924f8271c3f77cdff4d330b0284a90ceb3fe2128ee12d81\""
Sep 04 04:19:14 dockerenv-217193 kubelet[1669]: E0904 04:19:14.763974 1669 kuberuntime_sandbox.go:71] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to setup network for sandbox \"454448924ca646766924f8271c3f77cdff4d330b0284a90ceb3fe2128ee12d81\": failed to find network info for sandbox \"454448924ca646766924f8271c3f77cdff4d330b0284a90ceb3fe2128ee12d81\"" pod="kube-system/coredns-66bc5c9577-8gzd7"
Sep 04 04:19:14 dockerenv-217193 kubelet[1669]: E0904 04:19:14.763996 1669 kuberuntime_manager.go:1343] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to setup network for sandbox \"454448924ca646766924f8271c3f77cdff4d330b0284a90ceb3fe2128ee12d81\": failed to find network info for sandbox \"454448924ca646766924f8271c3f77cdff4d330b0284a90ceb3fe2128ee12d81\"" pod="kube-system/coredns-66bc5c9577-8gzd7"
Sep 04 04:19:14 dockerenv-217193 kubelet[1669]: E0904 04:19:14.764064 1669 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"coredns-66bc5c9577-8gzd7_kube-system(adb705d0-af19-4565-8472-9065c0285819)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"coredns-66bc5c9577-8gzd7_kube-system(adb705d0-af19-4565-8472-9065c0285819)\\\": rpc error: code = Unknown desc = failed to setup network for sandbox \\\"454448924ca646766924f8271c3f77cdff4d330b0284a90ceb3fe2128ee12d81\\\": failed to find network info for sandbox \\\"454448924ca646766924f8271c3f77cdff4d330b0284a90ceb3fe2128ee12d81\\\"\"" pod="kube-system/coredns-66bc5c9577-8gzd7" podUID="adb705d0-af19-4565-8472-9065c0285819"
Sep 04 04:19:14 dockerenv-217193 kubelet[1669]: I0904 04:19:14.924360 1669 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="kube-system/kindnet-mnwr5" podStartSLOduration=0.924341809 podStartE2EDuration="924.341809ms" podCreationTimestamp="2025-09-04 04:19:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-04 04:19:14.924150689 +0000 UTC m=+6.142372141" watchObservedRunningTime="2025-09-04 04:19:14.924341809 +0000 UTC m=+6.142563260"
Sep 04 04:19:15 dockerenv-217193 kubelet[1669]: I0904 04:19:15.925344 1669 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="kube-system/kube-proxy-9pb58" podStartSLOduration=1.92532444 podStartE2EDuration="1.92532444s" podCreationTimestamp="2025-09-04 04:19:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-04 04:19:14.932413794 +0000 UTC m=+6.150635245" watchObservedRunningTime="2025-09-04 04:19:15.92532444 +0000 UTC m=+7.143545892"
Sep 04 04:19:18 dockerenv-217193 kubelet[1669]: I0904 04:19:18.170180 1669 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="kube-system/storage-provisioner" podStartSLOduration=8.170163718 podStartE2EDuration="8.170163718s" podCreationTimestamp="2025-09-04 04:19:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-04 04:19:15.925503511 +0000 UTC m=+7.143724951" watchObservedRunningTime="2025-09-04 04:19:18.170163718 +0000 UTC m=+9.388385169"
Sep 04 04:19:19 dockerenv-217193 kubelet[1669]: I0904 04:19:19.333751 1669 kuberuntime_manager.go:1828] "Updating runtime config through cri with podcidr" CIDR="10.244.0.0/24"
Sep 04 04:19:19 dockerenv-217193 kubelet[1669]: I0904 04:19:19.334723 1669 kubelet_network.go:47] "Updating Pod CIDR" originalPodCIDR="" newPodCIDR="10.244.0.0/24"
==> storage-provisioner [23323c4022a5b50574cb1782e4c351d3df7f0268c7878fa1b53aec138739eb3f] <==
I0904 04:19:15.140512 1 storage_provisioner.go:116] Initializing the minikube storage provisioner...
I0904 04:19:15.147847 1 storage_provisioner.go:141] Storage provisioner initialized, now starting service!
I0904 04:19:15.147897 1 leaderelection.go:243] attempting to acquire leader lease kube-system/k8s.io-minikube-hostpath...
W0904 04:19:15.149847 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
W0904 04:19:15.153305 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I0904 04:19:15.153976 1 leaderelection.go:253] successfully acquired lease kube-system/k8s.io-minikube-hostpath
I0904 04:19:15.154055 1 event.go:282] Event(v1.ObjectReference{Kind:"Endpoints", Namespace:"kube-system", Name:"k8s.io-minikube-hostpath", UID:"63422e87-776d-4d15-98d7-a78b7f6d9354", APIVersion:"v1", ResourceVersion:"381", FieldPath:""}): type: 'Normal' reason: 'LeaderElection' dockerenv-217193_019e0ff1-ee0d-4256-b290-ed338b9cec7c became leader
I0904 04:19:15.154123 1 controller.go:835] Starting provisioner controller k8s.io/minikube-hostpath_dockerenv-217193_019e0ff1-ee0d-4256-b290-ed338b9cec7c!
W0904 04:19:15.155870 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
W0904 04:19:15.158406 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I0904 04:19:15.254677 1 controller.go:884] Started provisioner controller k8s.io/minikube-hostpath_dockerenv-217193_019e0ff1-ee0d-4256-b290-ed338b9cec7c!
W0904 04:19:17.161650 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
W0904 04:19:17.166271 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
W0904 04:19:19.169374 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
W0904 04:19:19.173537 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
W0904 04:19:21.176630 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
W0904 04:19:21.180717 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
W0904 04:19:23.184426 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
W0904 04:19:23.187968 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
W0904 04:19:25.191026 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
W0904 04:19:25.194447 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
-- /stdout --
helpers_test.go:262: (dbg) Run: out/minikube-linux-amd64 status --format={{.APIServer}} -p dockerenv-217193 -n dockerenv-217193
helpers_test.go:269: (dbg) Run: kubectl --context dockerenv-217193 get po -o=jsonpath={.items[*].metadata.name} -A --field-selector=status.phase!=Running
helpers_test.go:280: non-running pods: coredns-66bc5c9577-8gzd7
helpers_test.go:282: ======> post-mortem[TestDockerEnvContainerd]: describe non-running pods <======
helpers_test.go:285: (dbg) Run: kubectl --context dockerenv-217193 describe pod coredns-66bc5c9577-8gzd7
helpers_test.go:285: (dbg) Non-zero exit: kubectl --context dockerenv-217193 describe pod coredns-66bc5c9577-8gzd7: exit status 1 (58.987013ms)
** stderr **
Error from server (NotFound): pods "coredns-66bc5c9577-8gzd7" not found
** /stderr **
helpers_test.go:287: kubectl --context dockerenv-217193 describe pod coredns-66bc5c9577-8gzd7: exit status 1
helpers_test.go:175: Cleaning up "dockerenv-217193" profile ...
helpers_test.go:178: (dbg) Run: out/minikube-linux-amd64 delete -p dockerenv-217193
helpers_test.go:178: (dbg) Done: out/minikube-linux-amd64 delete -p dockerenv-217193: (1.792673904s)
--- FAIL: TestDockerEnvContainerd (40.58s)